diff --git "a/5202.jsonl" "b/5202.jsonl" new file mode 100644--- /dev/null +++ "b/5202.jsonl" @@ -0,0 +1,2126 @@ +{"seq_id":"6368436930","text":"# Iterar filas y eliminar duplicados\n\nfrom openpyxl import Workbook, load_workbook\nfrom openpyxl.utils import get_column_letter\n\ndef eliminar(a):\n\tws.delete_rows(a)\n\tprint(\"Fila \" + str(a) + \" eliminada\")\n\nwb = load_workbook('null_25285844126313319.xlsx')\nws = wb.active\ncell = ws.cell(1,8)\nencontrados = 0\nfilastot = 8108\n\nfor x in range(filastot, 1, -1):\n\tnom1 = ws.cell(x, 2).value\n\tcat1 = ws.cell(x, 7).value\n\tcuil1 = ws.cell(x, 8).value\n\tdom1 = ws.cell(x, 16).value\n\tcel1 = ws.cell(x, 20).value\n\tprint(str(x))\n\tprint(\"Progreso: \" + \"%.2f\" % (100-(x*100/filastot)) + \"%\")\n\t# print(str(nom) + \"\\t\" + str(cat) + \"\\t\" + str(cuil) + \"\\t\" + str(dom) + \"\\t\" + str(cel))\n\tfor y in range(filastot, 1, -1):\n\t\t# print(str(y))\n\t\tnom2 = ws.cell(y, 2).value\n\t\tcat2 = ws.cell(y, 7).value\n\t\tcuil2 = ws.cell(y, 8).value\n\t\tdom2 = ws.cell(y, 16).value\n\t\tcel2 = ws.cell(y, 20).value\n\t\tif (x != y):\n\t\t\tif (nom1 == nom2 and cat1 == cat2 and cuil1 == cuil2 and dom1 == dom2 and cel1 == cel2):\n\t\t\t\tprint(\"Se encontro duplicado en filas \" + str(x) + \" y \" + str(y))\n\t\t\t\teliminar(y)\n\t\t\t\tencontrados += 1\nprint(\"Se encontraron \" + str(encontrados) + \" duplicados\")\n\nwb.save('null_25285844126313319.xlsx')","repo_name":"lucasdepetrisd/solomoto-openpyxl","sub_path":"eliminar_duplicados.py","file_name":"eliminar_duplicados.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34197350420","text":"from generative.rag_hypers import RagHypers\nfrom torch_util.transformer_optimize import TransformerOptimize, LossHistory\nfrom util.line_corpus import read_lines, block_shuffle, jsonl_lines\nimport ujson as json\nimport random\nfrom generative.rag_util import prepare_seq2seq_batch_labels, prefered_answers, retrieved_docs_to_bart_input\nfrom corpus.corpus_client import CorpusClient\nimport logging\nimport torch\nimport torch.nn.functional as F\nfrom transformers import RagConfig\nfrom dpr.dpr_util import queries_to_vectors\nfrom table_augmentation.table import Table\nfrom table_augmentation.augmentation_tasks import Query\n\nlogger = logging.getLogger(__name__)\n\n\nclass Options(RagHypers):\n def __init__(self):\n super().__init__()\n\n\nhypers = Options()\nhypers.fill_from_args()\n\ntokenizer, model = hypers.get_tokenizer_and_model()\nconfig = RagConfig.from_pretrained('facebook/rag-token-nq')\n\nmodel = model.to(hypers.device)\nmodel.train()\n# construct rest retriever after the model\nrest_retriever = CorpusClient(hypers)\noptimizer = TransformerOptimize(hypers, hypers.num_train_epochs * hypers.num_instances, model)\nloss_history = LossHistory(hypers.num_instances //\n (hypers.full_train_batch_size // hypers.gradient_accumulation_steps))\nquery_maker = hypers.task.get_query_maker()\nbatch_count = 0\nif hypers.n_gpu < 1:\n raise ValueError('Must have GPU')\n# torch.autograd.set_detect_anomaly(True)\n\n\ndef retrieve(queries, id_batch):\n # CONSIDER: this could use a retriever_dpr (note it would split the optimizer though)\n query_vectors = queries_to_vectors(tokenizer, model.rag.question_encoder, queries,\n max_query_length=hypers.max_context_length)\n doc_scores, docs, doc_vectors = rest_retriever.retrieve(query_vectors, n_docs=hypers.n_docs,\n exclude_by_pid_prefix=id_batch)\n 
context_input_ids, context_attention_mask = retrieved_docs_to_bart_input(config, hypers.max_context_length,\n tokenizer, queries, docs)\n return context_input_ids.reshape(len(queries) * hypers.n_docs, -1).to(model.device), \\\n context_attention_mask.reshape(len(queries) * hypers.n_docs, -1).to(model.device), \\\n doc_scores.reshape(len(queries), hypers.n_docs), docs\n\n\ndef one_batch(queries, answers, id_batch):\n global batch_count\n context_input_ids, context_attention_mask, doc_scores, docs = retrieve(queries, id_batch)\n\n labels = prepare_seq2seq_batch_labels(tokenizer, answers, return_tensors=\"pt\",\n max_target_length=hypers.max_target_length).to(optimizer.hypers.device)\n\n outputs = optimizer.model(labels=labels,\n context_input_ids=context_input_ids, context_attention_mask=context_attention_mask,\n doc_scores=doc_scores)\n batch_count += 1\n loss = outputs.loss.mean()\n loss_history.note_loss(loss.item())\n optimizer.step_loss(loss,\n retrieval_time=rest_retriever.retrieval_time/(batch_count * hypers.per_gpu_train_batch_size))\n\n\ndef train():\n rand = random.Random(hypers.seed)\n query_batch = []\n answer_batch = []\n id_batch = []\n skip_count = 0\n while True:\n optimizer.model.train()\n inst_count = 0\n for line in block_shuffle(read_lines(hypers.tables, shuffled_files=rand), rand=rand, block_size=100000):\n inst = json.loads(line)\n if hypers.is_query:\n queries = [Query.from_dict(inst)]\n else:\n queries = query_maker(Table.from_dict(inst))\n if len(queries) == 0:\n skip_count += 1\n for query in queries:\n inst_count += 1\n if inst_count % hypers.world_size != hypers.global_rank:\n continue\n\n input_text = query.title + '\\n\\n' + query.text\n query_batch.append(input_text)\n answer_batch.append('; '.join(query.answers[:10])) # NOTE: just the first 10 answers?\n id_batch.append(query.table_id)\n if len(query_batch) == hypers.per_gpu_train_batch_size * hypers.n_gpu:\n one_batch(query_batch, answer_batch, id_batch)\n if not optimizer.should_continue():\n return\n query_batch = []\n answer_batch = []\n id_batch = []\n print(f'skipped {skip_count}')\n\n\ntrain()\nif hypers.world_size > 1:\n torch.distributed.barrier()\nif hypers.global_rank == 0:\n (optimizer.model.module if hasattr(optimizer.model, \"module\") else optimizer.model).save_pretrained(hypers.output_dir)\nlogger.info(f'loss_history = {loss_history.loss_history}')\nhypers.cleanup_corpus_server()\n","repo_name":"IBM/retrieval-table-augmentation","sub_path":"generative/rag_train.py","file_name":"rag_train.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"} +{"seq_id":"8709328907","text":"import communication_pb2\nfrom enum import Enum\nfrom cs50 import SQL\n\ndb = SQL(\"sqlite:///protobuf_gettingstarted/database.db\")\n\nclass PieceColor(Enum):\n WHITE = 0\n BLACK = 1\n\n def serialize(self) -> communication_pb2.PieceColor:\n return self.value\n\n @classmethod\n def deserialize(cls, proto: communication_pb2.PieceColor):\n return cls(proto)\n\nclass Board:\n def __init__(self, squares: dict = None) -> None:\n self.squares = {}\n if squares == None:\n for i in range(64):\n self.squares[i] = Square(i)\n else:\n for key in squares.keys():\n self.squares[key] = Square.deserialize(squares[key])\n\n\n def __str__(self) -> str:\n s = \"Board {squares: [\" \n for i in range(64):\n s += str(i) + ': ' + str(self.squares[i]) + \", \"\n s = s[:-2] + \"]}\"\n return s\n\n def serialize(self) -> communication_pb2.Board:\n proto = 
communication_pb2.Board()\n for key in self.squares.keys():\n proto.squares[key].CopyFrom(self.squares[key].serialize())\n return proto\n\n @classmethod\n def deserialize(cls, proto: communication_pb2.Board):\n return cls(proto.squares)\n\n\nclass Piece:\n def __init__(self, color: PieceColor = PieceColor.BLACK, identifier: str = 't', value: int = 2, location: int = 63) -> None:\n self.color = color\n self.identifier = identifier\n self.value = value\n self.location = location\n \n def serialize(self) -> communication_pb2.Piece:\n proto = communication_pb2.Piece()\n proto.color = self.color.serialize()\n proto.identifier = self.identifier\n proto.value = self.value\n proto.location = self.location\n return proto\n\n @classmethod\n def deserialize(cls, proto: communication_pb2.Piece):\n return cls(PieceColor.deserialize(proto.color), proto.identifier, proto.value, proto.location)\n\n def __str__(self) -> str:\n return \"Piece {color : \" + str(self.color) + \", identifier : \" + str(self.identifier) + \", value : \" + str(self.value) + \", location : \" + str(self.location) + \"}\"\n\nclass Square:\n def __init__(self, id: int = 23, isOccupied: bool = False, currentPiece: Piece = None) -> None:\n self.id = id\n self.isOccupied = isOccupied\n self.currentPiece = currentPiece\n\n def serialize(self) -> communication_pb2.Square:\n proto = communication_pb2.Square()\n proto.id = self.id\n if self.isOccupied:\n proto.currentPiece.CopyFrom(self.currentPiece.serialize())\n proto.isOccupied = self.isOccupied\n return proto\n \n @classmethod\n def deserialize(cls, proto: communication_pb2.Square):\n if proto.isOccupied:\n return cls(proto.id, proto.isOccupied, Piece.deserialize(proto.currentPiece))\n else: \n return cls(proto.id, proto.isOccupied, None)\n\n def __str__(self) -> str:\n return \"Square {id : \" + str(self.id) + \", isOccupied : \" + str(self.isOccupied) + \", currentPiece : \" + str(self.currentPiece) + \"}\"\n\n\n## WRITE/SERIALIZE: ###\nboard = Board()\n\nprint(board.squares[12])\n\nsquare = Square()\npiece = Piece()\nsquare.currentPiece = piece\nsquare.isOccupied = True\nboard.squares[12] = square\n\nprint(board.squares[12])\n\ndbId = db.execute(\"INSERT INTO binary_storage (board_protobuf) VALUES (?);\", board.serialize().SerializeToString())\n\n## READ/DESERIALIZE: ###\ndatabase_mock = communication_pb2.Board()\nresponse = db.execute(\"SELECT board_protobuf FROM binary_storage WHERE id = ?;\", dbId)[0]['board_protobuf']\ndatabase_mock.ParseFromString(response)\n\n\n# deserialize protobuf class to \"real\" class object:\nread_board = Board.deserialize(database_mock)\n\nprint(read_board.squares[12])\n\n\n","repo_name":"kai468/cs50final","sub_path":"protobuf_gettingstarted/backend_mock.py","file_name":"backend_mock.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"25318785020","text":"import pyxinput\r\nimport json\r\nimport asyncio\r\nimport websockets\r\n# import time\r\n\r\n# Xinput Documentation: https://docs.microsoft.com/en-us/windows/desktop/api/xinput/ns-xinput-_xinput_gamepad\r\n# Test multiple controllers online: http://html5gamepad.com/\r\n# System: Python 3.6.1 (64-bit, may work on any 64-bit python version) with ScpVBus x64 drivers\r\n# Note: Controller seems to only work when the\r\n# window of the program in use is focused on.\r\n# pyxinput.test_read()\r\n# pyxinput.test_virtual()\r\n\r\n# Read the input data from a physical controller plugged in.\r\nMyRead = 
pyxinput.rController(1)\r\n\r\n\r\n# while True:\r\n# print('State: ', MyRead.gamepad)\r\n# print('Buttons: ', MyRead.buttons)\r\n# time.sleep(0.5)\r\n\r\n# Create a virtual controller to test setting buttons/moving thumbstick.\r\n# MyVirtual = pyxinput.vController(percent=False)\r\n\r\n# Make the controller keep on scrolling to the left\r\n# by using the Left Thumbstick (x-axis), max left value is -32768\r\n# and neutral is 1024\r\n# while True:\r\n# MyVirtual.set_value('AxisLx', -32768)\r\n# time.sleep(0.5)\r\n\r\n# counter = 32768\r\n# while True:\r\n# MyVirtual.set_value('AxisLx', counter)\r\n# counter = counter + 1\r\n# if (counter == 32767 ):\r\n# counter = -32768\r\n# time.sleep(1)\r\n\r\n# NOTE: Be able to go straight into game and then change controller\r\n# to avoid the setting it up before.\r\n# for x in range(5):\r\n# MyVirtual.set_value('AxisLx', 32767)\r\n# time.sleep(0.5)\r\n#\r\n# for x in range(10):\r\n# MyVirtual.set_value('BtnA', 1)\r\n# time.sleep(0.5)\r\n# MyVirtual.set_value('BtnA', 0)\r\n# time.sleep(1)\r\n#\r\n# time.sleep(10)\r\n\r\n# vController uses percent by default,\r\n# all controls will accept a value between -1.0 to 1.0\r\n# print('Testing Value setting')\r\n# print('Connecting Controller:')\r\n# try:\r\n# con = pyxinput.vController(percent=False)\r\n# except pyxinput.virtual_controller.MaxInputsReachedError:\r\n# print('Unable to connect controller for testing.')\r\n# else:\r\n# print('This ID:', con.id)\r\n# print('Available:', pyxinput.vController.available_ids())\r\n# print('Setting TriggerR and AxisLx:')\r\n# for x in range(11):\r\n# val = 32767\r\n# # val = -(x / 10)\r\n# # print(val)\r\n# #con.set_value('TriggerR', val)\r\n# con.set_value('AxisLx', val)\r\n# time.sleep(0.5)\r\n#\r\n# print('Done, disconnecting controller.')\r\n# del con\r\n# print('Available:', pyxinput.vController.available_ids())\r\n# time.sleep(2)\r\n\r\n\r\n# TODO: To handle button press/release\r\n# as buttons will appear and disappear from the list.\r\n# buttons_dict = {\r\n# 'button_x': str,\r\n# 'button_y': str,\r\n# 'button_a': str,\r\n# 'button_b': str\r\n# }\r\n\r\n# input('Press any key to start copying input.')\r\n\r\n# TODO: Mimic buttons input.\r\n# Try reading input data from a physical controller\r\n# and mimic this onto the virtual controller.\r\nasync def start_emulation():\r\n\r\n # button_x = False\r\n # button_y = False\r\n # button_a = False\r\n # button_b = False\r\n #\r\n # button_left_shoulder = False\r\n # button_right_shoulder = False\r\n #\r\n # button_dpad_up = False\r\n # button_dpad_right = False\r\n # button_dpad_down = False\r\n # button_dpad_left = False\r\n #\r\n # button_left_thumb = False\r\n # button_right_thumb = False\r\n #\r\n # button_start = False\r\n # button_back = False\r\n\r\n async with websockets.connect('ws://youthful-driver.glitch.me/') as websocket:\r\n\r\n await websocket.send(\"Test Socket!\")\r\n response = await websocket.recv()\r\n print(response)\r\n\r\n while True:\r\n gamepad_dict = MyRead.gamepad.__dict__() # try str()\r\n gamepad_buttons = MyRead.buttons\r\n print('Read controller data.')\r\n\r\n json_pad = json.dumps(gamepad_dict)\r\n json_buttons = json.dumps(gamepad_buttons)\r\n\r\n await websocket.send(json_pad)\r\n response = await websocket.recv()\r\n print(response)\r\n\r\n await websocket.send(json_buttons)\r\n response = await websocket.recv()\r\n print(response)\r\n\r\n # TODO: Develop method to prevent unnecessarily calling set_value (if values have not changed).\r\n # Loop through the gamepad dictionary to set 
values.\r\n # for gamepad_item in gamepad_dict:\r\n # val = gamepad_dict[gamepad_item]\r\n #\r\n # print('Setting value for: {0} ({1})'.format(gamepad_item, val))\r\n # if gamepad_item == 'thumb_lx':\r\n # MyVirtual.set_value('AxisLx', val)\r\n # elif gamepad_item == 'thumb_ly':\r\n # MyVirtual.set_value('AxisLy', val)\r\n # elif gamepad_item == 'thumb_rx':\r\n # MyVirtual.set_value('AxisRx', val)\r\n # elif gamepad_item == 'thumb_ry':\r\n # MyVirtual.set_value('AxisRy', val)\r\n # elif gamepad_item == 'left_trigger':\r\n # MyVirtual.set_value('TriggerL', val)\r\n # elif gamepad_item == 'right_trigger':\r\n # MyVirtual.set_value('TriggerR', val)\r\n #\r\n # # Handle any button releases before we set them again.\r\n # # Since buttons pressed can be an empty list, a button release in the loop\r\n # # will not be recorded as the list would be empty, so run this before checking for\r\n # # new button presses.\r\n # if 'X' not in gamepad_buttons and button_x is True:\r\n # print('Release X')\r\n # button_x = False\r\n # # Set button x press to false.\r\n # MyVirtual.set_value('BtnX', 0)\r\n #\r\n # if 'Y' not in gamepad_buttons and button_y is True:\r\n # print('Release Y')\r\n # button_y = False\r\n # MyVirtual.set_value('BtnY', 0)\r\n #\r\n # if 'A' not in gamepad_buttons and button_a is True:\r\n # print('Release A')\r\n # button_a = False\r\n # MyVirtual.set_value('BtnA', 0)\r\n #\r\n # if 'B' not in gamepad_buttons and button_b is True:\r\n # print('Release B')\r\n # button_b = False\r\n # MyVirtual.set_value('BtnB', 0)\r\n #\r\n # if 'LEFT_SHOULDER' not in gamepad_buttons and button_left_shoulder is True:\r\n # print('Release LEFT_SHOULDER')\r\n # button_left_shoulder = False\r\n # MyVirtual.set_value('BtnShoulderL', 0)\r\n #\r\n # if 'RIGHT_SHOULDER' not in gamepad_buttons and button_right_shoulder is True:\r\n # print('Release RIGHT_SHOULDER')\r\n # button_right_shoulder = False\r\n # MyVirtual.set_value('BtnShoulderR', 0)\r\n #\r\n # # TODO: It is currently not possible to see which individual Dpad button\r\n # # has been released without seeing and working with the values from\r\n # # the controller.\r\n # if 'DPAD_UP' not in gamepad_buttons and button_dpad_up is True:\r\n # print('Release DPAD_UP')\r\n # button_dpad_up = False\r\n # # Set Dpad to off.\r\n # MyVirtual.set_value('Dpad', 0)\r\n #\r\n # if 'DPAD_DOWN' not in gamepad_buttons and button_dpad_down is True:\r\n # print('Release DPAD_DOWN')\r\n # button_dpad_down = False\r\n # # Set Dpad to off.\r\n # MyVirtual.set_value('Dpad', 0)\r\n #\r\n # if 'DPAD_LEFT' not in gamepad_buttons and button_dpad_left is True:\r\n # print('Release DPAD_LEFT')\r\n # button_dpad_left = False\r\n # # Set Dpad to off.\r\n # MyVirtual.set_value('Dpad', 0)\r\n #\r\n # if 'DPAD_RIGHT' not in gamepad_buttons and button_dpad_right is True:\r\n # print('Release DPAD_RIGHT')\r\n # button_dpad_right = False\r\n # # Set Dpad to off.\r\n # MyVirtual.set_value('Dpad', 0)\r\n #\r\n # if 'LEFT_THUMB' not in gamepad_buttons and button_left_thumb is True:\r\n # print('Release LEFT_THUMB')\r\n # button_left_thumb = False\r\n # MyVirtual.set_value('BtnThumbL', 0)\r\n #\r\n # if 'RIGHT_THUMB' not in gamepad_buttons and button_right_thumb is True:\r\n # print('Release RIGHT_THUMB')\r\n # button_right_thumb = False\r\n # MyVirtual.set_value('BtnThumbR', 0)\r\n #\r\n # if 'START' not in gamepad_buttons and button_start is True:\r\n # print('Release START')\r\n # button_start = False\r\n # MyVirtual.set_value('BtnStart', 0)\r\n #\r\n # if 'BACK' not in gamepad_buttons 
and button_back is True:\r\n # print('Release BACK')\r\n # button_back = False\r\n # MyVirtual.set_value('BtnBack', 0)\r\n #\r\n # # TODO: 1. Change the structure in which button press is returned from read_state.\r\n # # TODO: 2. Handle button press/release; what about the time in which\r\n # # the button is pressed for?\r\n # # TODO: 3.. Create a way for these to be asynchronous so that a button pressed is directly\r\n # # set or released and not having to wait in list to be set to press/release.\r\n # for button_no in range(len(gamepad_buttons)):\r\n # button_name = gamepad_buttons[button_no]\r\n # print('Handling press/release for button: ', button_name)\r\n #\r\n # # Handle buttons pressed.\r\n # if button_name == 'X' and button_x is False:\r\n # print('Pressed X')\r\n # button_x = True\r\n # # Set button X to true.\r\n # MyVirtual.set_value('BtnX', 1)\r\n #\r\n # elif button_name == 'Y' and button_y is False:\r\n # print('Pressed Y')\r\n # button_y = True\r\n # MyVirtual.set_value('BtnY', 1)\r\n #\r\n # elif button_name == 'A' and button_a is False:\r\n # print('Pressed A')\r\n # button_a = True\r\n # MyVirtual.set_value('BtnA', 1)\r\n #\r\n # elif button_name == 'B' and button_b is False:\r\n # print('Pressed B')\r\n # button_b = True\r\n # MyVirtual.set_value('BtnB', 1)\r\n #\r\n # elif button_name == 'LEFT_SHOULDER' and button_left_shoulder is False:\r\n # print('Pressed left shoulder.')\r\n # button_left_shoulder = True\r\n # MyVirtual.set_value('BtnShoulderL', 1)\r\n #\r\n # elif button_name == 'RIGHT_SHOULDER' and button_right_shoulder is False:\r\n # print('Pressed right shoulder.')\r\n # button_right_shoulder = True\r\n # MyVirtual.set_value('BtnShoulderR', 1)\r\n #\r\n # elif button_name == 'DPAD_UP' and button_dpad_up is False:\r\n # print('Pressed Dpad Up')\r\n # button_dpad_up = True\r\n # MyVirtual.set_value('Dpad', 1)\r\n #\r\n # elif button_name == 'DPAD_DOWN' and button_dpad_down is False:\r\n # print('Pressed Dpad Down')\r\n # button_dpad_down = True\r\n # MyVirtual.set_value('Dpad', 2)\r\n #\r\n # elif button_name == 'DPAD_LEFT' and button_dpad_left is False:\r\n # print('Got Dpad Left')\r\n # button_dpad_left = True\r\n # MyVirtual.set_value('Dpad', 4)\r\n #\r\n # elif button_name == 'DPAD_RIGHT' and button_dpad_right is False:\r\n # print('Pressed Dpad Right')\r\n # button_dpad_right = True\r\n # MyVirtual.set_value('Dpad', 8)\r\n #\r\n # elif button_name == 'LEFT_THUMB' and button_left_thumb is False:\r\n # print('Pressed Left Thumbstick')\r\n # button_left_thumb = True\r\n # MyVirtual.set_value('BtnThumbL', 1)\r\n #\r\n # elif button_name == 'RIGHT_THUMB' and button_right_thumb is False:\r\n # print('Pressed Right Thumbstick')\r\n # button_right_thumb = True\r\n # MyVirtual.set_value('BtnThumbR', 1)\r\n #\r\n # elif button_name == 'START' and button_start is False:\r\n # print('Pressed Start')\r\n # button_start = True\r\n # MyVirtual.set_value('BtnStart', 1)\r\n #\r\n # elif button_name == 'BACK' and button_back is False:\r\n # print('Pressed Back')\r\n # button_back = True\r\n # MyVirtual.set_value('BtnBack', 1)\r\n\r\nasyncio.get_event_loop().run_until_complete(start_emulation())\r\n","repo_name":"GoelBiju/xinput","sub_path":"scratch/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"15568557128","text":"import tensorflow as tf\r\nimport DARNN\r\nimport numpy\r\nrnn = 
tf.nn.rnn_cell\r\n#InputCNN=tf.keras.layers.Conv2D(1,[1,3],1,padding='same',kernel_initializer=tf.ones_initializer)\r\n\r\nclass _2DAttnLayer():\r\n \"\"\"\r\n CNN+InputAttn+TempAttn\r\n 입력된 [-1,input_dimension,input_timesteps] 데이터를 [-1,input_dimension,LSTM_units]으로 출력.\r\n 입력된 데이터에 대해 CNN -> InputAttn -> TemporalAttn순으로 계산.\r\n \r\n\r\n \"\"\"\r\n def __init__(self,\r\n LSTM_units,\r\n input_dimension,\r\n input_timesteps,\r\n inputs,\r\n target_inputs,\r\n batch,\r\n name=None):\r\n self.output_dimension=LSTM_units\r\n\r\n inputCNN=tf.keras.layers.Conv2D(1,[1,5],1,padding='same')\r\n cnnResult=inputCNN(tf.reshape(inputs,[-1,input_timesteps,input_dimension,1]))\r\n cnnResult=tf.squeeze(cnnResult,axis=3)\r\n\r\n self.AttnCell=Attn_2D_Cell(LSTM_units,input_dimension,input_timesteps,cnnResult,inputs)\r\n self.result,_=tf.nn.dynamic_rnn(self.AttnCell,target_inputs,initial_state=tf.zeros([batch,LSTM_units*3]))\r\n\r\n def __call__(self):\r\n return self.result\r\n\r\nclass Attn_2D_Cell(rnn.BasicLSTMCell):\r\n \"\"\"\r\n MY LSTM proposed.\r\n \"\"\"\r\n def __init__(self,\r\n num_units,\r\n num_inputs,\r\n num_timesteps,\r\n CNN_states,\r\n input_states,\r\n forget_bias=1.0,\r\n state_is_tuple=True,\r\n activation=None,\r\n reuse=None,\r\n name=None,\r\n dtype=None,\r\n **kwargs):\r\n \"\"\"\r\n Initialize the basic LSTM cell.\r\n \"\"\" \r\n super(Attn_2D_Cell,self).__init__(num_units,\r\n forget_bias=1.0,\r\n state_is_tuple=False,\r\n activation=None,\r\n reuse=None,\r\n name=None,\r\n dtype=None,\r\n **kwargs)\r\n self.inputs=input_states\r\n self.timesteps=num_timesteps\r\n\r\n self.CNNinputs_for_IA=tf.split(CNN_states,num_inputs,2)\r\n self.CNNinputs_for_TA=tf.split(CNN_states,num_timesteps,1)\r\n\r\n \r\n self.build(num_inputs)\r\n\r\n \r\n def build(self, inputs_shape):\r\n if inputs_shape is None:\r\n raise ValueError(\"Expected inputs.shape to be known, saw shape: %s\"\r\n % str(inputs_shape))\r\n\r\n self.input_depth = inputs_shape \r\n h_depth = self._num_units\r\n self._kernel = self.add_variable(\r\n 'kernel',\r\n shape=[self.input_depth + h_depth+1, 4 * self._num_units])\r\n self._bias = self.add_variable(\r\n 'bias',\r\n shape=[4 * self._num_units],\r\n initializer=tf.zeros_initializer(dtype=tf.float32))\r\n\r\n #weights for input attention \r\n self._We = self.add_variable(\r\n 'We',\r\n shape=[self._num_units*2,self.timesteps])\r\n\r\n self._Bwe = self.add_variable(\r\n 'Bwe',\r\n shape=[self.timesteps],\r\n initializer=tf.zeros_initializer(dtype=tf.float32)\r\n ) \r\n\r\n self._Ue = self.add_variable(\r\n '_Ue',\r\n shape=[self.timesteps,self.timesteps])\r\n\r\n self._Bue = self.add_variable(\r\n 'Bue',\r\n shape=[self.timesteps],\r\n initializer=tf.zeros_initializer(dtype=tf.float32)\r\n ) \r\n\r\n self._v1 = self.add_variable(\r\n 'v1',\r\n shape=[self.timesteps,1])\r\n ##########################\r\n #weights for temporal attention\r\n self._Wd = self.add_variable(\r\n 'Wd',\r\n shape=[self._num_units*2,self.input_depth]) \r\n\r\n self._Ud = self.add_variable(\r\n '_Uwd',\r\n shape=[self.input_depth,self.input_depth])\r\n\r\n self._Bwd = self.add_variable(\r\n 'Bwd',\r\n shape=[self.input_depth],\r\n initializer=tf.zeros_initializer(dtype=tf.float32)\r\n ) \r\n self._Bud = self.add_variable(\r\n 'Bud',\r\n shape=[self.input_depth],\r\n initializer=tf.zeros_initializer(dtype=tf.float32)\r\n ) \r\n\r\n self._v2 = self.add_variable(\r\n 'v2',\r\n shape=[self.input_depth,1]) \r\n\r\n self._Wy = self.add_variable(\r\n '_Wy',\r\n shape=[self.input_depth+1,1])\r\n\r\n self._By = 
self.add_variable(\r\n 'By',\r\n shape=[1],\r\n initializer=tf.zeros_initializer(dtype=tf.float32)\r\n ) \r\n\r\n print(self._Wd.name)\r\n\r\n\r\n\r\n self.built = True\r\n \r\n \r\n def __call__(self,inputs,state,scope=None):\r\n \"\"\"Long short-term memory cell (LSTM).\"\"\"\r\n with tf.variable_scope(scope or type(self).__name__): # \"BasicLSTMCell\"\r\n # Parameters of gates are concatenated into one multiply for efficiency.\r\n if self._state_is_tuple:\r\n c, h = state\r\n else:\r\n c, h, h2 = tf.split(state,3,1) \r\n\r\n tempE = tf.matmul(tf.concat([h,h2],1),self._We)+self._Bwe \r\n alphas=[]\r\n for inp in self.CNNinputs_for_IA:\r\n tempU = tf.matmul(tf.squeeze(inp),self._Ue)+self._Bue\r\n tempF = tf.tanh(tempE+tempU)\r\n \r\n alpha = tf.matmul(tempF,self._v1)\r\n alphas.append(alpha)\r\n \r\n alphas = tf.concat(alphas,1)\r\n alphas = tf.nn.softmax(alphas)\r\n alphas = tf.reshape(alphas,[-1,1,self.input_depth])\r\n Xinputs = self.inputs*alphas \r\n\r\n\r\n tempE = tf.matmul(tf.concat([h,h2],1),self._Wd)+self._Bwd\r\n alphas=[]\r\n for inp in self.CNNinputs_for_TA:\r\n tempU = tf.matmul(tf.squeeze(inp),self._Ud)+self._Bud\r\n tempF = tf.tanh(tempE+tempU)\r\n \r\n alpha = tf.matmul(tempF,self._v2)\r\n alphas.append(alpha)\r\n alphas = tf.concat(alphas,1)\r\n alphas = tf.reshape(tf.nn.softmax(alphas),[-1,self.timesteps,1])\r\n\r\n context = tf.multiply(alphas,Xinputs)\r\n context = tf.squeeze(tf.reduce_sum(context,axis=1)) \r\n\r\n gate_inputs = tf.matmul(\r\n tf.concat([inputs,context, h], 1), self._kernel)\r\n gate_inputs = tf.add(gate_inputs, self._bias)\r\n\r\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\r\n i, j, f, o = tf.split(gate_inputs, 4, 1)\r\n\r\n old_h = h\r\n new_c = (c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) *\r\n self._activation(j))\r\n new_h = self._activation(new_c) * tf.sigmoid(o)\r\n\r\n if self._state_is_tuple:\r\n new_state = rnn.LSTMStateTuple(new_c, new_h)\r\n else:\r\n new_state = tf.concat([new_c, new_h, old_h],1)\r\n return new_h, new_state\r\n\r\n\"\"\"\r\nimport numpy as np\r\n\r\ninputs = tf.constant([1,2,3,4,5,6,7,8,9,10]*10,shape=[2,5,10],dtype=tf.float32)\r\nsoftmax= tf.nn.softmax(tf.ones([2,10]))\r\nsoftmax=tf.reshape(softmax,[-1,1,10])\r\n#y= tf.ones([2,5,1],dtype=tf.float32)\r\n\r\n#DAL = DualAttnLayer(16,10,5,inputs,y,2)\r\n#result = DAL()\r\n\r\nresult=inputs*softmax\r\n\r\nsess=tf.Session()\r\nsess.run(tf.global_variables_initializer())\r\nprint(sess.run(inputs))\r\nprint(sess.run(softmax))\r\nprint(sess.run([result,tf.shape(result)]))\r\n\"\"\"","repo_name":"birdomi/project","sub_path":"AttnAttn1.py","file_name":"AttnAttn1.py","file_ext":"py","file_size_in_byte":7541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11308514507","text":"from flask import Flask\nfrom flask.ext.assets import Environment\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom rauth.service import OAuth2Service\nimport os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSECRET_KEY = 'MY_SECRET'\nFACEBOOK_APP_ID = '595127900566216'\nFACEBOOK_APP_SECRET = 'APP_SECRET'\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, '../app.db')\n\napp = Flask(__name__)\nwa = Environment(app)\napp.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI\ndb = SQLAlchemy(app)\napp.secret_key = SECRET_KEY\n\nfacebook = OAuth2Service(\n name='facebook',\n base_url='https://graph.facebook.com/',\n access_token_url='https://graph.facebook.com/oauth/access_token',\n 
authorize_url='https://www.facebook.com/dialog/oauth',\n client_id=FACEBOOK_APP_ID,\n client_secret=FACEBOOK_APP_SECRET\n)\n\nfrom . import assets, requests, models\n","repo_name":"luizdepra/sketch_n_hit","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"28761546368","text":"'''\nCreated on Jun 13, 2014\n\n@author: lzrak47\n'''\n\nimport binascii\nimport os\nimport zlib\n\nfrom utils import cal_sha1\n\n\nclass BaseObject(object):\n '''\n git base object\n '''\n def __init__(self, workspace, content):\n '''\n Constructor\n '''\n self.content = zlib.compress(content)\n self.sha1 = cal_sha1(content)\n self.path = os.path.join(workspace, '.git', 'objects', self.sha1[:2], self.sha1[2:])\n\nclass Blob(BaseObject):\n \n def __init__(self, workspace, content):\n real_content = 'blob %d\\0%s' % (len(content), content) \n super(Blob, self).__init__(workspace, real_content)\n\nclass Tree(BaseObject):\n def __init__(self, workspace, args):\n content = ''\n for arg in args:\n content += '%04o %s\\0%s' % (arg['mode'], arg['name'], binascii.unhexlify(arg['sha1']))\n real_content = 'tree %d\\0%s' % (len(content), content)\n super(Tree, self).__init__(workspace, real_content)\n\n\nclass Commit(BaseObject):\n def __init__(self, workspace, **kwargs):\n content = 'tree %s\\n' % (kwargs['tree_sha1'])\n if kwargs['parent_sha1']:\n content += 'parent %s\\n' % (kwargs['parent_sha1'])\n \n content += 'author %s %s %s %s\\ncommitter %s %s %s %s\\n\\n%s\\n' \\\n % (kwargs['name'], kwargs['email'], kwargs['timestamp'], kwargs['timezone'] , \\\n kwargs['name'], kwargs['email'], kwargs['timestamp'], kwargs['timezone'] , kwargs['msg'])\n \n real_content = 'commit %d\\0%s' % (len(content), content)\n super(Commit, self).__init__(workspace, real_content)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","repo_name":"Nginlion/git-in-python","sub_path":"src/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27317414577","text":"from typing import List\nfrom ..base import ParameterSetting, ParserSetting\n\nclass MpptSrneSetting(ParameterSetting) :\n \"\"\"\n Parameter setting class for epever\n \"\"\"\n def __init__(self) -> None:\n \"\"\"\n ParameterSetting Object with each member default value :\n id : default 0\n capacity : 100 - 3000 Ah (default 56 * 16 = 896)\n systemVoltage : 12336 (Hi Byte 48V, Lo Byte 48V)\n batteryType : 0 = User, 1 = Flooded, 2 = Sealed, 3 = Gel (default 0)\n overvoltageThreshold : default 5570\n chargingLimitVoltage : default 5520\n equalizeChargingVoltage : default 5470\n boostChargingVoltage : default 5470\n floatChargingVoltage : default 5470\n boostReconnectVoltage : default 5370\n overdischargeRecoveryVoltage : default 4900\n underVoltageWarning : default 4800\n overdischargeVoltage : default 4700\n dischargingLimitVoltage : default 4600\n charge\n overdischargeTimeDelay : default 5\n equalizingChargingTime : default 0\n boostChargingTime : default 120\n equalizingChargingInterval : default 0\n tempCompensation : 0 - 5 (default 0)\n \"\"\"\n super().__init__()\n self.__id = 0\n self.__capacity = 896\n self.__systemVoltage = 12336\n self.__batteryType = 0\n self.__overvoltageThreshold = 5570\n self.__chargingLimitVoltage = 5520\n self.__equalizeChargingVoltage = 5470\n 
self.__boostChargingVoltage = 5470\n self.__floatChargingVoltage = 5470\n self.__boostReconnectVoltage = 5370\n self.__overdischargeRecoveryVoltage = 4950\n self.__underVoltageWarning = 4800\n self.__overdischargeVoltage = 4700\n self.__dischargingLimitVoltage = 4600\n self.__chargeDischargeSoc = 25650\n self.__overdischargeTimeDelay = 5\n self.__equalizingChargingTime = 120\n self.__boostChargingTime = 120\n self.__equalizingChargingInterval = 120\n self.__tempCompensation = 0\n self.__paramList : List[int] = []\n\n def __eq__(self, other): \n if not isinstance(other, MpptSrneSetting):\n # don't attempt to compare against unrelated types\n print(\"Error : Attempted to compare between different type, returning False\")\n return NotImplemented\n\n if self.id == other.id \\\n and self.capacity == other.capacity \\\n and self.systemVoltage == other.systemVoltage \\\n and self.batteryType == other.batteryType \\\n and self.overvoltageThreshold == other.overvoltageThreshold \\\n and self.chargingLimitVoltage == other.chargingLimitVoltage \\\n and self.equalizeChargingVoltage == other.equalizeChargingVoltage \\\n and self.boostChargingVoltage == other.boostChargingVoltage \\\n and self.floatChargingVoltage == other.floatChargingVoltage \\\n and self.boostReconnectVoltage == other.boostReconnectVoltage \\\n and self.overdischargeRecoveryVoltage == other.overdischargeRecoveryVoltage \\\n and self.underVoltageWarning == other.underVoltageWarning \\\n and self.overdischargeVoltage == other.overdischargeVoltage \\\n and self.dischargingLimitVoltage == other.dischargingLimitVoltage \\\n and self.overdischargeTimeDelay == other.overdischargeTimeDelay \\\n and self.equalizingChargingTime == other.equalizingChargingTime \\\n and self.boostChargingTime == other.boostChargingTime \\\n and self.equalizingChargingInterval == other.equalizingChargingInterval \\\n and self.tempCompensation == other.tempCompensation :\n return True\n else :\n return False\n\n def printContainer(self) :\n \"\"\"\n Print each member value\n \"\"\"\n print (\"Id :\", self.__id)\n print (\"Capacity :\", self.__capacity)\n print (\"System voltage :\", self.__systemVoltage >> 8)\n print (\"Recognized voltage :\", (self.__systemVoltage & 0xff))\n print (\"Battery type :\", self.__batteryType)\n print (\"Overvoltage threshold :\", self.__overvoltageThreshold)\n print (\"Charging limit voltage :\", self.__chargingLimitVoltage)\n print (\"Equalize charging voltage :\", self.__equalizeChargingVoltage)\n print (\"Boost charging voltage :\", self.__boostChargingVoltage)\n print (\"Float charging voltage :\", self.__floatChargingVoltage)\n print (\"Boost reconnect voltage :\", self.__boostReconnectVoltage)\n print (\"Overdischarge recovery voltage :\", self.__overdischargeRecoveryVoltage)\n print (\"Undervoltage warning :\", self.__underVoltageWarning)\n print (\"Overdischarge voltage :\", self.__overdischargeVoltage)\n print (\"Discharging limit voltage :\", self.__dischargingLimitVoltage)\n print (\"Charge discharge soc :\", self.__chargeDischargeSoc)\n print (\"Overdischarge time delay :\", self.__overdischargeTimeDelay)\n print (\"Equalizing charging time :\", self.__equalizingChargingTime)\n print (\"Boost charging time :\", self.__boostChargingTime)\n print (\"Equalizing charging interval :\", self.__equalizingChargingInterval)\n print (\"Temperature compensation :\", self.__tempCompensation)\n \n def getListParam(self) -> List[int]:\n value : List[int] = [\n self.__id,\n self.__capacity,\n self.__systemVoltage,\n self.__batteryType,\n 
self.__overvoltageThreshold,\n self.__chargingLimitVoltage,\n self.__equalizeChargingVoltage,\n self.__boostChargingVoltage,\n self.__floatChargingVoltage,\n self.__boostReconnectVoltage,\n self.__overdischargeRecoveryVoltage,\n self.__underVoltageWarning,\n self.__overdischargeVoltage,\n self.__dischargingLimitVoltage,\n self.__chargeDischargeSoc,\n self.__overdischargeTimeDelay,\n self.__equalizingChargingTime,\n self.__boostChargingTime,\n self.__equalizingChargingInterval,\n self.__tempCompensation\n ]\n return value\n\n def setParam(self, registerList : List[int]) -> int:\n \"\"\"\n Set each member parameter, only valid if the received list length is 19\n\n Args :\n registerList (list) : a list of integer value, received from register modbus\n \"\"\"\n length = len(registerList)\n if (length == 19) :\n self.__capacity = registerList[0]\n self.__systemVoltage = registerList[1]\n self.__batteryType = registerList[2]\n self.__overvoltageThreshold = registerList[3]\n self.__chargingLimitVoltage = registerList[4]\n self.__equalizeChargingVoltage = registerList[5]\n self.__boostChargingVoltage = registerList[6]\n self.__floatChargingVoltage = registerList[7]\n self.__boostReconnectVoltage = registerList[8]\n self.__overdischargeRecoveryVoltage = registerList[9]\n self.__underVoltageWarning = registerList[10]\n self.__overdischargeVoltage = registerList[11]\n self.__dischargingLimitVoltage = registerList[12]\n self.__chargeDischargeSoc = registerList[13]\n self.__overdischargeTimeDelay = registerList[14]\n self.__equalizingChargingTime = registerList[15]\n self.__boostChargingTime = registerList[16]\n self.__equalizingChargingInterval = registerList[17]\n self.__tempCompensation = registerList[18]\n self.__paramList = registerList.copy()\n return 1\n return -1\n\n @property\n def id(self) -> int :\n return self.__id\n \n @id.setter\n def id(self, val : int) :\n self.__id = val\n\n @property\n def batteryType(self) -> int:\n return self.__batteryType\n \n @batteryType.setter\n def batteryType(self, val : int) :\n self.__batteryType = val\n\n @property\n def capacity(self) -> int :\n return self.__capacity\n \n @capacity.setter\n def capacity(self, val : int) :\n self.__capacity = val\n\n @property\n def systemVoltage(self) -> int :\n return self.__systemVoltage\n \n @systemVoltage.setter\n def systemVoltage(self, val : int) :\n self.__systemVoltage = val\n\n @property\n def tempCompensation(self) -> int :\n return self.__tempCompensation\n \n @tempCompensation.setter\n def tempCompensation(self, val : int) :\n self.__tempCompensation = val\n\n @property\n def overvoltageThreshold(self) -> int:\n return self.__overvoltageThreshold\n \n @overvoltageThreshold.setter\n def overvoltageThreshold(self, val : int) :\n self.__overvoltageThreshold = val\n\n @property\n def chargingLimitVoltage(self) -> int :\n return self.__chargingLimitVoltage\n \n @chargingLimitVoltage.setter\n def chargingLimitVoltage(self, val : int) :\n self.__chargingLimitVoltage = val\n\n @property\n def equalizeChargingVoltage(self) -> int:\n return self.__equalizeChargingVoltage\n \n @equalizeChargingVoltage.setter\n def equalizeChargingVoltage(self, val : int):\n self.__equalizeChargingVoltage = val\n\n @property\n def boostChargingVoltage(self) -> int :\n return self.__boostChargingVoltage\n \n @boostChargingVoltage.setter\n def boostChargingVoltage(self, val : int):\n self.__boostChargingVoltage = val\n\n @property\n def floatChargingVoltage(self) -> int :\n return self.__floatChargingVoltage\n \n 
@floatChargingVoltage.setter\n def floatChargingVoltage(self, val : int) :\n self.__floatChargingVoltage = val\n\n @property\n def boostReconnectVoltage(self) -> int :\n return self.__boostReconnectVoltage\n \n @boostReconnectVoltage.setter\n def boostReconnectVoltage(self, val : int) :\n self.__boostReconnectVoltage = val\n\n @property\n def overdischargeRecoveryVoltage(self) -> int :\n return self.__overdischargeRecoveryVoltage\n \n @overdischargeRecoveryVoltage.setter\n def overdischargeRecoveryVoltage(self,val : int):\n self.__overdischargeRecoveryVoltage = val\n\n @property\n def underVoltageWarning(self) -> int :\n return self.__underVoltageWarning\n \n @underVoltageWarning.setter\n def underVoltageWarning(self, val : int) :\n self.__underVoltageWarning = val\n\n @property\n def overdischargeVoltage(self) -> int :\n return self.__overdischargeVoltage\n \n @overdischargeVoltage.setter\n def overdischargeVoltage(self, val : int) :\n self.__overdischargeVoltage = val\n\n @property\n def dischargingLimitVoltage(self) -> int :\n return self.__dischargingLimitVoltage\n \n @dischargingLimitVoltage.setter\n def dischargingLimitVoltage(self, val : int) :\n self.__dischargingLimitVoltage = val\n\n @property\n def chargeDischargeSoc(self) -> int :\n return self.__chargeDischargeSoc\n\n @property\n def overdischargeTimeDelay(self) -> int :\n return self.__overdischargeTimeDelay\n \n @overdischargeTimeDelay.setter\n def overdischargeTimeDelay(self, val : int) :\n self.__overdischargeTimeDelay = val\n\n @property\n def equalizingChargingTime(self) -> int :\n return self.__equalizingChargingTime\n \n @equalizingChargingTime.setter\n def equalizingChargingTime(self, val : int) :\n self.__equalizingChargingTime = val\n\n @property\n def boostChargingTime(self) -> int :\n return self.__boostChargingTime\n \n @boostChargingTime.setter\n def boostChargingTime(self, val : int) :\n self.__boostChargingTime = val\n\n @property\n def equalizingChargingInterval(self) -> int :\n return self.__equalizingChargingInterval\n \n @equalizingChargingInterval.setter\n def equalizingChargingInterval(self, val : int) :\n self.__equalizingChargingInterval = val\n\n @property\n def getParamList(self) -> List[int] :\n return self.__paramList.copy()\n\nclass SrneParserSetting(ParserSetting) :\n \"\"\"\n Parser class for epever json setting\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n\n def parse(self, val : dict) -> List[MpptSrneSetting] :\n \"\"\"\n Parse json file from register_config.json into list of ParameterSetting\n\n Args :\n val (dict) : dictionary of register_config\n\n Returns :\n List[ParameterSetting] : list of ParameterSetting\n \"\"\"\n deviceList : List[dict] = val['device']\n paramList : List[MpptSrneSetting] = []\n for a in deviceList :\n p = MpptSrneSetting()\n p.id = a['slave']\n p.capacity = a['parameter']['battery_capacity']\n p.systemVoltage = a['parameter']['system_voltage']\n p.batteryType = a['parameter']['battery_type']\n p.overvoltageThreshold = a['parameter']['overvoltage_threshold']\n p.chargingLimitVoltage = a['parameter']['charging_limit_voltage']\n p.equalizeChargingVoltage = a['parameter']['equalizing_charge_voltage']\n p.boostChargingVoltage = a['parameter']['boost_charging_voltage']\n p.floatChargingVoltage = a['parameter']['floating_charging_voltage']\n p.boostReconnectVoltage = a['parameter']['boost_charging_recovery_voltage']\n p.overdischargeRecoveryVoltage = a['parameter']['overdischarge_recovery_voltage']\n p.underVoltageWarning = 
a['parameter']['undervoltage_warning_level']\n p.overdischargeVoltage = a['parameter']['overdischarge_voltage']\n p.dischargingLimitVoltage = a['parameter']['discharging_limit_voltage']\n p.overdischargeTimeDelay = a['parameter']['overdischarge_time_delay']\n p.equalizingChargingTime = a['parameter']['equalizing_charging_time']\n p.boostChargingTime = a['parameter']['boost_charging_time']\n p.equalizingChargingInterval = a['parameter']['equalizing_charging_interval']\n p.tempCompensation = a['parameter']['temperature_comp']\n paramList.append(p)\n return paramList","repo_name":"thomsardi/mppt-joulestore","sub_path":"mppt/mpptsrne/mppt_srne_utils.py","file_name":"mppt_srne_utils.py","file_ext":"py","file_size_in_byte":14152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33327869786","text":"import os\r\nimport sqlite3\r\nos.chdir(os.getcwd())\r\nchek = 0\r\ndef dbank():\r\n if chek == 0:\r\n dbanq = input \\\r\n (\"Do you want to create a new drug bank ? Doing so will clear any existing database.. y for yes..n for no?\\n\")\r\n else:\r\n dbanq = input(\"Drug bank not found..do you want to create one..y for yes..n for no?\\n\")\r\n while dbanq.lower() != \"y\" and dbanq.lower() != \"n\":\r\n print(\"Enter a valid response y or n !!!\")\r\n if chek == 0:\r\n dbanq = input \\\r\n (\"Do you want to create a new drug bank ? Doing so will erase any previous drug bank record. y for yes..n for no?\\n\")\r\n else:\r\n dbanq = input(\"Drug bank not found..do you want to create one..y for yes..n for no?\\n\")\r\n\r\n if dbanq.lower( )== \"y\":\r\n print(\"Please wait a few seconds while your drug bank file is being created..\")\r\n index = 0\r\n\r\n with open(\"drug_list.csv\") as pr:\r\n creadb = sqlite3.connect(\"drugdb.db\")\r\n c = creadb.cursor()\r\n c.execute(\"CREATE TABLE IF NOT EXISTS drugdb(No INTEGER,drugs TEXT,Shelf_no TEXT)\")\r\n clear = \"DELETE FROM drugdb\"\r\n c.execute(clear)\r\n for drug in pr:\r\n index = index + 1\r\n drug = drug.replace(\",\", \"\")\r\n drug = drug.strip('\\n')\r\n shelf = \"\"\r\n\r\n\r\n c.execute(\"INSERT INTO drugdb VALUES(?,?,?)\",(index,drug,shelf))\r\n creadb.commit()\r\n\r\n elif dbanq.lower() == \"n\":\r\n print(\"Okay\")\r\n return \"nothing\"\r\ndbank()\r\nwhile True:\r\n chek = 1\r\n\r\n if os.path.isfile(\"drugdb.db\"):\r\n\r\n try:\r\n creadb = sqlite3.connect(\"drugdb.db\")\r\n c=creadb.cursor()\r\n except:\r\n print(\"Something went wrong in file opening\")\r\n chek=0\r\n dbank()\r\n continue\r\n question = \"\"\r\n while question != \"1\" and question != \"2\" and question != \"3\" :\r\n question=input(\"Input '1' for drug location, '2' to know the drugs in any location and '3' to add a drug to the database.\\n\")\r\n if question == \"1\":\r\n drugloc = []\r\n dimp = input(\"Input drug\\n\")\r\n\r\n c.execute(\"SELECT No,drugs FROM drugdb WHERE drugs LIKE ?\",('%'+dimp+'%',))\r\n drugloc = c.fetchall()[:]\r\n\r\n for row in drugloc:\r\n print(\"No:\",row[0],\"Drug:\",row[1])\r\n\r\n\r\n\r\n\r\n\r\n if len(drugloc) > 0:\r\n numput = \"\"\r\n w = 0\r\n while w == 0:\r\n try:\r\n numput = int(input(\"Enter corresponding valid number to get drug location.\\n\"))\r\n except:\r\n ValueError\r\n continue\r\n\r\n w=1\r\n\r\n c.execute(\"SELECT * FROM drugdb WHERE No == ?\",(numput,))\r\n locshow = c.fetchall()[:]\r\n print(\"The drug\",locshow[0][1],\"is at Location :\",locshow[0][2])\r\n chput = \"\"\r\n while chput.lower() != \"y\" and chput.lower() != \"n\":\r\n\r\n chput = input(\"Do 
you want to edit the location? y for yes..n for no \\n\")\r\n if chput.lower() == \"y\":\r\n location = input(\"Enter the shelf_no\\n\")\r\n c.execute(\"UPDATE drugdb SET shelf_no = ? WHERE No == ?\",(location,numput))\r\n creadb.commit()\r\n c.execute(\"SELECT * FROM drugdb WHERE No == ?\", (numput,))\r\n change=c.fetchall()[:]\r\n \r\n print(\"DRUG:\",change[0][1] ,\"is at\" ,\"SHELF\",change[0][2] )\r\n\r\n\r\n elif chput.lower() == \"n\":\r\n pass\r\n elif len(drugloc) == 0:\r\n print('Drug not found')\r\n elif question == \"2\":\r\n shelf = input(\"Input shelf location\\n\")\r\n c.execute(\"SELECT * FROM drugdb WHERE Shelf_no == ?\",(shelf,))\r\n lhold = c.fetchall()[:]\r\n print(\"The drugs in shelf\",shelf,'are :')\r\n for row in lhold:\r\n print(row[1])\r\n\r\n elif question == \"3\":\r\n new_drug = input(\"Enter the name of the new drug\\n\")\r\n new_drug = new_drug.upper()\r\n shelf = input(\"Enter the shelf number\\n\")\r\n c.execute(\"SELECT * FROM drugdb\")\r\n index = len(c.fetchall()) + 1\r\n c.execute(\"INSERT INTO drugdb VALUES(?,?,?)\",(index,new_drug,shelf))\r\n creadb.commit()\r\n \r\n else:\r\n resp = dbank()\r\n if resp == \"nothing\":\r\n break\r\n\r\n\r\n","repo_name":"frank-ezenwanne/simple_python_projects","sub_path":"Drug_search_location_bank/drug_search_app.py","file_name":"drug_search_app.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"15192452086","text":"import requests\nimport matplotlib.pyplot as plt\nimport io\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom flask import Flask, Response\n\napp = Flask(__name__)\n\ndef getResponse(buildno):\n build = f'\"build\":\"{buildno}\"'\n files = {\n 'docs': (None, '[{'+build+', \"suite\":\".*JCK.*\"}]'),\n }\n try:\n response = requests.post('http://onega:8083/AutoRedGreen/_getstaticresults', files=files)\n return response.json()\n except:\n response = {'ok': 0}\n return response\n\ndef checkTP(kfdb_status, evaluation_status):\n if ((evaluation_status.lower() != 'unexpected')\n and ((kfdb_status.lower() == evaluation_status.lower())\n or (kfdb_status.lower() == 'flaky' and evaluation_status.endswith('_new'))\n or (kfdb_status.lower() == 'known') and evaluation_status.lower != 'unexpected_new')):\n return True\n else:\n return False\n\n\ndef checkTN(kfdb_status, evaluation_status):\n if ((\n (kfdb_status.lower() == 'unexpected' or kfdb_status.lower() == 'recurrence_unexpected')\n and (evaluation_status.lower() == 'unexpected' or evaluation_status.endswith('_new'))\n )):\n return True\n else:\n return False\n\n\ndef checkFP(kfdb_status, evaluation_status):\n if (\n (kfdb_status.lower() == 'waived' or kfdb_status.lower() == 'flaky')\n and (kfdb_status.lower() != evaluation_status.lower() and not evaluation_status.endswith('_new'))\n ):\n\n return True\n else:\n return False\n\n\ndef checkFN(kfdb_status, evaluation_status):\n if (\n (kfdb_status.lower() == 'unexpected' or kfdb_status.lower() == 'recurrence_unexpected')\n and ( evaluation_status.lower() != 'unexpected' and not evaluation_status.endswith('_new') )\n ):\n return True\n else:\n return False\n\ndef precision(cm):\n '''\n precision = True Positive/Actual Results\n precision = TP/TP+FP\n '''\n actual_result = cm['tp'] + cm['fp']\n try:\n precision = cm['tp'] / actual_result\n except:\n precision = 0\n return format(precision, '.3f')\n\n\ndef recall(cm):\n '''\n recall = True 
Positive/Predicted Results\n recall = TP/TP+FN\n '''\n predicted_result = cm['tp'] + cm['fn']\n try:\n recall = cm['tp'] / predicted_result\n except:\n recall = 0\n return format(recall, '.3f')\n\ndef cal(json_results):\n gold = {'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0, 'unaccounted': 0}\n silver = {'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0, 'unaccounted': 0}\n if json_results['ok'] == 1:\n for result in json_results['results']:\n try:\n fn = checkFN(result[\"kfdb_status\"], result[\"evaluation_status\"])\n fp = checkFP(result[\"kfdb_status\"], result[\"evaluation_status\"])\n tn = checkTN(result[\"kfdb_status\"], result[\"evaluation_status\"])\n tp = checkTP(result[\"kfdb_status\"], result[\"evaluation_status\"])\n if tp:\n if result[\"cycle\"] == 'Silver':\n silver['tp'] += 1\n elif result[\"cycle\"] == 'Gold':\n gold['tp'] += 1\n elif tn:\n if result[\"cycle\"] == 'Silver':\n silver['tn'] += 1\n elif result[\"cycle\"] == 'Gold':\n gold['tn'] += 1\n elif fp:\n if result[\"cycle\"] == 'Silver':\n silver['fp'] += 1\n elif result[\"cycle\"] == 'Gold':\n gold['fp'] += 1\n elif fn:\n if result[\"cycle\"] == 'Silver':\n silver['fn'] += 1\n elif result[\"cycle\"] == 'Gold':\n gold['fn'] += 1\n else:\n if result[\"cycle\"] == 'Silver':\n silver['unaccounted'] += 1\n elif result[\"cycle\"] == 'Gold':\n gold['unaccounted'] += 1\n except:\n pass\n else:\n pass\n\n precision_gold = precision(gold)\n recall_gold = recall(gold)\n\n\n precision_silver = precision(silver)\n recall_silver = recall(silver)\n\n return {'gold': {'precision' : precision_gold,\n 'recall': recall_gold,\n 'confusionMatrix': gold},\n 'silver': {'precision': precision_silver,\n 'recall': recall_silver,\n 'confusionMatrix': silver},\n }\n\ndef create_figure(builds, gold_precision, gold_recall, silver_precision, silver_recall):\n fig = Figure(figsize=(20, 20))\n fig.suptitle('Precision Recall Martix', fontsize=16)\n grid = plt.GridSpec(4, 1, hspace=0.8, wspace=5)\n\n axis = fig.add_subplot(grid[0:1, :])\n axis2 = fig.add_subplot(grid[1:2, :])\n axis3 = fig.add_subplot(grid[2:3, :])\n axis4 = fig.add_subplot(grid[3:4, :])\n\n axis.grid()\n axis2.grid()\n axis3.grid()\n axis4.grid()\n\n axis.title.set_text('Gold Precision')\n axis2.title.set_text('Gold Recall')\n axis3.title.set_text('Silver Precision')\n axis4.title.set_text('Silver Recall')\n\n axis.plot(builds, gold_precision)\n axis2.plot(builds, gold_recall)\n axis3.plot(builds, silver_precision)\n axis4.plot(builds, silver_recall)\n\n return fig\n\ndef getArray(results):\n # return {'rr':results}\n builds = []\n gold_precision = []\n gold_recall = []\n silver_precision = []\n silver_recall = []\n for result in results:\n builds.append(list(result.keys())[0])\n gold_precision.append((result[builds[-1]]['gold']['precision']))\n gold_recall.append((result[builds[-1]]['gold']['recall']))\n silver_precision.append((result[builds[-1]]['silver']['precision']))\n silver_recall.append((result[builds[-1]]['silver']['recall']))\n return (builds, gold_precision, gold_recall, silver_precision, silver_recall)\n\ndef getArrayOf_CM(results):\n builds = []\n gold_tp = []\n gold_tn = []\n gold_fp = []\n gold_fn = []\n silver_tp = []\n silver_tn = []\n silver_fp = []\n silver_fn = []\n for result in results:\n builds.append(list(result.keys())[0])\n\n gold_confusionMatrix = (result[builds[-1]]['gold']['confusionMatrix'])\n silver_confusioMatrix = (result[builds[-1]]['silver']['confusionMatrix'])\n\n gold_tp.append(gold_confusionMatrix['tp'])\n gold_tn.append(gold_confusionMatrix['tn'])\n 
gold_fp.append(gold_confusionMatrix['fp'])\n        gold_fn.append(gold_confusionMatrix['fn'])\n\n        silver_tp.append(silver_confusioMatrix['tp'])\n        silver_tn.append(silver_confusioMatrix['tn'])\n        silver_fp.append(silver_confusioMatrix['fp'])\n        silver_fn.append(silver_confusioMatrix['fn'])\n\n    return (builds, gold_tp, gold_tn, gold_fp, gold_fn, silver_tp, silver_tn, silver_fp, silver_fn)\n\n@app.route('/', defaults={'str1': '0', 'str2': '0'}, methods=['GET', 'POST'])\n@app.route('/matrix', defaults={'str1': '0', 'str2': '0'}, methods=['GET', 'POST'])\n@app.route('/matrix/', defaults={'str1': '0', 'str2': '0'}, methods=['GET', 'POST'])\n@app.route('/matrix/<str1>/<str2>/', methods=['GET', 'POST'])\ndef matrix(str1, str2):\n    results = []\n\n    for build in range(int(str1), int(str2)):\n        json_results = getResponse(build)\n        result = cal(json_results)\n        results.append({build: result})\n    return {'result': results}\n\n@app.route('/aggregate', defaults={'str1': '0', 'str2': '0'}, methods=['GET', 'POST'])\n@app.route('/aggregate/', defaults={'str1': '0', 'str2': '0'}, methods=['GET', 'POST'])\n@app.route('/aggregate/<str1>/<str2>/', methods=['GET', 'POST'])\ndef aggregate(str1, str2):\n    results = []\n    for build in range(int(str1), int(str2)):\n        json_results = getResponse(build)\n        result = cal(json_results)\n        results.append({build: result})\n    (builds, gold_tp, gold_tn, gold_fp, gold_fn, silver_tp, silver_tn, silver_fp, silver_fn) = getArrayOf_CM(results)\n\n    gold_tps = sum(gold_tp)\n    gold_tns = sum(gold_tn)\n    gold_fps = sum(gold_fp)\n    gold_fns = sum(gold_fn)\n\n    silver_tps = sum(silver_tp)\n    silver_tns = sum(silver_tn)\n    silver_fps = sum(silver_fp)\n    silver_fns = sum(silver_fn)\n\n    gold_actual_result = gold_tps + gold_fps\n    silver_actual_result = silver_tps + silver_fps\n\n    gold_pred_result = gold_tps + gold_fns\n    silver_pred_result = silver_tps + silver_fns\n    gold_agg_precision = silver_agg_precision = gold_agg_recall = silver_agg_recall = 0\n    try:\n        gold_agg_precision = gold_tps/gold_actual_result\n        silver_agg_precision = silver_tps / silver_actual_result\n\n        gold_agg_recall = gold_tps / gold_pred_result\n        silver_agg_recall = silver_tps / silver_pred_result\n\n    except:\n        pass\n\n    return {'gold_agg_precision': format(gold_agg_precision, '.3f'),\n            'silver_agg_precision': format(silver_agg_precision, '.3f'),\n            'gold_agg_recall': format(gold_agg_recall, '.3f'),\n            'silver_agg_recall': format(silver_agg_recall, '.3f'),\n\n            'gold_tps': format(gold_tps, '.3f'),\n            'gold_tns': format(gold_tns, '.3f'),\n            'gold_fps': format(gold_fps, '.3f'),\n            'gold_fns': format(gold_fns, '.3f'),\n\n            'silver_tps': format(silver_tps, '.3f'),\n            'silver_tns': format(silver_tns, '.3f'),\n            'silver_fps': format(silver_fps, '.3f'),\n            'silver_fns': format(silver_fns, '.3f'),\n\n            }\n\n@app.route('/matrix/plot', defaults={'str1': '0', 'str2': '0'}, methods=['GET', 'POST'])\n@app.route('/matrix/plot/', defaults={'str1': '0', 'str2': '0'}, methods=['GET', 'POST'])\n@app.route('/matrix/plot/<str1>/<str2>/', methods=['GET', 'POST'])\ndef plot(str1, str2):\n    results =[]\n    for build in range(int(str1), int(str2)):\n        json_results = getResponse(build)\n        result = cal(json_results)\n        results.append({build: result})\n    (builds, gold_precision, gold_recall, silver_precision, silver_recall) = getArray(results)\n\n    fig = create_figure(builds, gold_precision, gold_recall, silver_precision, silver_recall)\n    output = io.BytesIO()\n    FigureCanvas(fig).print_png(output)\n    return Response(output.getvalue(), mimetype='image/png')\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host='0.0.0.0', port=5002)","repo_name":"arunmastermind/red-green_comparisionAPI","sub_path":"ConfusionMatrixAPI/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"35983219987","text":"from numpy import arange\nfrom numpy import meshgrid\nfrom numpy.random import seed\nfrom numpy.random import rand\nfrom matplotlib import pyplot\n\n\n# objective function\ndef objective(x, y):\n    return x ** 2.0 + y ** 2.0\n\n\nr_min, r_max = -5.0, 5.0\nxaxis = arange(r_min, r_max, 0.1)\nyaxis = arange(r_min, r_max, 0.1)\nx, y = meshgrid(xaxis, yaxis)\nresults = objective(x, y)\nseed(1)\nsample_x = r_min + rand(10) * (r_max - r_min)\nsample_y = r_min + rand(10) * (r_max - r_min)\npyplot.contourf(x, y, results, levels=50, cmap='jet')\noptima_x = [0.0, 0.0]\npyplot.plot([optima_x[0]], [optima_x[1]], '*', color='white')\npyplot.plot(sample_x, sample_y, 'o', color='black')\npyplot.show()","repo_name":"AbuBakkar32/Machine-Learning-Practice","sub_path":"Matplotlib/MathPlot V1.3/Coutour Plot 2.py","file_name":"Coutour Plot 2.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"}
+{"seq_id":"72775173990","text":"\"\"\"\nTest suite for all utils in the `core` application\n\"\"\"\nfrom cms.api import Page\nfrom cms.test_utils.testcases import CMSTestCase\n\nfrom richie.apps.core.helpers import create_i18n_page\n\n\nclass PagesTests(CMSTestCase):\n    \"\"\"Integration tests that actually render pages\"\"\"\n\n    def test_pages_i18n(self):\n        \"\"\"\n        Create an i18n page and check its rendering on the site\n        \"\"\"\n        content = {\"fr\": \"Tableau de bord\", \"en\": \"Dashboard\"}\n        create_i18n_page(\n            content,\n            is_homepage=True,\n            published=True,\n            template=\"richie/single_column.html\",\n        )\n        # Get the root page in french...\n        root = Page.objects.get_home()\n        response = self.client.get(root.get_absolute_url(\"fr\"))\n        self.assertEqual(200, response.status_code)\n        # ... and make sure the page menu is present in french on the page\n        self.assertIn(content[\"fr\"], response.rendered_content)\n\n        # Get the root page in english...\n        response = self.client.get(root.get_absolute_url(\"en\"))\n        self.assertEqual(200, response.status_code)\n        # ... 
and make sure the page menu is present in english on the page\n self.assertIn(content[\"en\"], response.rendered_content)\n","repo_name":"mamilkew/explore-richie","sub_path":"tests/apps/core/test_pages.py","file_name":"test_pages.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11078532956","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFile Name: addStrings.py\nAuthor : jynnezhang\nDate: 2020/5/26 6:58 下午\nDescription:\n\"\"\"\n\n\nclass Solution:\n def addStrings(self, num1: str, num2: str) -> str:\n l1, l2 = len(num1), len(num2)\n if l1 < l2:\n num1, num2 = num2, num1\n l1, l2 = l2, l1\n num1 = [int(x) for x in num1]\n num2 = [int(x) for x in num2]\n num1, num2 = num1[::-1], num2[::-1]\n for i, digit in enumerate(num2):\n num1[i] += num2[i]\n\n num1 = self.CarrySolver(num1)\n num1 = num1[::-1]\n return \"\".join(str(x) for x in num1)\n\n def CarrySolver(self, nums):\n # 这个函数的功能是:将输入的数组中的每一位处理好进位\n # 举例:输入[15, 27, 12], 返回[5, 8, 4, 1]\n i = 0\n while i < len(nums):\n if nums[i] >= 10:\n carrier = nums[i] // 10\n if i == len(nums) - 1:\n nums.append(carrier)\n else:\n nums[i + 1] += carrier\n nums[i] %= 10\n i += 1\n\n return nums\n\n\nif __name__ == '__main__':\n print(Solution().addStrings(\"123\", \"496\"))\n\n\n","repo_name":"summer-vacation/AlgoExec","sub_path":"jianzhioffer/addStrings.py","file_name":"addStrings.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"2070992549","text":"#!--*--coding:utf-8--*--\n\nfrom form import equation\nfrom convert_to_draw import find_equation_with\nfrom copy import deepcopy\nfrom convert_to_draw import isOnEq\n\n# The silhouette is created with the list of points of the form in the good order\n\nclass silhouette:\n def __init__(self, list_pts_form):\n list_pts_form_cpy = [deepcopy(pt) for pt in list_pts_form]\n self.couples = creation_couples(list_pts_form_cpy)\n self.sort()\n del list_pts_form_cpy[-1]\n self.sommets = list_pts_form_cpy\n \n # Clean couples, join points on the same right\n def clean_couples(self):\n toRemove = []\n toAppend = []\n for couple in self.couples:\n if couple not in toRemove:\n for couple_test in self.couples:\n if couple != couple_test and couple_test not in toRemove:\n if couple[1] == couple_test[0]:\n if are_para(equation(couple[0], couple[1]), equation(couple_test[0], couple_test[1])):\n toRemove.append(couple)\n toRemove.append(couple_test)\n if [couple_test[1], couple[0]] not in toAppend:\n toAppend.append([couple[0], couple_test[1]])\n elif couple[0] == couple_test[1]:\n if are_para(equation(couple[1], couple[0]), equation(couple_test[1], couple_test[0])):\n toRemove.append(couple)\n toRemove.append(couple_test)\n if [couple[1], couple_test[0]] not in toAppend:\n toAppend.append([couple_test[0], couple[1]])\n \n else:\n equa_test = equation(couple_test[0], couple_test[1])\n equa = equation(couple[0], couple[1])\n if are_para(equa, equa_test):\n if isOnEq(couple[1], equa_test) and isOnEq(couple_test[1], equa):\n toRemove.append(couple)\n toRemove.append(couple_test)\n toAppend.append([couple[0], couple_test[1]])\n toAppend.append([couple_test[0], couple[1]])\n \n self.couples = [couple for couple in self.couples if couple not in toRemove]\n \n self.couples += toAppend\n\n #Remove a form from the silhouette\n def remove(self, forme, sommet):\n eq_sil = self.find_equation_with(sommet)\n eq_form = 
find_equation_with(forme, sommet, forme.forme.new_scale)\n\n for eq in eq_sil:\n for eq_test in eq_form:\n if are_para(eq, eq_test):\n if eq[-1] == eq_test[-1] and eq[-2] != eq_test[-2]:\n self.couples.append([eq[-2], eq_test[-2]])\n \n elif eq[-2] == eq_test[-2] and eq[-1] != eq_test[-1]:\n self.couples.append([eq_test[-1], eq[-1]])\n\n firstPoint = []\n secondPoint = []\n\n if len(forme.forme.sommets) == 3:\n for eq in eq_form:\n if sommet == eq[-1]:\n firstPoint = eq[-2]\n else:\n secondPoint = eq[-1]\n self.couples.append([firstPoint, secondPoint])\n else:\n for eq in forme.build_equations(forme.forme.new_scale):\n if sommet not in eq:\n self.couples.append([eq[-1], eq[-2]])\n\n self.couples = [couple for couple in self.couples if sommet not in couple]\n \n self.clean_couples()\n self.clean_couples()\n \n listTmp = []\n \n for couple in self.couples:\n if couple not in listTmp and couple[1] != couple[0]:\n listTmp.append(couple)\n \n self.couples = listTmp\n \n self.sort()\n \n self.sommets = []\n self.sommets = [couple[0] for couple in self.couples if couple[0] not in self.sommets]\n \n #Build equations of the silhouette\n def build_equations(self):\n equations = []\n\n for couple in self.couples:\n equations.append(equation(couple[0], couple[1]))\n \n return equations\n \n #Find equations associated with a given point\n def find_equation_with(self, sommet):\n equations = []\n for eq in self.build_equations():\n if sommet in eq:\n equations.append(eq)\n\n return equations\n \n #Test if a form complete the silhouette, in other words if the silhouette discribe the silhouette of the given form\n def complete(self, form):\n test = deepcopy(self.sommets)\n for sommet in form.get_sommets(form.forme.new_scale):\n if sommet in self.sommets:\n test.remove(sommet)\n return test == []\n\n #sort the couples form the little to the biggest\n def sort(self):\n self.couples.sort(key = lambda couple : (couple[1][0]-couple[0][0])**2 + (couple[1][1]-couple[0][1])**2, reverse = True)\n \n#Create coupes from list of ordonates points\ndef creation_couples(list_pts):\n list_cpl = []\n for i in range(len(list_pts)-1):\n list_cpl.append([list_pts[i], list_pts[i+1]])\n return list_cpl\n \n#Test if two equations are parallel\ndef are_para(eq, eq_test):\n if len(eq) == len(eq_test):\n if len(eq) == 3:\n return True\n \n else:\n return eq[0] == eq_test[0]\n\n return False","repo_name":"RodolphePonthon/IA41-Tangram","sub_path":"Silhouette.py","file_name":"Silhouette.py","file_ext":"py","file_size_in_byte":5679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"6955844344","text":"def noRepeatSubstring(s):\n frequency = {}\n l = 0\n maxLength = 0\n\n for r, char in enumerate(s):\n if char in frequency:\n l = max(l, frequency[char] + 1)\n frequency[char] = r\n maxLength = max(maxLength, r - l + 1)\n return maxLength\n\n\ns = \"abccde\"\nprint(noRepeatSubstring(s))\n","repo_name":"kchensu/grokkin","sub_path":"Sliding Window/noRepeatSubstring.py","file_name":"noRepeatSubstring.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"10172134584","text":"import psycopg2\nfrom dataclasses import dataclass\nimport sys\nsys.path.insert(0, \"~/sibd/project\")\n\n@dataclass\nclass Action():\n description: str\n script: str\n parameters: list\n columns: list\n\n @property\n def query(self):\n return r\"={}&\".join(self.parameters) + \"={}\"\n\n\ndef 
connect_to_database(host, port, IST_ID, password, db_name):\n credentials = f\"host={host} port={port} user={IST_ID} password={password} dbname={db_name}\"\n connection = psycopg2.connect(credentials)\n\n return connection\n\n\ndef get_index_html():\n with open(\"/afs/.ist.utl.pt/users/6/5/ist193365/web/sibd/template.html\", \"r\") as f:\n return f.read()\n\n\ndef set_active_page(page):\n pages = dict(HOME=\"\", SAILORS=\"\", RESERVATIONS=\"\", TRIPS=\"\")\n pages[page.upper()] = \"active\"\n return pages\n\n\ndef print_html(body, header, active):\n html = get_index_html()\n active = set_active_page(active)\n\n print('Content-type:text/html\\n\\n')\n print(html.format(BODY=body, HEADER=header, **active))\n\n\ndef get_html_table(contents, header, actions=[]):\n table = '
\\n\\t\\n'\n table += '\\n\\n\\t\\n'\n\n for col in header:\n table += f'\\t\\t\\n'\n table += \"\\t\\n\"\n\n for row in contents:\n table += \"\\t\\n\"\n for value in row:\n table += f\"\\t\\t\\n\"\n\n for action in actions:\n href = action.script + \"?\" + \\\n action.query.format(*[row[i] for i in action.columns])\n\n table += f'\\t\\t\\n'\n\n table += \"\\t\\n\"\n\n table += \"
{col}
{value}{action.description}
\\n
\\n\"\n\n return table\n\n\ndef get_button(text, href, color=\"success\"):\n button = f'\\n\\t '\n button += text\n button += '\\n'\n\n return button\n\n\ndef get_form(entries=[], labels=[], types=[], action=[], radios=None, selects=None):\n form = f'\\n\\t
'\n\n if selects is not None:\n for select in selects:\n form += get_selects(**select)\n\n for entry, label, ftype in zip(entries, labels, types):\n form += '\\n\\t\\t
'\n form += f'\\n\\t\\t\\t'\n form += f'\\n\\t\\t\\t'\n form += '\\n\\t\\t
'\n\n if radios is not None:\n form += get_radios(**radios)\n\n\n form += '\\n\\t

'\n form += '\\n\\t
'\n return form\n\n\ndef get_radios(name, entries, labels, rtype=\"checkbox\"):\n radios = \"\"\n for n, (entry, label) in enumerate(zip(entries, labels)):\n radios += '\\n\\t
'\n radios += f'\\n\\t\\t'\n radios += f'\\n\\t\\t'\n radios += '\\n\\t
'\n return radios\n\ndef get_selects(name, label, options):\n dropdown = f'\\n\\t'\n dropdown += f'\\n\\t'\n return dropdown\n","repo_name":"bernardo-silva/SIBD-Project","sub_path":"part3/web/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14917195240","text":"from hamcrest import (\n all_of,\n assert_that,\n empty,\n has_entries,\n has_entry,\n has_items,\n none,\n not_,\n)\n\nfrom . import confd\nfrom ..helpers import errors as e, fixtures, scenarios as s\nfrom ..helpers.config import MAIN_TENANT, SUB_TENANT\n\nALL_OPTIONS = [\n [\"cid_name\", \"cid_name\"],\n [\"cid_num\", \"cid_num\"],\n [\"allow\", \"allow\"],\n [\"disallow\", \"disallow\"],\n]\n\n\ndef test_get_errors():\n fake_sccp_get = confd.endpoints.sccp(999999).get\n yield s.check_resource_not_found, fake_sccp_get, 'SCCPEndpoint'\n\n\ndef test_delete_errors():\n fake_sccp = confd.endpoints.sccp(999999).delete\n yield s.check_resource_not_found, fake_sccp, 'SCCPEndpoint'\n\n\ndef test_post_errors():\n url = confd.endpoints.sccp.post\n for check in error_checks(url):\n yield check\n\n\n@fixtures.sccp()\ndef test_put_errors(sccp):\n url = confd.endpoints.sccp(sccp['id']).put\n for check in error_checks(url):\n yield check\n\n\ndef error_checks(url):\n yield s.check_bogus_field_returns_error, url, 'options', 123\n yield s.check_bogus_field_returns_error, url, 'options', None\n yield s.check_bogus_field_returns_error, url, 'options', {}\n yield s.check_bogus_field_returns_error, url, 'options', 'string'\n yield s.check_bogus_field_returns_error, url, 'options', [None]\n yield s.check_bogus_field_returns_error, url, 'options', ['string', 'string']\n yield s.check_bogus_field_returns_error, url, 'options', [123, 123]\n yield s.check_bogus_field_returns_error, url, 'options', ['string', 123]\n yield s.check_bogus_field_returns_error, url, 'options', [[]]\n yield s.check_bogus_field_returns_error, url, 'options', [{'key': 'value'}]\n yield s.check_bogus_field_returns_error, url, 'options', [['missing_value']]\n yield s.check_bogus_field_returns_error, url, 'options', [['too', 'much', 'value']]\n yield s.check_bogus_field_returns_error, url, 'options', [['wrong_value', 1234]]\n yield s.check_bogus_field_returns_error, url, 'options', [['none_value', None]]\n\n\n@fixtures.sccp()\ndef test_get(sccp):\n response = confd.endpoints.sccp(sccp['id']).get()\n assert_that(response.item, has_entries(line=none()))\n\n\n@fixtures.sccp()\n@fixtures.sccp()\ndef test_list(sccp1, sccp2):\n response = confd.endpoints.sccp.get()\n assert_that(\n response.items,\n has_items(has_entry('id', sccp1['id']), has_entry('id', sccp2['id'])),\n )\n\n\n@fixtures.sccp(wazo_tenant=MAIN_TENANT)\n@fixtures.sccp(wazo_tenant=SUB_TENANT)\ndef test_list_multi_tenant(main, sub):\n response = confd.endpoints.sccp.get(wazo_tenant=MAIN_TENANT)\n assert_that(response.items, all_of(has_items(main)), not_(has_items(sub)))\n\n response = confd.endpoints.sccp.get(wazo_tenant=SUB_TENANT)\n assert_that(response.items, all_of(has_items(sub), not_(has_items(main))))\n\n response = confd.endpoints.sccp.get(wazo_tenant=MAIN_TENANT, recurse=True)\n assert_that(response.items, has_items(main, sub))\n\n\n@fixtures.sccp(wazo_tenant=MAIN_TENANT)\n@fixtures.sccp(wazo_tenant=SUB_TENANT)\ndef test_get_multi_tenant(main, sub):\n response = confd.endpoints.sccp(main['id']).get(wazo_tenant=SUB_TENANT)\n response.assert_match(404, 
e.not_found(resource='SCCPEndpoint'))\n\n    response = confd.endpoints.sccp(sub['id']).get(wazo_tenant=MAIN_TENANT)\n    assert_that(response.item, has_entries(**sub))\n\n\ndef test_create_minimal_parameters():\n    response = confd.endpoints.sccp.post()\n\n    response.assert_created('endpoint_sccp', location='endpoints/sccp')\n    assert_that(response.item, has_entries(tenant_uuid=MAIN_TENANT, options=empty()))\n\n\ndef test_create_all_parameters():\n    response = confd.endpoints.sccp.post(options=ALL_OPTIONS)\n\n    assert_that(\n        response.item,\n        has_entries(tenant_uuid=MAIN_TENANT, options=has_items(*ALL_OPTIONS)),\n    )\n\n\n@fixtures.sccp(options=[[\"allow\", \"alaw\"], [\"disallow\", \"all\"]])\ndef test_update_options(sccp):\n    options = [[\"allow\", \"g723\"], [\"disallow\", \"opus\"]]\n\n    response = confd.endpoints.sccp(sccp['id']).put(options=options)\n    response.assert_updated()\n\n    response = confd.endpoints.sccp(sccp['id']).get()\n    assert_that(response.item['options'], has_items(*options))\n\n\n@fixtures.sccp(wazo_tenant=MAIN_TENANT)\n@fixtures.sccp(wazo_tenant=SUB_TENANT)\ndef test_edit_multi_tenant(main, sub):\n    response = confd.endpoints.sccp(main['id']).put(wazo_tenant=SUB_TENANT)\n    response.assert_match(404, e.not_found(resource='SCCPEndpoint'))\n\n    response = confd.endpoints.sccp(sub['id']).put(wazo_tenant=MAIN_TENANT)\n    response.assert_updated()\n\n\n@fixtures.sccp()\ndef test_delete(sccp):\n    response = confd.endpoints.sccp(sccp['id']).delete()\n    response.assert_deleted()\n\n\n@fixtures.sccp(wazo_tenant=MAIN_TENANT)\n@fixtures.sccp(wazo_tenant=SUB_TENANT)\ndef test_delete_multi_tenant(main, sub):\n    response = confd.endpoints.sccp(main['id']).delete(wazo_tenant=SUB_TENANT)\n    response.assert_match(404, e.not_found(resource='SCCPEndpoint'))\n\n    response = confd.endpoints.sccp(sub['id']).delete(wazo_tenant=MAIN_TENANT)\n    response.assert_deleted()\n","repo_name":"wazo-platform/wazo-confd","sub_path":"integration_tests/suite/base/test_endpoint_sccp.py","file_name":"test_endpoint_sccp.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"}
{"seq_id":"31622393648","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@Project :LeetCodePthonVersion \n@File :76. 最小覆盖子串.py\n@Author :HuntingGame\n@Date :2023-02-03 14:32 \nC'est la vie!!! enjoy ur day :D\n'''\nclass Solution:\n    def minWindow(self, s: str, t: str) -> str:\n        \"\"\"\n\n        If t has length M and s has length N, then M > N needs no discussion: there is no answer.\n        Only N >= M can work.\n        Approach: keep a \"debt table\" holding the character frequencies of t, then sweep s.\n        If s[i] is not in the debt table, its entry just drops below zero and the outstanding\n        debt stays the same; if it is, both its entry and the outstanding debt decrease.\n        Once the debt reaches 0, the window L..R covers t but is not yet the shortest, so L\n        moves right to shrink the window to the minimum before the answer is recorded.\n        :param s:\n        :param t:\n        :return:\n        \"\"\"\n        if len(s) < len(t):\n            return \"\"\n        map = [0] * 256  # the debt table: character frequencies of t\n        for c in t:\n            map[ord(c)] += 1\n        match = len(t)  # the outstanding debt\n        L = R = 0\n        minlen = -1\n        ansl = ansr = -1\n        while R < len(s):\n            map[ord(s[R])] -= 1\n            if map[ord(s[R])] >= 0:  # a valid repayment\n                match -= 1\n            if match == 0:\n                # the debt is fully repaid\n                while map[ord(s[L])] < 0:\n                    # start shrinking: check whether L can move right\n                    # e.g. s: steasac, t: aac -- this moves [L] from the 's' up to the 'a'.\n                    map[ord(s[L])] += 1\n                    L += 1\n                # at this point map[ord(s[L])] == 0\n                if minlen == -1 or minlen > R - L + 1:\n                    # either no answer has been recorded yet, or the previous one is worse\n                    minlen = R - L + 1\n                    ansl = L\n                    ansr = R\n                match += 1  # because of the L handling above, [L] must belong to the debt table\n                map[ord(s[L])] += 1  # the shortest window starting at L is captured; move L right to find new ones\n                L += 1\n            R += 1\n\n        return s[ansl:ansr + 1] if minlen != -1 else \"\"\n\n\n\n\n\n\n\n","repo_name":"enternityFan/LeetCodePythonVersion","sub_path":"leetcode100/76. 最小覆盖子串.py","file_name":"76. 
最小覆盖子串.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3452425814","text":"import json\nimport random\nimport re\n\nwith open(\"emoji-suggestions.json\") as f:\n suggestions = json.load(f)[\"suggestions\"]\n\nwith open(\"all_emojis.json\") as f:\n all_emojis = json.load(f)\n all_emojis.reverse()\n\nwith open(\"neutral_emojis.json\") as f:\n neutral_emojis = json.load(f)\n\n\ndef neutral_emoji(seed):\n random.seed(seed)\n if random.randint(0, 100) > 50:\n return random.choice(neutral_emojis)\n return \"\"\n\n\ndef suggest_emoji(phrase):\n phrase = phrase.lower().strip()\n phrase = phrase.replace(\",\", \"\")\n phrase = re.sub(r\"\\?\", \"\", phrase)\n phrase = re.sub(r\"\\.\", \"\", phrase)\n phrase = re.sub(r\",\", \"\", phrase)\n\n for emoji in all_emojis:\n pattern = f\"\\\\b{emoji.replace('-', ' ')}\\\\b\"\n if len(re.findall(pattern, phrase)) > 0:\n return emoji\n\n without_s = re.sub(r\"s\\b\", \"\", phrase)\n if len(re.findall(pattern, without_s)) > 0:\n return emoji\n\n without_er = re.sub(r\"er\\b\", \"\", phrase)\n if len(re.findall(pattern, without_er)) > 0:\n return emoji\n\n for suggestion in suggestions:\n for keyword in suggestion[\"keywords\"]:\n pattern = f\"\\\\b{keyword}\\\\b\"\n if re.findall(pattern, phrase):\n return suggestion[\"emoji\"]\n\n return None\n\n\nif __name__ == '__main__':\n print(suggestions)\n print(all_emojis)\n print(suggest_emoji(\"I love going for walks\"))\n print(suggest_emoji(\"I go to school\"))\n print(suggest_emoji(\"give me a door,\"))\n print(suggest_emoji(\"fofo wedding?\"))\n print(suggest_emoji(\"I (27M) have a twin sister (27F)\"))\n print(suggest_emoji(\"I proposed and she said yes.\"))\n print(suggest_emoji(\"Press like and comment what you think.\"))\n print(suggest_emoji(\"Rob found out about the affair 3 years later.\"))\n print(suggest_emoji(\"Am I really wrong for wanting\"))\n print(suggest_emoji(\"older man\"))\n print(suggest_emoji(\"about to be eight months old.\"))\n","repo_name":"jarle/tiktok-automatic-videos","sub_path":"generate-assets/emoji_suggester.py","file_name":"emoji_suggester.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"71"} +{"seq_id":"44159176402","text":"from channels.auth import AuthMiddlewareStack\nfrom channels.db import database_sync_to_async\nfrom channels.middleware import BaseMiddleware\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.db import close_old_connections\nfrom rest_framework.authtoken.models import Token\n\nfrom user.models import User\n\n\n@database_sync_to_async\ndef get_user(token_key):\n token = Token.objects.get(key=token_key)\n return token.user\n\n\nclass TokenAuthMiddleware(BaseMiddleware):\n \"\"\"\n Token authorization middleware for Django Channels 3\n \"\"\"\n\n def __init__(self, inner):\n super().__init__(inner)\n\n async def __call__(self, scope, receive, send):\n close_old_connections()\n headers = dict(scope[\"headers\"])\n try:\n if b\"sec-websocket-protocol\" in headers:\n token = headers[b\"sec-websocket-protocol\"].decode().split(\", \")\n token_name, token_key = token\n if token_name == \"Token\":\n token = await Token.objects.aget(key=token_key)\n user = await User.objects.aget(auth_token=token)\n # user = await get_user(token_key)\n scope[\"user\"] = user\n except Token.DoesNotExist:\n scope[\"user\"] = AnonymousUser()\n return await 
super().__call__(scope, receive, send)\n\n\ndef TokenAuthMiddlewareStack(inner):\n return TokenAuthMiddleware(AuthMiddlewareStack(inner))\n","repo_name":"ResearchHub/researchhub-backend","sub_path":"src/notification/token_auth.py","file_name":"token_auth.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"71"} +{"seq_id":"71954944871","text":"# -*- encoding:UTF-8 -*-\n\nimport requests\nfrom lxml import etree\n\nurl = \"https://movie.douban.com/chart\"\n\nheader = {\"User-Agent\": \"Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Mobile Safari/537.36\"}\n\nresponse = requests.get(url, headers=header)\nhtml_str = response.content.decode()\n\n# etree 处理\nhtml_element = etree.HTML(html_str)\nprint(type(html_element))\n# 获取请求\nhrefs = html_element.xpath(\"//div[@class='indent']/div/table//div[@class='pl2']/a/@href \")\n\nprint(hrefs)\n","repo_name":"emperwang/python_operation","sub_path":"base/requests/lxml/Lxml.py","file_name":"Lxml.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"71264869671","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QInputDialog, QMessageBox, QTableWidgetItem\r\nfrom PyQt5.QtCore import QStringListModel, QTimer\r\n\r\nfrom mainui import Ui_MainWindow\r\nfrom DownPosMachine import DownPosMachine\r\nfrom DB import DB\r\nfrom DownPosMachine import DownPosMachine\r\nfrom fuzzy_infer_machine import fuzzy_infer_machine\r\nfrom credit_infer_machine import credit_infer_machine\r\nimport time\r\nimport threading\r\nfrom credit_manager import credit_manager\r\nfrom fuzzy_manager import fuzzy_manager\r\nfrom realtime_api import realtime_api\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\ndb = DB()\r\ndpm = None\r\nc_manager = credit_manager(db)\r\nf_manager = fuzzy_manager(db)\r\nbaidu_data = realtime_api()\r\n\r\n#def plot_fuzzy_set(set_id):\r\n\r\n \r\n\r\nclass MyMainForm(QMainWindow, Ui_MainWindow):\r\n def __init__(self, parent=None):\r\n super(MyMainForm, self).__init__(parent)\r\n self.setupUi(self)\r\n plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']\r\n plt.rcParams['axes.unicode_minus'] = False\r\n self.DownMachineBtn.clicked.connect(self.connect_down_machine)\r\n self.sim_step.clicked.connect(self.sim_step_action)\r\n self.sim_clear.clicked.connect(self.sim_clear_action)\r\n self.sim_fuzzy.clicked.connect(self.sim_fuzzy_action)\r\n self.sim_credit.clicked.connect(self.sim_credit_action)\r\n self.ClearDownLog.clicked.connect(self.clear_dpm_log_action)\r\n self.ExpCreditResult.clicked.connect(self.dpm_credit_action)\r\n self.ExpFuzzyResult.clicked.connect(self.dpm_fuzzy_action)\r\n self.refreshSensor.clicked.connect(self.refreshSensor_click)\r\n self.refreshControl.clicked.connect(self.refreshControl_click)\r\n self.refreshSensor_click()\r\n self.refreshControl_click()\r\n self.set_NS_Light(0)\r\n self.set_WE_Light(0)\r\n self.data_from_dpm = True\r\n # 可信度知识相关\r\n self.creditInsert.clicked.connect(self.credit_insert)\r\n self.creditDelete.clicked.connect(self.credit_delete)\r\n self.creditUpdate.clicked.connect(self.credit_update)\r\n self.tableWidget.clicked.connect(self.credit_knowledge_click)\r\n # 模糊知识相关\r\n self.fuzzyInsert.clicked.connect(self.fuzzy_insert)\r\n self.fuzzyDelete.clicked.connect(self.fuzzy_delete)\r\n self.fuzzyUpdate.clicked.connect(self.fuzzy_update)\r\n 
self.fuzzySetInsert.clicked.connect(self.fuzzy_set_insert)\r\n self.fuzzySetDelete.clicked.connect(self.fuzzy_set_delete)\r\n self.fuzzySetUpdate.clicked.connect(self.fuzzy_set_update)\r\n self.fuzzySetShow.clicked.connect(self.fuzzy_set_show)\r\n self.tableWidget_3.clicked.connect(self.fuzzy_knowledge_click)\r\n # 统计分析相关\r\n self.time_list = None\r\n self.analyze.clicked.connect(self.analyze_click)\r\n self.plot_traffic.clicked.connect(self.plot_traffic_click)\r\n self.plot_speed.clicked.connect(self.plot_speed_click)\r\n self.plot_green_time.clicked.connect(self.plot_green_click)\r\n self.plot_yellow_time.clicked.connect(self.plot_yellow_click)\r\n # 其它\r\n self.sim_log = []\r\n self.dpm_log = []\r\n self.sim_fim = fuzzy_infer_machine(db)\r\n self.sim_cim = credit_infer_machine(db)\r\n self.dpm_fim = fuzzy_infer_machine(db)\r\n self.dpm_cim = credit_infer_machine(db)\r\n self.timer=QTimer()\r\n self.timer.timeout.connect(self.refresh_dpm_log)\r\n self.timer.start(200)\r\n def plot_traffic_click(self):\r\n if self.time_list is None:\r\n QMessageBox.critical(self,\"信息\",\"请先统计\")\r\n else:\r\n we_traffic = [self.time_list[x]['traffic_we'] for x in self.time_list]\r\n ns_traffic = [self.time_list[x]['traffic_ns'] for x in self.time_list]\r\n x = [x for x in self.time_list]\r\n l1=plt.plot(x,we_traffic,'r--',label='东西方向车流量')\r\n l2=plt.plot(x,ns_traffic,'g--',label='南北方向车流量')\r\n plt.plot(x,we_traffic,'ro-',x,ns_traffic,'go-')\r\n plt.title('车流量统计图')\r\n plt.legend()\r\n plt.show()\r\n def plot_speed_click(self):\r\n if self.time_list is None:\r\n QMessageBox.critical(self,\"信息\",\"请先统计\")\r\n else:\r\n we_speed = [self.time_list[x]['speed_we'] for x in self.time_list]\r\n ns_speed = [self.time_list[x]['speed_ns'] for x in self.time_list]\r\n x = [x for x in self.time_list]\r\n l1=plt.plot(x,we_speed,'r--',label='东西方向车速')\r\n l2=plt.plot(x,ns_speed,'g--',label='南北方向车速')\r\n plt.plot(x,we_speed,'ro-',x,ns_speed,'go-')\r\n plt.title('车速统计图')\r\n plt.legend()\r\n plt.show()\r\n def plot_green_click(self):\r\n if self.time_list is None:\r\n QMessageBox.critical(self,\"信息\",\"请先统计\")\r\n else:\r\n we_green = [self.time_list[x]['green_we'] for x in self.time_list]\r\n ns_green = [self.time_list[x]['green_ns'] for x in self.time_list]\r\n x = [x for x in self.time_list]\r\n l1=plt.plot(x,we_green,'r--',label='东西方向绿灯')\r\n l2=plt.plot(x,ns_green,'g--',label='南北方向绿灯')\r\n plt.plot(x,we_green,'ro-',x,ns_green,'go-')\r\n plt.title('绿灯时间统计图')\r\n plt.legend()\r\n plt.show()\r\n def plot_yellow_click(self):\r\n if self.time_list is None:\r\n QMessageBox.critical(self,\"信息\",\"请先统计\")\r\n else:\r\n we_green = [self.time_list[x]['yellow_we'] for x in self.time_list]\r\n ns_green = [self.time_list[x]['yellow_ns'] for x in self.time_list]\r\n x = [x for x in self.time_list]\r\n l1=plt.plot(x,we_green,'r--',label='东西方向黄灯')\r\n l2=plt.plot(x,ns_green,'g--',label='南北方向黄灯')\r\n plt.plot(x,we_green,'ro-',x,ns_green,'go-')\r\n plt.title('黄灯时间统计图')\r\n plt.legend()\r\n plt.show()\r\n def analyze_click(self):\r\n time_interval = max(self.time_interval.value(),1)\r\n time_start = self.time_start.value()\r\n time_end = self.time_end.value()\r\n if time_end == 0:\r\n time_end = time.time()\r\n time_start //= time_interval\r\n time_end //= time_interval\r\n sensor_result = db.get_sensor_result()\r\n log = db.get_log()\r\n time_list = dict()\r\n for x in sensor_result:\r\n data = sensor_result[x]\r\n index = data['time'] // time_interval\r\n if index not in time_list:\r\n time_list[index] = {\r\n 'speed_we':0,\r\n 
'traffic_we':0,\r\n 'green_we':0,\r\n 'yellow_we':0,\r\n 'speed_ns':0,\r\n 'traffic_ns':0,\r\n 'green_ns':0,\r\n 'yellow_ns':0,\r\n 'count_log_we':0,\r\n 'count_log_ns':0,\r\n 'count_control_we':0,\r\n 'count_control_ns':0\r\n }\r\n if data['sensor_id'] == 0: # 南北方向车流量\r\n time_list[index]['traffic_ns'] += data['sensor_result']\r\n time_list[index]['count_log_ns'] += 1\r\n elif data['sensor_id'] == 1: # 东西方向车流量\r\n time_list[index]['traffic_we'] += data['sensor_result']\r\n time_list[index]['count_log_we'] += 1\r\n elif data['sensor_id'] == 2: # 南北方向车速\r\n time_list[index]['speed_ns'] += data['sensor_result']\r\n time_list[index]['count_log_ns'] += 1\r\n else: # 东西方向车速\r\n time_list[index]['speed_we'] += data['sensor_result']\r\n time_list[index]['count_log_we'] += 1\r\n for x in log:\r\n data = log[x]\r\n index = data['time'] // time_interval\r\n if index not in time_list:\r\n time_list[index] = {\r\n 'speed_we':0,\r\n 'traffic_we':0,\r\n 'green_we':0,\r\n 'yellow_we':0,\r\n 'speed_ns':0,\r\n 'traffic_ns':0,\r\n 'green_ns':0,\r\n 'yellow_ns':0,\r\n 'count_log_we':0,\r\n 'count_log_ns':0,\r\n 'count_control_we':0,\r\n 'count_control_ns':0\r\n }\r\n if data['control_id'] == 0: # 南北方向绿灯时间\r\n time_list[index]['green_ns'] += data['control_value']\r\n time_list[index]['count_control_ns'] += 1\r\n elif data['control_id'] == 1: # 南北方向黄灯时间\r\n time_list[index]['yellow_ns'] += data['control_value']\r\n time_list[index]['count_control_ns'] += 1\r\n elif data['control_id'] == 2: # 东西方向绿灯时间\r\n time_list[index]['green_we'] += data['control_value']\r\n time_list[index]['count_control_we'] += 1\r\n else: # 东西方向黄灯时间\r\n time_list[index]['yellow_we'] += data['control_value']\r\n time_list[index]['count_control_we'] += 1\r\n self.tableWidget_8.setRowCount(len(time_list))\r\n index_i = 0\r\n for i in time_list:\r\n time_str = \"{}~{}\".format(i*time_interval,(i+1)*time_interval-1)\r\n try:\r\n time_list[i]['speed_we'] /= time_list[i]['count_log_we'] / 2\r\n except:\r\n time_list[i]['speed_we'] = 0\r\n try:\r\n time_list[i]['speed_ns'] /= time_list[i]['count_log_ns'] / 2\r\n except:\r\n time_list[i]['speed_ns'] = 0\r\n try:\r\n time_list[i]['traffic_we'] /= time_list[i]['count_log_we'] / 2\r\n except:\r\n time_list[i]['traffic_we'] = 0\r\n try:\r\n time_list[i]['traffic_ns'] /= time_list[i]['count_log_ns'] / 2\r\n except:\r\n time_list[i]['traffic_ns'] = 0\r\n try:\r\n time_list[i]['green_ns'] /= time_list[i]['count_control_ns'] / 2\r\n except:\r\n time_list[i]['green_ns'] = 0\r\n try:\r\n time_list[i]['yellow_ns'] /= time_list[i]['count_control_ns'] / 2\r\n except:\r\n time_list[i]['yellow_ns'] = 0\r\n try:\r\n time_list[i]['green_we'] /= time_list[i]['count_control_we'] / 2\r\n except:\r\n time_list[i]['green_we'] = 0\r\n try:\r\n time_list[i]['yellow_we'] /= time_list[i]['count_control_we'] / 2\r\n except:\r\n time_list[i]['yellow_we'] = 0\r\n self.tableWidget_8.setItem(index_i,0,QTableWidgetItem(time_str))\r\n self.tableWidget_8.setItem(index_i,1,QTableWidgetItem(str(time_list[i]['traffic_we'])))\r\n self.tableWidget_8.setItem(index_i,2,QTableWidgetItem(str(time_list[i]['speed_we'])))\r\n self.tableWidget_8.setItem(index_i,3,QTableWidgetItem(str(time_list[i]['green_we'])))\r\n self.tableWidget_8.setItem(index_i,4,QTableWidgetItem(str(time_list[i]['yellow_we'])))\r\n self.tableWidget_8.setItem(index_i,5,QTableWidgetItem(str(time_list[i]['traffic_ns'])))\r\n self.tableWidget_8.setItem(index_i,6,QTableWidgetItem(str(time_list[i]['speed_ns'])))\r\n 
self.tableWidget_8.setItem(index_i,7,QTableWidgetItem(str(time_list[i]['green_ns'])))\r\n self.tableWidget_8.setItem(index_i,8,QTableWidgetItem(str(time_list[i]['yellow_ns'])))\r\n index_i += 1\r\n self.time_list = time_list\r\n \r\n def refreshSensor_click(self):\r\n sensor_result = db.get_sensor_result()\r\n self.tableWidget_2.setRowCount(len(sensor_result))\r\n index_i = -1\r\n for i in sensor_result:\r\n index_i += 1\r\n for j in sensor_result[i]:\r\n item = QTableWidgetItem(str(sensor_result[i][j]))\r\n index_j = None\r\n if j == 'sensor_id':\r\n item = sensor_result[i][j]\r\n item = QTableWidgetItem(str(item))\r\n index_j = 0\r\n elif j == 'sensor_result':\r\n item = sensor_result[i][j]\r\n item = QTableWidgetItem(str(item))\r\n index_j = 1\r\n elif j == 'time':\r\n item = sensor_result[i][j]\r\n item = QTableWidgetItem(str(item))\r\n index_j = 2\r\n if index_j is not None:\r\n self.tableWidget_2.setItem(index_i, index_j, item)\r\n def refreshControl_click(self):\r\n sensor_result = db.get_log()\r\n self.tableWidget_7.setRowCount(len(sensor_result))\r\n index_i = -1\r\n for i in sensor_result:\r\n index_i += 1\r\n for j in sensor_result[i]:\r\n item = QTableWidgetItem(str(sensor_result[i][j]))\r\n index_j = None\r\n if j == 'control_id':\r\n item = sensor_result[i][j]\r\n item = QTableWidgetItem(str(item))\r\n index_j = 0\r\n elif j == 'control_value':\r\n item = sensor_result[i][j]\r\n item = QTableWidgetItem(str(item))\r\n index_j = 1\r\n elif j == 'time':\r\n item = sensor_result[i][j]\r\n item = QTableWidgetItem(str(item))\r\n index_j = 2\r\n if index_j is not None:\r\n self.tableWidget_7.setItem(index_i, index_j, item)\r\n def connect_down_machine(self):\r\n global dpm\r\n if dpm == None:\r\n serial, ok = QInputDialog.getText(self, '连接下位机', '请输入下位机的位置,例如Windows上是COMx,UNIX下是/dev/x')\r\n if ok:\r\n try:\r\n dpm = DownPosMachine(serial)\r\n QMessageBox.information(self,\"连接下位机\",\"连接成功\")\r\n self.DownMachineBtn.setText(\"退出\")\r\n except:\r\n QMessageBox.critical(self,\"连接下位机\",\"连接失败\")\r\n dpm = None\r\n if dpm != None:\r\n self.dpm_thread = threading.Thread(target=self.dpm_workder,args=())\r\n self.dpm_thread.setDaemon(True)\r\n self.dpm_thread.start()\r\n else:\r\n exit(0)\r\n def sim_step_action(self):\r\n traffic = self.traffic_value.value()\r\n speed = self.speed_value.value()\r\n temp = self.temp_value.value()\r\n rain = self.rain_value.value()\r\n snow = self.snow_value.value()\r\n wind = self.wind_value.value()\r\n self.sim_cim.update_environment(rain,snow,wind)\r\n self.sim_cim.update_realtime_info(traffic,speed,temp)\r\n cim_result = self.sim_cim.infer()\r\n self.sim_log.append(\"----- 开始推理,时间戳=%d -----\"%(time.time()))\r\n if len(cim_result):\r\n self.sim_log.append(\"可信度推理得到结论:%s\"%(str(cim_result)))\r\n else:\r\n self.sim_log.append(\"暂无可信度推理结论\")\r\n green_time = self.sim_fim.infer(1,traffic)\r\n yellow_time = self.sim_fim.infer(2,speed)\r\n self.sim_log.append(\"模糊推理得到结论:新绿灯时间=%d\"%(green_time))\r\n self.sim_log.append(\"模糊推理得到结论:新黄灯时间=%d\"%(yellow_time))\r\n self.sim_log.append(\"----- 推理结束 -----\")\r\n self.refresh_sim_log()\r\n def sim_clear_action(self):\r\n self.sim_log = []\r\n self.refresh_sim_log()\r\n def refresh_sim_log(self):\r\n slm = QStringListModel()\r\n slm.setStringList(self.sim_log)\r\n self.sim_result.setModel(slm)\r\n def sim_fuzzy_action(self):\r\n log = self.sim_fim.get_log()\r\n if len(log) >= 2:\r\n self.sim_fim.explain_log(log[-2],\"sim_绿灯时间解释结果\")\r\n self.sim_fim.explain_log(log[-1],\"sim_黄灯时间解释结果\")\r\n else:\r\n 
QMessageBox.critical(self,\"模糊解释器\",\"未找到模糊推理记录\")\r\n def dpm_fuzzy_action(self):\r\n log = self.dpm_fim.get_log()\r\n if len(log) >= 2:\r\n self.dpm_fim.explain_log(log[-2],\"dpm_绿灯时间解释结果\")\r\n self.dpm_fim.explain_log(log[-1],\"dpm_黄灯时间解释结果\")\r\n else:\r\n QMessageBox.critical(self,\"模糊解释器\",\"未找到模糊推理记录\")\r\n def sim_credit_action(self):\r\n log = self.sim_cim.get_log()\r\n if len(log):\r\n self.sim_cim.explain(log[-1],\"sim_可信度推理机解释结果\")\r\n else:\r\n QMessageBox.critical(self,\"可信度解释器\",\"未找到可信度推理记录\")\r\n def dpm_credit_action(self):\r\n log = self.dpm_cim.get_log()\r\n if len(log):\r\n self.dpm_cim.explain(log[-1],\"dpm_可信度推理机解释结果\")\r\n else:\r\n QMessageBox.critical(self,\"可信度解释器\",\"未找到可信度推理记录\")\r\n def refresh_dpm_log(self):\r\n slm = QStringListModel()\r\n slm.setStringList(self.dpm_log)\r\n self.dpm_result.setModel(slm)\r\n def clear_dpm_log_action(self):\r\n self.dpm_log = []\r\n self.refresh_dpm_log()\r\n def dpm_workder(self):\r\n global dpm\r\n ns_green = 15\r\n we_green = 15\r\n ns_yellow = 3\r\n we_yellow = 3\r\n while True:\r\n dpm.ticks = 0\r\n dpm.sum_time = 0\r\n dpm.lasttime = 0\r\n dpm.set_ns_green()\r\n self.set_NS_Light(3)\r\n dpm.set_we_red()\r\n self.set_WE_Light(1)\r\n self.dpm_log.append(\"正在测量南北方向车流量与车速\")\r\n time.sleep(ns_green)\r\n if self.use_dpm.isChecked():\r\n ticks = dpm.ticks*4\r\n speed = 0\r\n if ticks != 0 and dpm.sum_time != 0:\r\n speed = round(1 / (dpm.sum_time / dpm.ticks))\r\n else:\r\n speed,ticks = baidu_data.get_ns()\r\n db.insert_sensor_result(0,ticks)\r\n dpm.stat[0] = ticks\r\n db.insert_sensor_result(2,speed)\r\n dpm.stat[2] = speed\r\n self.dpm_log.append(\"测量结束,南北方向车流=%d,南北方向平均车速=%d\" % (ticks,speed))\r\n ticks = max(0,min(80,ticks))\r\n speed = max(0,min(45,speed))\r\n # 计算该方向新的红灯时间\r\n new_green_time = self.dpm_fim.infer(1,ticks)\r\n self.dpm_log.append(\"根据模糊推理结果,南北方向新绿灯时间=%f\"%(new_green_time))\r\n db.insert_log(0,new_green_time)\r\n ns_green = new_green_time\r\n # 计算该方向新的黄灯时间\r\n new_yellow_time = self.dpm_fim.infer(2,speed)\r\n self.dpm_log.append(\"根据模糊推理结果,南北方向新黄灯时间=%f\"%(new_yellow_time))\r\n db.insert_log(1,new_yellow_time)\r\n self.dpm_cim.update_realtime_info(ticks,speed)\r\n self.dpm_log.append(\"根据可信度推理结果,得到结论:%s\"%(str(self.dpm_cim.infer())))\r\n ns_yellow = new_yellow_time\r\n dpm.set_ns_yellow()\r\n self.set_NS_Light(2)\r\n time.sleep(ns_yellow)\r\n dpm.set_ns_red()\r\n self.set_NS_Light(1)\r\n dpm.set_we_green()\r\n self.set_WE_Light(3)\r\n dpm.ticks = 0\r\n dpm.sum_time = 0\r\n dpm.lasttime = 0\r\n self.dpm_log.append(\"正在测量东西方向车流量与车速\")\r\n time.sleep(we_green)\r\n if self.use_dpm.isChecked():\r\n ticks = dpm.ticks * 4\r\n speed = 0\r\n if ticks != 0 and dpm.sum_time != 0:\r\n speed = round(1 / (dpm.sum_time / dpm.ticks))\r\n else:\r\n speed,ticks = baidu_data.get_we()\r\n db.insert_sensor_result(1,ticks)\r\n dpm.stat[1] = ticks\r\n db.insert_sensor_result(3,speed)\r\n dpm.stat[3] = speed\r\n self.dpm_log.append(\"测量结束,东西方向车流=%d,东西方向平均车速=%d\" % (ticks,speed))\r\n ticks = max(0,min(80,ticks))\r\n speed = max(0,min(45,speed))\r\n # 计算该方向新的红灯时间\r\n new_green_time = self.dpm_fim.infer(1,ticks)\r\n self.dpm_log.append(\"根据模糊推理结果,东西方向新绿灯时间=%f\"%(new_green_time))\r\n db.insert_log(2,new_green_time)\r\n we_green = new_green_time\r\n # 计算该方向新的黄灯时间\r\n new_yellow_time = self.dpm_fim.infer(2,speed)\r\n self.dpm_log.append(\"根据模糊推理结果,东西方向新黄灯时间=%f\"%(new_yellow_time))\r\n db.insert_log(3,new_yellow_time)\r\n we_yellow = new_yellow_time\r\n self.dpm_cim.update_realtime_info(ticks,speed)\r\n 
self.dpm_log.append(\"根据可信度推理结果,得到结论:%s\"%(str(self.dpm_cim.infer())))\r\n dpm.set_we_yellow()\r\n self.set_WE_Light(2)\r\n time.sleep(we_yellow)\r\n\r\n # 可信度知识展示\r\n def credit_show(self):\r\n credit_knowledge = c_manager.get_credit_knowledge()\r\n self.tableWidget.setRowCount(len(credit_knowledge))\r\n index_i, index_j = -1, -1\r\n for i in credit_knowledge:\r\n index_i += 1\r\n index_j = -1\r\n for j in credit_knowledge[i]:\r\n index_j += 1\r\n item = QTableWidgetItem(str(credit_knowledge[i][j]))\r\n if j == 'premise_id':\r\n item = c_manager.get_premise_by_id(int(credit_knowledge[i][j]))\r\n item = QTableWidgetItem(str(item))\r\n elif j == 'conclusion_id':\r\n item = c_manager.get_conclusion_by_id(int(credit_knowledge[i][j]))\r\n item = QTableWidgetItem(str(item))\r\n self.tableWidget.setItem(index_i, index_j, item)\r\n\r\n # 可信度知识插入\r\n def credit_insert(self):\r\n premise_id = c_manager.get_premise_id_by_name(self.premise.toPlainText())\r\n conclusion_id = c_manager.get_conclusion_id_by_name(self.conclusion.toPlainText())\r\n pre_cred = self.CF.toPlainText()\r\n con_cred = self.lada.toPlainText()\r\n try:\r\n c_manager.insert_credit_knowledge(premise_id, conclusion_id, pre_cred, con_cred)\r\n self.credit_show()\r\n except:\r\n print(\"insert credit knowledge failed!\")\r\n\r\n # 可信度知识删除\r\n def credit_delete(self):\r\n credit_id = self.credit_id.toPlainText()\r\n try:\r\n c_manager.delete_credit_knowledge(int(credit_id))\r\n self.credit_show()\r\n except:\r\n print(\"delete credit knowledge failed!\")\r\n\r\n # 可信度知识修改\r\n def credit_update(self):\r\n credit_id = self.credit_id.toPlainText()\r\n premise_id = c_manager.get_premise_id_by_name(self.premise.toPlainText())\r\n conclusion_id = c_manager.get_conclusion_id_by_name(self.conclusion.toPlainText())\r\n pre_cred = self.CF.toPlainText()\r\n con_cred = self.lada.toPlainText()\r\n try:\r\n c_manager.update_credit_knowledge(credit_id, premise_id, conclusion_id, pre_cred, con_cred)\r\n self.credit_show()\r\n except:\r\n print(\"update credit knowledge failed!\")\r\n\r\n def credit_knowledge_click(self,index):\r\n credit_knowledge = c_manager.get_credit_knowledge()\r\n index_i = 0\r\n for i in credit_knowledge:\r\n if index.row() == index_i:\r\n for j in credit_knowledge[i]:\r\n if j == 'credit_id':\r\n self.credit_id.setText(str(credit_knowledge[i][j]))\r\n elif j == 'premise_id':\r\n self.premise.setText(c_manager.get_premise_by_id(credit_knowledge[i][j]))\r\n elif j == 'conclusion_id':\r\n self.conclusion.setText(c_manager.get_conclusion_by_id(credit_knowledge[i][j]))\r\n elif j == 'pre_cred':\r\n self.CF.setText(str(credit_knowledge[i][j]))\r\n elif j == 'con_cred':\r\n self.lada.setText(str(credit_knowledge[i][j]))\r\n index_i += 1\r\n\r\n # 模糊知识显示\r\n def fuzzy_show(self):\r\n fuzzy_knowledge = f_manager.get_fuzzy_knowledge()\r\n self.tableWidget_3.setRowCount(len(fuzzy_knowledge))\r\n index_i, index_j = -1, -1\r\n for i in fuzzy_knowledge:\r\n index_i += 1\r\n index_j = -1\r\n for j in fuzzy_knowledge[i]:\r\n index_j += 1\r\n item = QTableWidgetItem(str(fuzzy_knowledge[i][j]))\r\n if j == 'set_id_a' or j == 'set_id_b':\r\n item = f_manager.get_fuzzy_set_name(int(fuzzy_knowledge[i][j]))\r\n item = QTableWidgetItem(str(item))\r\n self.tableWidget_3.setItem(index_i, index_j, item)\r\n\r\n def fuzzy_insert(self):\r\n fuzzy_class = self.fuzzy_class.toPlainText()\r\n fuzzy_a = self.fuzzy_a.toPlainText()\r\n fuzzy_b = self.fuzzy_b.toPlainText()\r\n lamb = self.fuzzy_lambda.toPlainText()\r\n try:\r\n 
f_manager.insert_fuzzy_knowledge(int(fuzzy_class), lamb, int(fuzzy_a), int(fuzzy_b))\r\n self.fuzzy_show()\r\n except:\r\n print(\"insert fuzzy knowledge failed!\")\r\n\r\n def fuzzy_delete(self):\r\n id = self.fuzzy_id.toPlainText()\r\n try:\r\n f_manager.delete_fuzzy_knowledge(int(id))\r\n self.fuzzy_show()\r\n except:\r\n print(\"delete fuzzy knowledge failed!\")\r\n\r\n def fuzzy_update(self):\r\n id = self.fuzzy_id.toPlainText()\r\n fuzzy_class = self.fuzzy_class.toPlainText()\r\n fuzzy_a = self.fuzzy_a.toPlainText()\r\n fuzzy_b = self.fuzzy_b.toPlainText()\r\n lamb = self.fuzzy_lambda.toPlainText()\r\n try:\r\n f_manager.update_fuzzy_knowledge(int(id), int(fuzzy_class), lamb, int(fuzzy_a), int(fuzzy_b))\r\n self.fuzzy_show()\r\n except:\r\n print(\"update fuzzy knowledge failed!\")\r\n\r\n def fuzzy_set_insert(self):\r\n fuzzy_set = self.fuzzy_a.toPlainText()\r\n try:\r\n f_manager.insert_fuzzy_set(fuzzy_set)\r\n except:\r\n print(\"insert fuzzy set failed!\")\r\n\r\n def fuzzy_set_delete(self):\r\n id = self.fuzzy_a.toPlainText()\r\n try:\r\n f_manager.delete_fuzzy_set(int(id))\r\n except:\r\n print(\"delete fuzzy set failed!\")\r\n\r\n def fuzzy_set_update(self):\r\n fuzzy_set = self.fuzzy_a.toPlainText()\r\n try:\r\n id, content = fuzzy_set.split(',')\r\n f_manager.update_fuzzy_set(int(id), content)\r\n except:\r\n print(\"update fuzzy_set failed!\")\r\n \r\n def fuzzy_set_show(self):\r\n fuzzy_set_name = self.fuzzy_a.toPlainText()\r\n set_id = f_manager.get_fuzzy_set_id_by_name(fuzzy_set_name)\r\n if set_id == None:\r\n QMessageBox.critical(self,\"错误\",\"找不到模糊集名称\")\r\n else:\r\n fuzzy_set = f_manager.get_fuzzy_set_by_id(set_id)\r\n x = fuzzy_set.keys()\r\n y = [fuzzy_set[key] for key in fuzzy_set.keys()]\r\n plt.title(fuzzy_set_name)\r\n plt.xlabel(\"x\")\r\n plt.ylabel(\"y\")\r\n plt.plot(x,y,'b^-')\r\n plt.legend()\r\n plt.show()\r\n \r\n def fuzzy_knowledge_click(self,index):\r\n fuzzy_knowledge = f_manager.get_fuzzy_knowledge()\r\n index_i = 0\r\n for i in fuzzy_knowledge:\r\n if index.row() == index_i:\r\n for j in fuzzy_knowledge[i]:\r\n if j == 'fuzzy_id':\r\n self.fuzzy_id.setText(str(fuzzy_knowledge[i][j]))\r\n elif j == 'set_id_a':\r\n self.fuzzy_a.setText(f_manager.get_fuzzy_set_name(fuzzy_knowledge[i][j]))\r\n elif j == 'set_id_b':\r\n self.fuzzy_b.setText(f_manager.get_fuzzy_set_name(fuzzy_knowledge[i][j]))\r\n elif j == 'fuzzy_class':\r\n self.fuzzy_class.setText(str(fuzzy_knowledge[i][j]))\r\n elif j == 'lambda':\r\n self.fuzzy_lambda.setText(str(fuzzy_knowledge[i][j]))\r\n index_i += 1\r\n\r\n def set_NS_Light(self,status=0):\r\n if status == 0:\r\n self.NS_R_1.setStyleSheet(\"background-color: black;\")\r\n self.NS_Y_1.setStyleSheet(\"background-color: black;\")\r\n self.NS_G_1.setStyleSheet(\"background-color: black;\")\r\n self.NS_R_2.setStyleSheet(\"background-color: black;\")\r\n self.NS_Y_2.setStyleSheet(\"background-color: black;\")\r\n self.NS_G_2.setStyleSheet(\"background-color: black;\")\r\n elif status == 1:\r\n self.NS_R_1.setStyleSheet(\"background-color: red;\")\r\n self.NS_Y_1.setStyleSheet(\"background-color: black;\")\r\n self.NS_G_1.setStyleSheet(\"background-color: black;\")\r\n self.NS_R_2.setStyleSheet(\"background-color: red;\")\r\n self.NS_Y_2.setStyleSheet(\"background-color: black;\")\r\n self.NS_G_2.setStyleSheet(\"background-color: black;\")\r\n elif status == 2:\r\n self.NS_R_1.setStyleSheet(\"background-color: black;\")\r\n self.NS_Y_1.setStyleSheet(\"background-color: yellow;\")\r\n self.NS_G_1.setStyleSheet(\"background-color: 
black;\")\r\n self.NS_R_2.setStyleSheet(\"background-color: black;\")\r\n self.NS_Y_2.setStyleSheet(\"background-color: yellow;\")\r\n self.NS_G_2.setStyleSheet(\"background-color: black;\")\r\n elif status == 3:\r\n self.NS_R_1.setStyleSheet(\"background-color: black;\")\r\n self.NS_Y_1.setStyleSheet(\"background-color: black;\")\r\n self.NS_G_1.setStyleSheet(\"background-color: greenyellow;\")\r\n self.NS_R_2.setStyleSheet(\"background-color: black;\")\r\n self.NS_Y_2.setStyleSheet(\"background-color: black;\")\r\n self.NS_G_2.setStyleSheet(\"background-color: greenyellow;\")\r\n \r\n def set_WE_Light(self,status=0):\r\n if status == 0:\r\n self.WE_R_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_Y_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_G_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_R_2.setStyleSheet(\"background-color: Black\")\r\n self.WE_Y_2.setStyleSheet(\"background-color: Black\")\r\n self.WE_G_2.setStyleSheet(\"background-color: Black\")\r\n elif status == 1:\r\n self.WE_R_1.setStyleSheet(\"background-color: Red\")\r\n self.WE_Y_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_G_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_R_2.setStyleSheet(\"background-color: Red\")\r\n self.WE_Y_2.setStyleSheet(\"background-color: Black\")\r\n self.WE_G_2.setStyleSheet(\"background-color: Black\")\r\n elif status == 2:\r\n self.WE_R_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_Y_1.setStyleSheet(\"background-color: Yellow\")\r\n self.WE_G_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_R_2.setStyleSheet(\"background-color: Black\")\r\n self.WE_Y_2.setStyleSheet(\"background-color: Yellow\")\r\n self.WE_G_2.setStyleSheet(\"background-color: Black\")\r\n elif status == 3:\r\n self.WE_R_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_Y_1.setStyleSheet(\"background-color: Black\")\r\n self.WE_G_1.setStyleSheet(\"background-color: GreenYellow\")\r\n self.WE_R_2.setStyleSheet(\"background-color: Black\")\r\n self.WE_Y_2.setStyleSheet(\"background-color: Black\")\r\n self.WE_G_2.setStyleSheet(\"background-color: GreenYellow\")\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n myWin = MyMainForm()\r\n myWin.credit_show()\r\n myWin.fuzzy_show()\r\n myWin.show()\r\n sys.exit(app.exec_())","repo_name":"cyyself/smart-system-lab","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":32455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"37677957736","text":"# -*- coding: utf-8 -*-\n# Time : 2018/9/21 2:25\n# @Email : 741116327@qq.com\n# @File : crond.py\n# @Software: PyCharm\n# @Author : Mutallip\nfrom index.models import Video\nimport datetime,time\nfrom index.spiders.crawler import Xiaoyao,Yingyuan\nfrom index.spiders.download_index_html import Download_html\nfrom index.spiders import get_video_info\n\ndef run():\n spider()\n # # http_down()\n\n\n\n\ndef get_title():\n title_list = []\n obj = Video.objects.filter(tag='han') #获取数据库里面全部title\n # self.obj = list(self.obj)\n for k in obj:\n title_list.append(k.title)\n return title_list\n\n\ndef spider():\n print(datetime.datetime.now())\n title_list = []\n obj = Video.objects.filter(tag='han') # 获取数据库里面全部title\n # self.obj = list(self.obj)\n for k in obj:\n title_list.append(k.title)\n xy=Xiaoyao().run()[:10]\n save_db(xy)\n yx = Yingyuan().run()[:10]\n save_db(yx)\n Download_html().http_down()\n\n\ndef save_db(data):\n title_list=get_title()\n for i in data:\n if 
i['title'] in title_list:\n # Video.objects.filter(title=i['title']).update(url_image=i['images'], video_url=i['video_url'])\n print('电影已存在', i['title'])\n else:\n info = get_video_info.run(i['title'])\n if info:\n print(i['title']+'~~~~~~~~~~~~~~~~~~~~~成功了-----')\n\n type=info['type']\n director=info['director']\n actors=info['actors']\n area=info['area']\n content=info['content']\n print(type,director)\n else:\n print(i['title'] + '~~~~~~~~~~~~~~~~~~~~~失败------')\n director = type =actors =area =content =None\n print()\n obj = Video(title=i['title'], url_image=i['image_url'], video_url=i['video_url'], tag='han',\n create_time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),content=content,type=type,director=director,actors=actors,area=area)\n obj.save()\n title_list.append(i['title'])\n print('没有', i['title'])\n\n time.sleep(2)\n\n\nif __name__ == '__main__':\n # get_videoinfo('长城')\n print('6982558')\n\n\n\n","repo_name":"mutallipp/web","sub_path":"apps/index/crond.py","file_name":"crond.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26629855868","text":"from typing import Any\nfrom cog import BasePredictor, Input, Path\n\nimport json\nimport torch\nimport timm\nimport numpy as np\nimport urllib\n\nfrom timm.data.transforms_factory import transforms_imagenet_eval\n\nfrom PIL import Image\n\nclass Predictor(BasePredictor):\n def setup(self):\n \"\"\"Load the model into memory to make running multiple predictions efficient\"\"\"\n self.model = timm.create_model('resnet18', pretrained=True)\n self.model.eval()\n self.transform = transforms_imagenet_eval()\n\n url, filename = (\n \"https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt\", \"imagenet_classes.txt\")\n urllib.request.urlretrieve(url, filename)\n with open(\"imagenet_classes.txt\", \"r\") as f:\n self.labels = [s.strip() for s in f.readlines()]\n\n # Define the arguments and types the model takes as input\n def predict(self, image: Path = Input(description=\"Image to classify\")) -> Any:\n \"\"\"Run a single prediction on the model\"\"\"\n # Preprocess the image\n img = Image.open(image).convert('RGB')\n img = self.transform(img)\n\n # Run the prediction\n with torch.no_grad():\n labels = self.model(img[None, ...])\n labels = labels[0] # we'll only do this for one image\n\n # top 5 preds\n topk = labels.topk(5)[1]\n output = {\n # \"labels\": labels.cpu().numpy(),\n \"topk\": [self.labels[x] for x in topk.cpu().numpy().tolist()],\n }\n\n return output","repo_name":"sushant097/TSAI-DistributedTraining-Assignment6","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"38721131490","text":"#!/usr/bin/env python\ncsu_addr=0x000400FAA\ndef csu(func_array):\n func=[0,0,0,0]\n for i in range(len(func_array)):\n func[i]=func_array[i]\n payload=p64(0)+p64(1)+p64(func[0])+p64(func[3])+p64(func[2])+p64(func[1])+p64(csu_addr-0x1a)+p64(0)\n return payload\ndef call_csu(funcs_array):\n payload=p64(csu_addr)\n for i in funcs_array:\n payload+=csu(i)\n payload+=p64(0)*6\n return payload\n","repo_name":"BXS-Team/pwn_exploit_templates","sub_path":"rop.py","file_name":"rop.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"6710134362","text":"import itchat\n\n# 
参数itchat.auto_login(hotReload=True)实现第一次运行时扫码,一定时间内再次运行就不用扫码了\nitchat.auto_login(hotReload=True)\n# itchat.auto_login(enableCmdQR=-1)\n\n# 给文件助手发微信\n# itchat.send('你好',toUserName='filehelper')\n\n# 获取微信好友字段名\nvx_file=itchat.search_friends(name='linmeo')\nvx_name_key=vx_file[0]['UserName']\n\nitchat.send('Hello!,This is auto send from python.',toUserName=vx_name_key)\n\n# 登出\nitchat.logout()","repo_name":"ycallenchina/PythonStudy_Git","sub_path":"其他学习_少量/微信自动化/自动发微信.py","file_name":"自动发微信.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8003736438","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport smtplib\r\nimport time\r\n\r\na=input(\"ENTER PRODUCT URL\")\r\nURL=a\r\nheaders = {\"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'}\r\n\r\ndef check_price():\r\n page = requests.get(URL, headers=headers)\r\n\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n\r\n price = soup.find(id=\"priceblock_ourprice\").get_text()\r\n converted_price = (price.replace(',','').strip('₹').strip(' '))\r\n converted_price = float(converted_price)\r\n if(converted_price < 21000.00):\r\n send_mail()\r\n \r\n\r\ndef send_mail():\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.ehlo()\r\n server.starttls()\r\n server.ehlo()\r\n\r\n server.login('trackerprice92@gmail.com','bcjpcqzfskinealx')\r\n\r\n subject ='Price Drop Alert'\r\n body = 'Check the link : https://www.amazon.in/Fitbit-FB507BKBK-Smartwatch-Tracking-Included/dp/B07TWFVDWT/ref=lp_19136330031_1_1?srs=19136330031&ie=UTF8&qid=1571410965&sr=8-1'\r\n msg = f\"Subject: {subject}\\n\\n{body}\"\r\n server.sendmail(\r\n 'trackerprice92@gmail.com',\r\n 'anamay22@outlook.com',\r\n msg\r\n )\r\n print('HEY! EMAIL HAS BEEN SENT')\r\n server.quit()\r\n\r\nwhile(True):\r\n check_price()\r\n time.sleep(40)","repo_name":"Anishthdev/PriceTracker","sub_path":"priceapp.py","file_name":"priceapp.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"9476290768","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'cmsapp'\nurlpatterns = [\n path('', views.index, name='index'),\n path('articles/section/', views.article_section, name='article_section'),\n path('articles/', views.article_read, name='article_read'),\n path('articles/search', views.articles_search, name='search_article'),\n path('comments/new/', views.comment_new, name='comment_new'),\n path('manage/articles', views.article_list, name='article_list'),\n path('manage/comments', views.comment_list, name='comment_list'),\n path('manage/comments/edit/', views.comment_edit, name='comment_edit'),\n path('manage/comments/edit/save/', views.comment_edit_save, name='comment_edit_save'),\n path('manage/comments/delete/', views.comment_delete, name='comment_delete'),\n path('manage/sections', views.section_list, name='section_list'),\n path('manage/sections/new', views.section_new, name='section_new'),\n path('manage/sections/new/save', views.section_save, name='section_save'),\n path('manage/sections/delete/', views.section_delete, name='section_delete'),\n path('manage/sections/edit/', views.section_edit, name='section_edit'),\n path('manage/sections/edit/save/', views.section_edit_save, name='section_edit_save'),\n path('manage/users', views.user_list, name='user_list'),\n path('manage/users/edit/', views.user_edit, name='user_edit'),\n path('manage/users/edit/save/', views.user_save, name='user_save'),\n path('manage/users/delete/', views.user_delete, name='user_delete'),\n path('manage/articles/new', views.article_new, name='article_new'),\n path('manage/articles/new/save', views.article_save, name='article_save'),\n path('manage/articles/delete/', views.article_delete, name='article_delete'),\n path('manage/articles/edit/', views.article_edit, name='article_edit'),\n path('manage/articles/edit/save/', views.article_edit_save, name='article_edit_save'),\n path('account/login', views.custom_login, name='login'),\n path('account/register', views.custom_register, name='register'),\n path('account/logout', views.custom_logout, name='logout'),\n path('account/authentication', views.custom_authentication, name='authentication'),\n path('account/createaccount', views.custom_create_account, name='create_account'),\n path('account/profile/edit', views.profile_edit, name='profile_edit'),\n path('account/profile/edit/save/', views.profile_edit_save, name='profile_edit_save'),\n path('account/profile/password/change/', views.profile_password_change, name='profile_password_change'),\n path('system/send-email/', views.send_email, name='send_email')\n]\n","repo_name":"RolandMcDoland/ContentManagmentSystem","sub_path":"cmsapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"35666251812","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 20 10:38:45 2021\n\n@author: abdel\n\"\"\"\n\n\nimport os\nimport sys\nimport torch\nimport torch_geometric\nfrom pathlib import Path\nfrom model import GNNPolicy\nfrom data_type import GraphDataset\nfrom utils import process\n\nif __name__ == \"__main__\":\n \n problem = \"GISP\"\n lr = 0.005\n n_epoch = 2\n n_sample = -1\n patience = 10\n early_stopping = 20\n normalize = True\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n batch_train = 16\n batch_valid = 256\n \n loss_fn = torch.nn.BCELoss()\n optimizer_fn = torch.optim.Adam\n \n for i in range(1, len(sys.argv), 2):\n if sys.argv[i] == '-problem':\n 
problem = str(sys.argv[i + 1])\n if sys.argv[i] == '-lr':\n lr = float(sys.argv[i + 1])\n if sys.argv[i] == '-n_epoch':\n n_epoch = int(sys.argv[i + 1])\n if sys.argv[i] == '-n_sample':\n n_sample = int(sys.argv[i + 1])\n if sys.argv[i] == '-patience':\n patience = int(sys.argv[i + 1])\n if sys.argv[i] == '-early_stopping':\n early_stopping = int(sys.argv[i + 1])\n if sys.argv[i] == '-normalize':\n normalize = bool(int(sys.argv[i + 1]))\n if sys.argv[i] == '-device':\n device = str(sys.argv[i + 1])\n if sys.argv[i] == '-batch_train':\n batch_train = int(sys.argv[i + 1])\n if sys.argv[i] == '-batch_valid':\n batch_valid = int(sys.argv[i + 1])\n \n \n \n train_losses = []\n valid_losses = []\n train_accs = []\n valid_accs = []\n\n\n train_files = [ str(path) for path in Path(os.path.join(os.path.dirname(__file__), \n f\"../node_selection/data/{problem}/train\")).glob(\"*.pt\") ][:n_sample]\n \n valid_files = [ str(path) for path in Path(os.path.join(os.path.dirname(__file__), \n f\"../node_selection/data/{problem}/valid\")).glob(\"*.pt\") ][:int(0.2*n_sample if n_sample != -1 else -1)]\n \n\n if problem == 'FCMCNF':\n train_files = train_files + valid_files[3000:]\n valid_files = valid_files[:3000]\n\n \n\n train_data = GraphDataset(train_files)\n valid_data = GraphDataset(valid_files)\n \n \n# TO DO : learn something from the data\n train_loader = torch_geometric.loader.DataLoader(train_data, \n batch_size=batch_train, \n shuffle=True, \n follow_batch=['constraint_features_s', \n 'constraint_features_t',\n 'variable_features_s',\n 'variable_features_t'])\n \n valid_loader = torch_geometric.loader.DataLoader(valid_data, \n batch_size=batch_valid, \n shuffle=False, \n follow_batch=['constraint_features_s',\n 'constraint_features_t',\n 'variable_features_s',\n 'variable_features_t'])\n \n policy = GNNPolicy().to(device)\n optimizer = optimizer_fn(policy.parameters(), lr=lr) #ADAM is the best\n \n print(\"-------------------------\")\n print(f\"GNN for problem {problem}\")\n print(f\"Training on: {len(train_data)} samples\")\n print(f\"Validating on: {len(valid_data)} samples\")\n print(f\"Batch Size Train: {batch_train}\")\n print(f\"Batch Size Valid {batch_valid}\")\n print(f\"Learning rate: {lr} \")\n print(f\"Number of epochs: {n_epoch}\")\n print(f\"Normalize: {normalize}\")\n print(f\"Device: {device}\")\n print(f\"Loss fct: {loss_fn}\")\n print(f\"Optimizer: {optimizer_fn}\") \n print(f\"Model's Size: {sum(p.numel() for p in policy.parameters())} parameters \")\n print(\"-------------------------\") \n \n \n for epoch in range(n_epoch):\n print(f\"Epoch {epoch + 1}\")\n \n train_loss, train_acc = process(policy, \n train_loader, \n loss_fn,\n device,\n optimizer=optimizer, \n normalize=normalize)\n train_losses.append(train_loss)\n train_accs.append(train_acc)\n print(f\"Train loss: {train_loss:0.3f}, accuracy {train_acc:0.3f}\" )\n \n valid_loss, valid_acc = process(policy, \n valid_loader, \n loss_fn, \n device,\n optimizer=None,\n normalize=normalize)\n valid_losses.append(valid_loss)\n valid_accs.append(valid_acc)\n \n print(f\"Valid loss: {valid_loss:0.3f}, accuracy {valid_acc:0.3f}\" )\n \n torch.save(policy.state_dict(),f'policy_{problem}.pkl')\n\n\n\n\n\n","repo_name":"ds4dm/learn2comparenodes","sub_path":"learning/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"71"} +{"seq_id":"70343804390","text":"import pandas as pd\r\nimport sklearn\r\nfrom 
sklearn.model_selection import StratifiedKFold\r\ndf = pd.read_csv('/home/hana/sonnh/kaggle-cassava/dataset/train_mix/new_mix.csv')\r\n# print(df_new.columns)\r\nprint(df.head())\r\ndf_new = df[:21396]\r\ndf_old = df[21396:]\r\n\r\ndef fillter(df, a, b):\r\n df_a = df[df['fold'] == a]\r\n\r\n df_b = df[df['fold'] == b]\r\n\r\n df_ab = pd.concat([df_a, df_b])\r\n # print(tyoe(df_14))\r\n return df_ab\r\n\r\ndef re_split(df, a, b):\r\n df = fillter(df, a, b)\r\n df_new = pd.DataFrame({})\r\n df_new['image_id'] = df['image_id']\r\n df_new['label'] = df['label']\r\n df_new['fold'] = df['fold']\r\n df = df_new\r\n #print(df)\r\n #print(df_new)\r\n skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=2020)\r\n X = df['image_id']\r\n Y = df['label']\r\n fold = a - 1\r\n for train_index, test_index in skf.split(X, Y):\r\n # print(test_index)\r\n fold += 1\r\n X_test = X.iloc[test_index]\r\n df.loc[df.image_id.isin(X_test), 'fold'] = fold\r\n\r\n # for i in range(len(df)):\r\n # if df.iloc[i]['fold'] == 2:\r\n # df.iloc[i]['fold'] = 3\r\n df['fold'] = df['fold'].replace(a + 1, b)\r\n return df\r\n\r\ndf_new_14 = re_split(df_new, 5, 1)\r\ndf_old_14 = re_split(df_old, 5, 1)\r\n\r\ndf_new_23 = re_split(df_new, 2, 3)\r\ndf_old_23 = re_split(df_old, 2, 3)\r\n\r\n\r\ndf_5 = df[df['fold'] == 4]\r\n\r\ndf_new_split_14 = pd.concat([df_new_14, df_old_14, df_5, df_new_23, df_old_23])\r\ndf_new_split_14.to_csv('new_mix_1234.csv', index = False)\r\n\r\n\r\n","repo_name":"freedom1810/kaggle-cassava","sub_path":"dataset/train_mix/split_5123.py","file_name":"split_5123.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"71"} +{"seq_id":"86284921575","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom astropy import log, units\nfrom astropy.io import fits\nimport numpy as np\nimport pytest\n\nfrom sofia_redux.scan.coordinate_systems.coordinate_2d import Coordinate2D\nfrom sofia_redux.scan.source_models.maps.fits_data import FitsData\n\n\nud = units.dimensionless_unscaled\n\n\n@pytest.fixture\ndef data_array():\n shape = (10, 11) # y, x\n d = np.arange(shape[0] * shape[1], dtype=float).reshape(shape)\n return d\n\n\n@pytest.fixture\ndef fits_data(data_array):\n f = FitsData(data=data_array.copy(), unit='Jy')\n return f\n\n\n@pytest.fixture\ndef ones(fits_data):\n f = fits_data.copy()\n f.fill(1.0)\n return f\n\n\n@pytest.fixture\ndef beam_map():\n beam = np.array([0.5, 1, 0.5])\n beam = beam[None] * beam[:, None]\n return beam\n\n\ndef test_init(data_array):\n data = data_array.copy()\n f = FitsData(data=data)\n assert f.shape == (10, 11)\n assert np.allclose(f.data, data)\n assert f.history == ['new size 11x10']\n assert not f.verbose\n assert f.unit == 1 * ud\n assert f.local_units == {'': 1 * ud}\n assert f.alternate_unit_names == {'': ''}\n assert f.log_new_data\n assert f.parallelism == 0\n assert f.executor is None\n assert np.isnan(f.blanking_value)\n\n\ndef test_copy(fits_data):\n f = fits_data.copy()\n f2 = f.copy()\n assert f == f2 and f is not f2\n\n\ndef test_eq(fits_data):\n f = fits_data.copy()\n f2 = f.copy()\n assert f == f and f == f2\n assert f2.data is not f.data\n assert f2.local_units is f.local_units\n assert f2.alternate_unit_names is f.alternate_unit_names\n assert f != 1\n f2.unit = 'K'\n assert f != f2\n\n\ndef test_referenced_attributes():\n f = FitsData()\n assert 'local_units' in f.referenced_attributes\n assert 'alternate_unit_names' in 
f.referenced_attributes\n\n\ndef test_unit():\n f = FitsData()\n assert f.unit == 1 * ud\n f.unit = 'K'\n assert f.unit == 1 * units.Unit('K')\n\n\ndef test_fits_to_numpy():\n coordinates = Coordinate2D(np.arange(10).reshape(2, 5))\n new = FitsData.fits_to_numpy(coordinates)\n assert np.allclose(new, coordinates.coordinates[::-1])\n coordinates = coordinates.coordinates\n new = FitsData.fits_to_numpy(coordinates)\n assert np.allclose(new, coordinates[::-1])\n coordinates = np.random.random((2, 5)) > 0.5\n new = FitsData.fits_to_numpy(coordinates)\n assert np.allclose(new, coordinates)\n new = FitsData.fits_to_numpy([1, 2])\n assert new == [2, 1]\n new = FitsData.fits_to_numpy(1)\n assert new == 1\n\n\ndef test_numpy_to_fits():\n new = FitsData.numpy_to_fits([1, 2])\n assert new == [2, 1]\n\n\ndef test_get_size_string(fits_data):\n f = fits_data.copy()\n assert f.get_size_string() == '11x10'\n f._data = None\n assert f.get_size_string() == '0'\n\n\ndef test_set_data_shape(fits_data):\n f = fits_data.copy()\n f.set_data_shape((4, 5))\n assert f.data.shape == (4, 5)\n assert f.shape == (4, 5)\n assert f.history == ['new size 5x4']\n\n\ndef test_set_data(fits_data):\n f = fits_data.copy()\n f.set_data(np.ones((3, 4)))\n assert f.data.shape == (3, 4)\n assert f.unit == 1 * units.Unit('Jy')\n f2 = f.copy()\n f2.unit = 'K'\n f.set_data(f2)\n assert f.unit == 1 * units.Unit('K')\n\n\ndef test_unit_to_quantity():\n k = units.Unit('K')\n assert FitsData.unit_to_quantity(1 * k) == 1 * k\n assert FitsData.unit_to_quantity('K') == 1 * k\n assert FitsData.unit_to_quantity(k) == 1 * k\n assert FitsData.unit_to_quantity(ud) == 1 * ud\n with pytest.raises(ValueError) as err:\n _ = FitsData.unit_to_quantity(1)\n assert 'Unit must be a ' in str(err.value)\n\n\ndef test_add_local_unit(fits_data):\n f = fits_data.copy()\n k = units.Unit('K')\n f.add_local_unit(k)\n assert f.local_units == {'Jy': 1 * units.Unit('Jy'),\n 'K': 1 * units.Unit('K')}\n assert f.alternate_unit_names == {\n 'Jy': 'Jy', 'Jansky': 'Jy', 'jansky': 'Jy', 'K': 'K', 'Kelvin': 'K'}\n\n f.add_local_unit(k, alternate_names=['foo', 'bar'])\n assert f.alternate_unit_names == {'Jy': 'Jy', 'Jansky': 'Jy',\n 'jansky': 'Jy', 'K': 'K', 'Kelvin': 'K',\n 'foo': 'K', 'bar': 'K'}\n f.local_units = None\n f.add_local_unit(k)\n assert f.local_units == {'K': 1 * units.Unit('K')}\n\n\ndef test_add_alternate_unit_names():\n f = FitsData()\n assert f.alternate_unit_names == {'': ''}\n f.alternate_unit_names = None\n f.add_alternate_unit_names('foo', 'bar')\n assert f.alternate_unit_names == {'bar': 'foo', 'foo': 'foo'}\n\n\ndef test_get_unit():\n f = FitsData()\n k = units.Unit('K')\n f.add_local_unit(k)\n assert f.get_unit(k) == 1 * k\n assert f.get_unit('Kelvin') == 1 * k\n assert f.get_unit('Jy') == 1 * units.Unit('Jy')\n assert f.get_unit(2 * k) == 1 * k\n\n\ndef test_set_unit():\n f = FitsData()\n k = units.Unit('K')\n f.set_unit('K')\n assert f.local_units == {'': 1 * ud, 'K': 1 * k}\n assert f.unit == 1 * k\n\n\ndef test_set_default_unit():\n f = FitsData()\n f.unit = 1 * units.Unit('K')\n f.set_default_unit()\n assert f.unit == 1 * ud\n\n\ndef test_clear_history(fits_data):\n f = fits_data.copy()\n assert len(f.history) == 1\n f.clear_history()\n assert len(f.history) == 0\n\n\ndef test_add_history():\n f = FitsData()\n f.verbose = True\n with log.log_to_list() as log_list:\n f.add_history('foo')\n assert len(log_list) == 1 and log_list[0].msg == 'foo'\n assert f.history[-1] == 'foo'\n f.verbose = False\n f.history = None\n f.add_history(['foo', 
'bar'])\n assert f.history == ['foo', 'bar']\n\n\ndef test_set_history():\n f = FitsData()\n f.set_history(['foo', 'bar'])\n assert f.history == ['foo', 'bar']\n f.set_history('foo')\n assert f.history == ['foo']\n\n\ndef test_add_history_to_header():\n f = FitsData()\n f.history = None\n header = fits.Header()\n f.add_history_to_header(header)\n assert len(header) == 0\n f.history = ['foo', 'bar']\n f.add_history_to_header(header)\n assert list(header['HISTORY']) == ['foo', 'bar']\n\n\ndef test_record_new_data(fits_data):\n f = fits_data.copy()\n f.log_new_data = False\n assert not f.log_new_data\n f.record_new_data()\n assert len(f.history) == 1\n assert f.log_new_data\n f.record_new_data(detail='foobar')\n assert f.history == ['set new image 11x10 foobar']\n\n\ndef test_set_parallel():\n f = FitsData()\n f.set_parallel(10)\n assert f.parallelism == 10\n\n\ndef test_set_executor():\n f = FitsData()\n f.set_executor('foo')\n assert f.executor == 'foo'\n\n\ndef test_clear(fits_data):\n f = fits_data.copy()\n f.clear()\n assert f.data.shape == (10, 11) and np.allclose(f.data, 0)\n assert f.history == ['clear 11x10']\n f.data = np.ones(f.shape)\n mask = np.full(f.shape, False)\n mask[:4] = True\n f.clear(mask)\n assert np.allclose(f.data[:4], 0)\n assert np.allclose(f.data[4:], 1)\n f.data = np.ones(f.shape)\n indices = np.nonzero(mask)[::-1] # x, y format\n f.clear(indices)\n assert np.allclose(f.data[:4], 0)\n assert np.allclose(f.data[4:], 1)\n\n\ndef test_destroy(fits_data):\n f = fits_data.copy()\n assert len(f.history) == 1\n f.destroy()\n assert len(f.history) == 0\n assert f.shape == (0, 0)\n\n\ndef test_fill(fits_data):\n f = fits_data.copy()\n f.data = np.zeros(f.shape)\n f.fill(1)\n assert f.shape == (10, 11) and np.allclose(f.data, 1)\n assert f.history == ['fill 11x10 with 1']\n\n mask = np.full(f.shape, False)\n mask[:, :2] = True\n f.fill(2, mask)\n assert np.allclose(f.data[mask], 2)\n assert np.allclose(f.data[~mask], 1)\n\n indices = np.nonzero(mask)[::-1] # FITS (x, y) order\n f.fill(0)\n f.fill(1, indices=indices)\n assert np.allclose(f.data[~mask], 0)\n assert np.allclose(f.data[mask], 1)\n\n\ndef test_add(fits_data):\n f = fits_data.copy()\n f.clear()\n f.add(1)\n assert np.allclose(f.data, 1)\n assert f.history[-1] == 'added 1'\n mask = np.full(f.shape, False)\n mask[:2] = True\n f.add(1, indices=mask, factor=2)\n assert np.allclose(f.data[:2], 3)\n assert np.allclose(f.data[2:], 1)\n assert f.history[-1] == 'added 2'\n f.fill(1)\n f2 = f.copy()\n indices = np.asarray(np.nonzero(mask))[::-1] # For FITS (x, y) order\n f.add(f2, indices=indices)\n assert np.allclose(f.data[:2], 2)\n assert np.allclose(f.data[2:], 1)\n assert f.history[-1] == 'added FitsData'\n f.fill(1)\n f.add(np.ones(f.shape), factor=3)\n assert f.history[-1] == 'added scaled (10, 11) array (3x)'\n f.fill(1)\n f.add(f2, indices=indices, factor=3)\n assert np.allclose(f.data[:2], 4)\n assert np.allclose(f.data[2:], 1)\n assert f.history[-1] == 'added scaled FitsData (3x)'\n\n\ndef test_scale(ones):\n f = ones.copy()\n mask = np.full(f.shape, False)\n mask[:2] = True\n indices = np.asarray(np.nonzero(mask))[::-1] # For FITS (x, y) order\n f.scale(3, indices=indices)\n assert np.allclose(f.data[:2], 3)\n assert np.allclose(f.data[2:], 1)\n assert f.history[-1] == 'scale by 3'\n\n\ndef test_validate(ones):\n f = ones.copy()\n\n class Validator(object):\n def __call__(self, array):\n array.discard(array.data == 2)\n\n data = f.data.copy()\n data[1] = 2\n data[0, 0] = np.nan\n f.data = data\n assert 
np.allclose(f.flag, 0)\n f.validate()\n assert f.flag[0, 0] == 1\n assert np.allclose(f.flag[np.isfinite(data)], 0)\n assert f.history[-1] == 'validate'\n\n f.validate(validator=Validator())\n assert f.flag[0, 0] == 1\n assert np.allclose(f.flag[0, 1:], 0)\n assert np.allclose(f.flag[1], 1)\n assert np.allclose(f.flag[2:], 0)\n assert f.history[-1].startswith('validate via')\n\n\ndef test_paste(ones):\n f = ones.copy()\n f2 = f.copy()\n f2.scale(2)\n f.paste(f2)\n assert f == f2\n assert f.history[-1] == 'pasted new content: 11x10'\n\n\ndef test_smooth(ones, beam_map):\n f = ones.copy()\n data = np.zeros(f.shape)\n data[5, 5] = 1\n reference_index = np.array([1, 1])\n f.data = data\n f.smooth(beam_map, reference_index=reference_index)\n assert np.allclose(f.data[4:7, 4:7], beam_map / 4)\n assert f.history[-1] == 'smoothed'\n\n\ndef test_get_smoothed(ones, beam_map):\n f = ones.copy()\n reference_index = np.array([1, 1])\n smoothed, weights = f.get_smoothed(\n beam_map, reference_index=reference_index)\n assert np.allclose(smoothed, 1)\n assert np.allclose(weights[0], [2.25, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2.25])\n assert np.allclose(weights[0], weights[-1])\n assert np.allclose(weights[:, 0], [2.25, 3, 3, 3, 3, 3, 3, 3, 3, 2.25])\n assert np.allclose(weights[:, 0], weights[:, -1])\n assert np.allclose(weights[1:9, 1:10], 4)\n\n\ndef test_fast_smooth(ones, beam_map):\n f = ones.copy()\n f.fast_smooth(beam_map, np.ones(2, dtype=int))\n assert np.allclose(f.data, 1)\n assert f.history[-2] == 'pasted new content: 11x10'\n assert f.history[-1] == 'smoothed (fast method)'\n\n\ndef test_get_fast_smoothed(ones, beam_map):\n f = ones.copy()\n steps = np.ones(2, dtype=int)\n smoothed = f.get_fast_smoothed(beam_map, steps)\n assert np.allclose(smoothed, 1)\n smoothed, weights = f.get_fast_smoothed(beam_map, steps, get_weights=True)\n assert np.allclose(weights[0], [2.25, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2.25])\n assert np.allclose(weights[0], weights[-1])\n assert np.allclose(weights[:, 0], [2.25, 3, 3, 3, 3, 3, 3, 3, 3, 2.25])\n assert np.allclose(weights[:, 0], weights[:, -1])\n assert np.allclose(weights[1:9, 1:10], 4)\n\n\ndef test_create_fits(ones):\n f = ones.copy()\n hdul = f.create_fits()\n assert len(hdul) == 1 and np.allclose(hdul[0].data, 1)\n\n\ndef test_get_hdus(ones):\n f = ones.copy()\n hdus = f.get_hdus()\n assert isinstance(hdus, list) and len(hdus) == 1\n\n\ndef test_create_hdu(ones):\n f = ones.copy()\n hdu = f.create_hdu()\n assert isinstance(hdu, fits.ImageHDU)\n assert np.allclose(hdu.data, 1)\n assert hdu.header['BUNIT'] == 'Jy'\n\n\ndef test_get_fits_data(ones):\n f = ones.copy()\n assert f.get_fits_data() is f.data\n\n\ndef test_edit_header(ones):\n f = ones.copy()\n header = fits.Header()\n data = f.data\n data[5, 5] = 100\n f.data = data\n f.edit_header(header)\n assert header['DATAMIN'] == 1.0\n assert header['DATAMAX'] == 100.0\n assert header['BZERO'] == 0.0\n assert header['BSCALE'] == 1.0\n assert header['BUNIT'] == 'Jy'\n assert header['HISTORY'] == 'new size 11x10'\n f.unit = ud\n f.edit_header(header)\n assert header['BUNIT'] == 'ct'\n\n\ndef test_parse_header(ones):\n f = ones.copy()\n header = fits.Header()\n f.parse_header(header)\n assert f.unit == 1 * ud\n header['BUNIT'] = 'Jy'\n f.parse_header(header)\n assert f.unit == 1 * units.Unit('Jy')\n\n\ndef test_parse_history(ones):\n f = ones.copy()\n header = fits.Header()\n f.parse_history(header)\n assert f.history == []\n header['HISTORY'] = 'foo'\n f.parse_history(header)\n assert f.history == ['foo']\n\n\ndef 
test_get_indices(ones):\n with pytest.raises(NotImplementedError):\n ones.get_indices(np.arange(5))\n\n\ndef test_delete_indices(ones):\n with pytest.raises(NotImplementedError):\n ones.delete_indices(np.arange(5))\n\n\ndef test_insert_blanks(ones):\n with pytest.raises(NotImplementedError):\n ones.insert_blanks(np.arange(5))\n\n\ndef test_merge(ones):\n with pytest.raises(NotImplementedError):\n ones.merge(ones.copy())\n\n\ndef test_resample_from(ones, beam_map):\n image = ones.copy()\n image.data[5, 6] = 2.0\n kernel = beam_map.copy()\n to_indices = np.stack(\n [x.ravel() for x in np.indices(image.shape)])[::-1] # xy not yx\n kernel_reference_index = np.array([1, 1])\n f = ones.copy()\n f.resample_from(image, to_indices, kernel=kernel,\n kernel_reference_index=kernel_reference_index)\n\n mask = np.full(image.shape, False)\n mask[4:7, 5:8] = True\n assert np.allclose(\n f.data[mask],\n [1.0625, 1.125, 1.0625, 1.125, 1.25, 1.125, 1.0625, 1.125, 1.0625])\n assert np.allclose(f.data[~mask], 1)\n assert f.history[-1] == 'resampled 11x10 from 11x10'\n\n f.clear()\n f.resample_from(image.data, to_indices, kernel=kernel,\n kernel_reference_index=kernel_reference_index)\n assert np.allclose(\n f.data[mask],\n [1.0625, 1.125, 1.0625, 1.125, 1.25, 1.125, 1.0625, 1.125, 1.0625])\n assert np.allclose(f.data[~mask], 1)\n assert f.history[-1] == 'resampled 11x10 from 11x10'\n\n\ndef test_despike(ones):\n f = ones.copy()\n f.despike(2.0)\n assert f.history[-1] == 'despiked at 2.000'\n\n\ndef test_get_index_range(ones):\n f = ones.copy()\n index_range = f.get_index_range()\n assert np.allclose(index_range, [[0, 11], [0, 10]])\n\n\ndef test_value_at(ones):\n f = ones.copy()\n data = f.data\n f.data[3, 5] = 2.0 # (x, y) = (5, 3)\n f.data = data\n assert np.isclose(f.value_at([5, 3]), 2)\n assert np.isclose(f.value_at([5, 4]), 1)\n assert np.isclose(f.value_at([4.9, 3.1]), 1.715, atol=1e-3)\n\n\ndef test_index_of_max(ones):\n f = ones.copy()\n data = f.data\n data[4, 5] = 2.0 # (x, y) = (5, 4)\n f.data = data\n value, index = f.index_of_max()\n assert value == 2\n assert np.allclose(index, [5, 4])\n\n\ndef test_get_refined_peak_index(ones):\n f = ones.copy()\n data = f.data\n data[5, 4] = 1.4\n data[5, 5] = 1.9\n data[5, 6] = 1.6\n peak_index = np.asarray([5, 5])\n f.data = data\n index = f.get_refined_peak_index(peak_index)\n assert np.allclose(index, [5.125, 5])\n\n\ndef test_crop(ones):\n f = ones.copy()\n f.crop(np.array([[4, 8], [2, 4]]))\n assert f.shape == (3, 5)\n","repo_name":"SOFIA-USRA/sofia_redux","sub_path":"sofia_redux/scan/source_models/maps/tests/test_fits_data.py","file_name":"test_fits_data.py","file_ext":"py","file_size_in_byte":15693,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"71478340069","text":"\"\"\"SH Controller Template controller.\"\"\"\n\nfrom controller import Robot\nfrom controller import Supervisor\n\nimport struct\n\ndef getTF():\n global robot\n return robot.getSelf().getField(\"translation\").getSFVec3f() \n\ndef setTF(x,y,z):\n robot.getSelf().getField(\"translation\").setSFVec3f([x,y,z]) \n\ndef getSize():\n global robot\n return robot.getSelf().getField(\"size\").getSFFloat()\n\ndef setSize(size):\n global robot\n robot.getSelf().getField(\"size\").setSFFloat(size)\n\ndef getMaxSize():\n global robot\n return robot.getSelf().getField(\"maxSize\").getSFFloat()\n\ndef getGrowth():\n global robot\n return robot.getSelf().getField(\"growth\").getSFFloat()\n\n\n#def getSize():\n #return 
robot.getSelf().getField(\"children\").getMFNode(0).getField('geometry').getSFNode().getField('radius').getSFFloat()\n# return robot.getSelf().getField(\"size\").getSFFloat()\n\ndef emitt(state):\n global emitter\n tf = getTF() \n msg = struct.pack(\"?ddd\",state,tf[0],tf[1],tf[2])\n emitter.send(msg)\n\n\n\n\nTIME_STEP = 64\nrobot = Supervisor()\ntimestep = int(robot.getBasicTimeStep())\n\ndeltaTime = 0\n\nemitter = robot.getDevice(\"emitter\")\n \nscale_max = getMaxSize()\nscale_increment = getGrowth()\n\n\nsetSize(0.1)\n\n# Main loop:\n# - perform simulation steps until Webots is stopping the controller\nwhile robot.step(timestep) != -1:\n deltaTime += timestep\n # check if messages are send from WebUI \n\n scale = getSize()\n if scale < scale_max:\n size = scale + scale_increment\n print (\"scale -> \" + str(scale) + \" inc: \" + str(scale_increment) + \" new: \" + str(size))\n setSize(size)\n\n\n\n # do something every 500ms\n # if deltaTime > 500:\n # deltaTime = 0\n # sh_device.log(\"Hello World -> \" + str(deltaTime))\n\n pass\n\n# cleanup on Exit\n","repo_name":"goch/smarthome-webots","sub_path":"controllers/FireController/FireController.py","file_name":"FireController.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"5827184811","text":"\"\"\"\nCreated on 2017-11-25\n\nHelper functions for visualization.\n\n\n@author: mukundraj\n\n\"\"\"\n\nimport numpy as np\nfrom produtils import dprint\nimport matplotlib.pyplot as plt\n\n\n\nfilename = 'chem_sp_vectors.txt'\noutpath = 'output_tsvs/vector_figs/'\n\ndef draw_hist_figures(filename, outpath):\n \"\"\"\n Main file for chem mutag networks/ fb nets shortest path kernels for getting the\n vector histogram images to view with the interactive visualization.\n Args:\n filename:\n outpath:\n\n Returns:\n\n \"\"\"\n\n # read text file\n X = np.genfromtxt(filename)\n\n maxval = np.amax(X)\n ymax = maxval*1.1\n\n m,n = np.shape(X)\n bins = range(n)\n plt.figure()\n\n for i in range(m):\n\n # get row\n row = X[i,:]\n plt.plot(row)\n\n outfile = format(i, '04')+'.png'\n plt.ylim(ymax = ymax, ymin = 0)\n\n plt.savefig(outpath+outfile)\n\n plt.clf()\n\n plt.close()\n","repo_name":"mukundraj/dbvis","sub_path":"src/datarelated/helpers/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"6455141188","text":"import sys, os\nimport xml.dom\nimport xml.dom.minidom\nfrom xml.dom.minidom import parse, parseString, getDOMImplementation\n\nimpl = getDOMImplementation()\n\ndef ensure_list(obj):\n \"\"\"\n ensures the object passed is a list, so it is iterable.\n\n useful workaround until i decide if XMLNode.foo should always\n return a list of foo, even if there is only one foo child\n \"\"\"\n if len(obj):\n return obj\n else:\n return [obj]\n\nclass MissingRootTag(Exception):\n \"\"\"root tag name was not given\"\"\"\n\nclass InvalidXML(Exception):\n \"\"\"failed to parse XML input\"\"\"\n\nclass CannotSave(Exception):\n \"\"\"unable to save\"\"\"\n\nclass InvalidNode(Exception):\n \"\"\"not a valid minidom node\"\"\"\n\nclass XMLFile:\n \"\"\"\n Allows an xml file to be viewed and operated on\n as a python object.\n\n (If you're viewing the epydoc-generated HTML documentation, click the 'show private'\n link at the top right of this page to see all the methods)\n\n Holds the root node in the .root 
attribute, also in an attribute\n with the same name as this root node.\n \"\"\"\n def __init__(self, **kw):\n \"\"\"\n Create an XMLFile\n\n Keywords:\n - path - a pathname from which the file can be read\n - file - an open file object from which the raw xml\n can be read\n - raw - the raw xml itself\n - root - name of root tag, if not reading content\n\n Usage scenarios:\n 1. Working with existing content - you must supply input in\n one of the following ways:\n - 'path' must be an existing file, or\n - 'file' must be a readable file object, or\n - 'raw' must contain raw xml as a string\n 2. Creating whole new content - you must give the name\n of the root tag in the 'root' keyword\n\n Notes:\n - Keyword precedence governing existing content is:\n 1. path (if existing file)\n 2. file\n 3. raw\n - If working with existing content:\n - if the 'root' is given, then the content's toplevel tag\n MUST match the value given for 'root'\n - trying to _save will raise an exception unless 'path'\n has been given\n - if not working with existing content:\n - 'root' must be given\n - _save() will raise an exception unless 'path' has been given\n \"\"\"\n path = kw.get(\"path\", None)\n fobj = kw.get(\"file\", None)\n raw = kw.get(\"raw\", None)\n root = kw.get(\"root\", None)\n\n if path:\n self.path = path\n try:\n fobj = open(path)\n except IOError:\n pass\n else:\n self.path = None\n\n if fobj:\n raw = fobj.read()\n\n if raw:\n self.dom = xml.dom.minidom.parseString(raw)\n else:\n # could not source content, so create a blank slate\n if not root:\n # in which case, must give a root node name\n raise MissingRootTag(\n \"No existing content, so must specify root\")\n\n # ok, create a blank dom\n self.dom = impl.createDocument(None, root, None)\n\n # get the root node, save it as attributes 'root' and name of node\n rootnode = self.dom.documentElement\n\n # now validate root tag\n if root:\n if rootnode.nodeName != root:\n # the toplevel tag does not match the requested root\n raise InvalidXML(\"Gave root='%s', input has root='%s'\" % (\n root, rootnode.nodeName))\n\n # need this for recursion in XMLNode\n self._childrenByName = {}\n self._children = []\n\n # add all the child nodes\n for child in self.dom.childNodes:\n childnode = XMLNode(self, child)\n #print \"compare %s to %s\" % (rootnode, child)\n if child == rootnode:\n #print \"found root\"\n self.root = childnode\n setattr(self, rootnode.nodeName, self.root)\n\n def save(self, where=None, obj=None):\n \"\"\"\n Saves the document.\n\n If argument 'where' is given, saves to it, otherwise\n tries to save to the original given 'path' (or barfs)\n\n Value can be a string (taken to be a file path), or an open\n file object.\n \"\"\"\n obj = obj or self.dom\n\n if not where:\n if self.path:\n where = self.path\n\n if isinstance(where, str):\n where = open(where, \"w\")\n\n if not where:\n raise CannotSave(\"No save destination, and no original path\")\n\n where.write(obj.toxml())\n where.flush()\n\n def saveAs(self, path):\n \"\"\"\n save this time, and all subsequent times, to filename 'path'\n \"\"\"\n self.path = path\n self.save()\n\n def toxml(self):\n return self.dom.toxml()\n\n def __len__(self):\n \"\"\"\n returns number of child nodes\n \"\"\"\n return len(self._children)\n\n def __getitem__(self, idx):\n if isinstance(idx, int):\n return self._children[idx]\n else:\n return self._childrenByName[idx]\n\n\nclass XMLNode:\n \"\"\"\n This is the workhorse for the xml object interface\n\n (If you're viewing the epydoc-generated HTML documentation, click the 'show private'\n link 
at the top right of this page to see all the methods)\n\n \"\"\"\n\n # http://docs.python.org/reference/lexical_analysis.html#id6\n __RESERVED_WORDS = (\n \"and\",\"del\",\"class\",\"from\",\"not\",\"while\",\n \"as\",\"elif\",\"global\",\"or\",\"with\",\"assert\",\"else\",\"if\",\n \"pass\",\"yield\",\"break\",\"except\",\"import\",\"print\",\n \"class\",\"exec\",\"in\",\"raise\",\"continue\",\"finally\",\n \"is\",\"return\",\"def\",\"for\",\"lambda\",\"try\"\n )\n\n def __init__(self, parent, node):\n \"\"\"\n You shouldn't need to instantiate this directly\n \"\"\"\n self._parent = parent\n if isinstance(parent, XMLFile):\n self._root = parent\n else:\n self._root = parent._root\n self._node = node\n self._childrenByName = {}\n self._children = []\n\n # add ourself to parent's children registry\n parent._children.append(self)\n\n # the deal with named subtags is that we store the first instance\n # as itself, and with second and subsequent instances, we make a list\n parentDict = self._parent._childrenByName\n\n # If the name of the node is a python reserved word then upper-case it\n nodeName = node.nodeName\n if nodeName in self.__RESERVED_WORDS:\n nodeName = nodeName.upper()\n\n if nodeName not in parentDict:\n parentDict[nodeName] = parent.__dict__[nodeName] = self\n else:\n if isinstance(parentDict[nodeName], XMLNode):\n # this is the second child node of a given tag name, so convert\n # the instance to a list\n parentDict[nodeName] = parent.__dict__[nodeName] = [parentDict[nodeName]]\n parentDict[nodeName].append(self)\n\n # figure out our type\n self._value = None\n if isinstance(node, xml.dom.minidom.Text):\n self._type = \"text\"\n self._value = node.nodeValue\n elif isinstance(node, xml.dom.minidom.Element):\n self._type = \"node\"\n elif isinstance(node, xml.dom.minidom.Comment):\n self._type = \"comment\"\n self._value = node.nodeValue\n elif isinstance(node, xml.dom.minidom.DocumentType):\n #\n #Ignore doctype, could possibly check it....\n pass\n else:\n raise InvalidNode(\"node class %s\" % node.__class__)\n\n # and wrap all the child nodes\n for child in node.childNodes:\n XMLNode(self, child)\n\n def _render(self):\n \"\"\"\n Produces well-formed XML of this node's contents,\n indented as required\n \"\"\"\n return self._node.toxml()\n\n def __repr__(self):\n if self._type == \"node\":\n return \"<XMLNode: %s>\" % self._node.nodeName\n else:\n return \"<XMLNode type: %s>\" % self._type\n\n def __getattr__(self, attr):\n \"\"\"\n Fetches an attribute or child node of this tag\n\n If it's an attribute, then returns the attribute value as a string.\n\n If a child node, then:\n - if there is only one child node of that name, return it\n - if there is more than one child node of that name, return a list\n of child nodes of that tag name\n\n Supports some magic attributes:\n - _text - the value of the first child node of type text\n \"\"\"\n #print \"%s: __getattr__: attr=%s\" % (self, attr)\n\n if attr == '_text':\n # magic attribute to return text\n tnode = self['#text']\n if isinstance(tnode, list):\n tnode = tnode[0]\n return tnode._value\n\n if self._type in ['text', 'comment']:\n if attr == '_value':\n return self._node.nodeValue\n else:\n raise AttributeError(attr)\n\n if self._node.hasAttribute(attr):\n return self._node.getAttribute(attr)\n elif attr in self._childrenByName:\n return self._childrenByName[attr]\n\n #elif attr == 'value':\n # magic attribute\n\n else:\n raise AttributeError(attr)\n\n\n def __setattr__(self, attr, val):\n \"\"\"\n Change the value of an attribute of this tag\n\n The 
magic attribute '_text' can be used to set the first child\n text node's value\n\n For example::\n\n Consider:\n\n <somenode>\n <child>foo</child>\n </somenode>\n\n >>> somenode\n <XMLNode: somenode>\n >>> somenode.child\n <XMLNode: child>\n >>> somenode.child._text\n 'foo'\n >>> somenode._toxml()\n u'<somenode><child>foo</child></somenode>'\n >>> somenode.child._text = 'bar'\n >>> somenode.child._text\n 'bar'\n >>> somenode.child._toxml()\n u'<child>bar</child>'\n\n \"\"\"\n if attr.startswith(\"_\"):\n\n # magic attribute for setting _text\n if attr == '_text':\n tnode = self['#text']\n if isinstance(tnode, list):\n tnode = tnode[0]\n tnode._node.nodeValue = val\n tnode._value = val\n return\n\n self.__dict__[attr] = val\n elif self._type in ['text', 'comment']:\n self._node.nodeValue = val\n else:\n # discern between attribute and child node\n if attr in self._childrenByName:\n raise Exception(\"Attribute Exists\")\n self._node.setAttribute(attr, str(val))\n\n def _keys(self):\n \"\"\"\n Return a list of attribute names\n \"\"\"\n return list(self._node.attributes.keys())\n\n def _values(self):\n \"\"\"\n returns a list of attribute values for this tag\n \"\"\"\n return [self._node.getAttribute(k) for k in list(self._node.attributes.keys())]\n\n def _items(self):\n \"\"\"\n Returns a list of (attrname, attrval) tuples for this tag\n \"\"\"\n return [(k, self._node.getAttribute(k)) for k in list(self._node.attributes.keys())]\n\n def _has_key(self, k):\n \"\"\"\n returns True if this tag has an attribute of the given name\n \"\"\"\n return self._node.hasAttribute(k) or k in self._childrenByName\n\n def _get(self, k, default=None):\n \"\"\"\n returns the value of attribute k, or default if no such attribute\n \"\"\"\n if self._has_key(k):\n return getattr(self, k)\n else:\n return default\n def __len__(self):\n \"\"\"\n returns number of child nodes\n \"\"\"\n return len(self._children)\n\n def __getitem__(self, idx):\n \"\"\"\n if given key is numeric, return the nth child, otherwise\n try to return the child tag (or list of child tags) having\n the key as the tag name\n \"\"\"\n #print \"__getitem__: idx=%s\" % str(idx)\n\n if isinstance(idx, slice) or isinstance(idx, int):\n return self._children[idx]\n elif isinstance(idx, str):\n return self._childrenByName[idx]\n else:\n raise IndexError(idx)\n\n def _addNode(self, child):\n \"\"\"\n Tries to append a child node to the tree, and returns it\n\n Value of 'child' must be one of:\n - a string (in which case it is taken to be the name\n of the new node's tag)\n - a dom object, in which case it will be wrapped and added\n - an XMLNode object, in which case it will be added without\n wrapping\n \"\"\"\n\n if isinstance(child, XMLNode):\n\n # add it to our children registry\n self._children.append(child)\n\n parentDict = self._childrenByName\n nodeName = child._node.nodeName\n\n if nodeName not in parentDict:\n parentDict[nodeName] = self.__dict__[nodeName] = child\n else:\n if isinstance(parentDict[nodeName], XMLNode):\n # this is the second child node of a given tag name, so convert\n # the instance to a list\n parentDict[nodeName] = self.__dict__[nodeName] = [parentDict[nodeName]]\n parentDict[nodeName].append(child)\n\n # and stick it in the dom\n self._node.appendChild(child._node)\n\n return child\n\n elif isinstance(child, str):\n childNode = self._root.dom.createElement(child)\n self._node.appendChild(childNode)\n\n elif isinstance(child, xml.dom.minidom.Element):\n childNode = child\n child = childNode.nodeName\n self._node.appendChild(childNode)\n\n\n return XMLNode(self, childNode)\n\n def _addText(self, value):\n \"\"\"\n Tries to append a 
child text node, with the given text, to the tree,\n and returns the created node object\n \"\"\"\n childNode = self._root.dom.createTextNode(value)\n self._node.appendChild(childNode)\n return XMLNode(self, childNode)\n\n def _addComment(self, comment):\n \"\"\"\n Tries to append a child comment node (with the given text value)\n to the tree, and returns the created node object\n \"\"\"\n childNode = self._root.dom.createComment(comment)\n self._node.appendChild(childNode)\n return XMLNode(self, childNode)\n\n def _save(self, where=None):\n \"\"\"\n Generates well-formed XML from just this node, and saves it\n to a file.\n\n Argument 'where' is either an open file object, or a pathname\n\n If 'where' is not given, then saves the entire document tree.\n \"\"\"\n if not where:\n self._root.save()\n else:\n self._root.save(where, self._node)\n\n def _toxml(self):\n \"\"\"\n renders just this node out to raw xml code\n \"\"\"\n return self._node.toxml()\n","repo_name":"paparazzi/paparazzi","sub_path":"sw/tools/tcp_aircraft_server/phoenix/xmlobject.py","file_name":"xmlobject.py","file_ext":"py","file_size_in_byte":15426,"program_lang":"python","lang":"en","doc_type":"code","stars":1440,"dataset":"github-code","pt":"71"}
{"seq_id":"43335409626","text":"from geopy.distance import geodesic\n\n\nboundary = {}\nboundary[0]= [[47.376762, 8.356943],[47.376846, 8.356880]]\nboundary[1]= [[47.376846, 8.356880],[47.376804, 8.356722]]\nboundary[2]= [[47.376804, 8.356722],[47.376724, 8.356810]]\ntest = type(boundary[0][0])\nprint (type(test))\n\nclass gpsDistance:\n\n\tdef getDistance(self,pointA,pointB):\n\t\tdistance = geodesic(pointA, pointB).miles\n\t\tdistance = self.milesToMeters(distance)\n\t\treturn (distance)\n\n\tdef getRelativeDistanceToRobot(self,positionRobot, boundary, element):\n\t\tlength_A = self.getDistance(positionRobot, boundary[element][0])\n\t\tlength_B = self.getDistance(positionRobot, boundary[element][1])\n\t\treturn length_A + length_B\n\t\t\n\tdef milesToMeters(self,value):\n\t\t# rough conversion: 1 mile is 1609.344 m, so 1.6 * 1000 is an approximation\n\t\tvalue = value * 1.6 * 1000\n\t\treturn value\n\t\t\t\n\t# for i in boundary:\n\t\t# print(getRelativeDistanceToRobot(positionRobot, boundary,i))\n","repo_name":"frankbrunner/LMRP","sub_path":"GPS_Distance.py","file_name":"GPS_Distance.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
{"seq_id":"41126943834","text":"from .input import Input, ZerosInput, PoolingInput, ConvolutionInput, DenseInput, IdentityInput\nfrom .output import Output, OutCell, OutBlock, Out\nfrom .operation import Operation, Combination, Sum, Flat\nfrom .block import Block\nfrom .cell import Cell\n\ndef lenet5_blocks():\n blocks = []\n\n block1 = Block()\n block1.set_stride(\"1x1\")\n block1.set_features(600)\n cell11 = Cell(input1 = ConvolutionInput((5,5),None,None,\"same\", \"tanh\"))\n block1.append_cell(cell11)\n cell12 = Cell(input1 = PoolingInput((2,2),None,\"average\", \"valid\"))\n block1.append_cell(cell12)\n\n block2 = Block()\n block2.set_stride(\"1x1\")\n cell21 = Cell(input1 = ConvolutionInput((5,5),None, 12,\"same\", \"tanh\"))\n block2.append_cell(cell21)\n\n block22 = Block()\n block22.set_stride(\"2x2\")\n cell22 = Cell(input1 = PoolingInput((2,2),None,\"average\", \"valid\"))\n block22.append_cell(cell22)\n\n\n block3 = Block()\n block3.set_stride(\"1x1\")\n cell31 = Cell(input1 = ConvolutionInput((5,5),(1,1),120,\"valid\", \"tanh\"))\n block3.append_cell(cell31)\n\n block4 = Block()\n block4.set_stride(\"1x1\")\n cell41 = 
Cell(input1 = DenseInput(84, \"tanh\"))\n block4.append_cell(cell41)\n\n blocks.extend([block1, block2,block22, block3, block4])\n\n return blocks","repo_name":"yamizi/FeatureNet","sub_path":"model/leNet.py","file_name":"leNet.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"}
{"seq_id":"13588281441","text":"import numpy as np\n\n\ndef applied(loss_fnc, name):\n\n def applied_loss_fnc(prediction=None, truth=None):\n # apply the wrapped loss function to the named output\n loss, gradient = loss_fnc(\n prediction=prediction[name],\n truth=truth[name])\n return loss, { name: gradient } \n return applied_loss_fnc\n\ndef running_rnn_loss(input_var, output_var, basic_loss):\n\n def loss(input=None, prediction=None):\n losses = []\n derivs = []\n shape = None\n for t in range(0, len(prediction) - 1):\n loss, deriv = basic_loss(\n prediction=prediction[t][output_var],\n truth=input[input_var][:,t + 1,:]\n )\n derivs.append({ output_var: deriv })\n losses.append(loss)\n shape = deriv.shape\n derivs = derivs + [{output_var: np.zeros(shape)}]\n return sum(losses), derivs\n \n return loss\n\ndef mean_squared_loss(prediction=None, truth=None):\n assert prediction.shape == truth.shape\n loss = 0.5 * np.sum(np.power(prediction - truth, 2))\n deriv = prediction - truth\n return loss, deriv\n","repo_name":"jctillman/network-practice","sub_path":"network/stateless/loss/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"11000022618","text":"import os\nimport sys\nimport shutil\n\nimport subprocess as sp\n\ndrc = os.path.abspath(os.path.dirname( __file__ )) # get path of program\n\nfilemap = {\n \"fort.7\" : \"dls76.inp\",\n \"fort.8\" : \"nfilea.inp\",\n \"fort.10\" : \"spgr.dat\",\n \"fort.11\" : \"powd10.inp\",\n \"fort.12\" : \"dls76.out\" }\n\ndef clean():\n fort = [\"fort.7\", \"fort.8\", \"fort.10\", \"fort.11\", \"fort.12\"]\n for f in fort:\n if os.path.exists(f):\n os.remove(f)\n\ndef move_files():\n fort = [\"fort.8\", \"fort.11\", \"fort.12\"]\n for f in fort:\n target = filemap[f]\n if os.path.exists(target):\n os.remove(target)\n if os.path.exists(f):\n os.rename(f, target)\n\ndef dls76(args=[]):\n spgr_dat = os.path.join(drc, \"..\", \"resources\", \"spgr.dat\")\n\n dls76_exe = '_dls76.x'\n\n if not os.path.exists(spgr_dat):\n print(\"Cannot find\", spgr_dat)\n sys.exit()\n\n clean()\n\n try:\n inp = args[0] \n except IndexError:\n inp = \"dls76.inp\"\n\n try:\n out = args[1] \n except IndexError:\n out = \"dls76.out\"\n\n if not os.path.exists(inp):\n print(\"Cannot find\", inp)\n sys.exit()\n\n inp = os.path.abspath(inp)\n out = os.path.abspath(out)\n\n shutil.copyfile(inp, \"fort.7\")\n shutil.copyfile(spgr_dat, \"fort.10\")\n\n sp.call([dls76_exe,])\n\n move_files()\n clean()\n\ndef dls76_entry():\n args = sys.argv[1:]\n dls76(args)\n\nif __name__ == '__main__':\n dls76_entry()","repo_name":"stefsmeets/focus_package","sub_path":"focus_tools/pydls.py","file_name":"pydls.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"}
{"seq_id":"37146698697","text":"# coding=utf-8\nfrom appium import webdriver\nimport time\nfrom selenium.common.exceptions import NoSuchElementException\nimport keyword\n\ndesired_caps = {\n 'platformName': 'Android',\n 'platformVersion': '5.1',\n 'deviceName': '88MFBM72HGXZ',\n 'udid': 
'88MFBM72HGXZ',\n 'appPackage': 'com.izerocar.zycx',\n 'appActivity': '.business.splash.SplashActivity',\n 'appWaitActivity': '.business.home.HomeActivity',\n 'unicodeKeyboard': True,\n 'resetKeyboard': True,\n}\nmm = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\ntime.sleep(3)\n\nmm.find_element_by_id('com.izerocar.zycx:id/iv_close').click()\n# Check whether a promo pop-up page is shown; if so, click to close it\ntry:\n closePopup = mm.find_element_by_id('com.izerocar.zycx:id/iv_close')\nexcept NoSuchElementException:\n pass\nelse:\n closePopup.click()\ntime.sleep(2)\n\n\ndef login_test():\n Login = mm.find_element_by_id('com.izerocar.zycx:id/btn_portrait').click()\n Login = mm.find_element_by_id('com.izerocar.zycx:id/et_phone').send_keys('13804380438')\n Login = mm.find_element_by_id('com.izerocar.zycx:id/et_verification').send_keys('666666')\n Login = mm.find_element_by_id('com.izerocar.zycx:id/btn_login').click()\n\n\ntry:\n mm.find_element_by_id('com.izerocar.zycx:id/btn_confirm')\nexcept NoSuchElementException:\n login_test()\nelse:\n mm.find_element_by_id('com.izerocar.zycx:id/btn_confirm').click()\n quit()\n\n\n","repo_name":"xinguoguoyo/project","sub_path":"boke/python/android.py","file_name":"android.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"4911920468","text":"import torch\nimport os\nimport numpy as np\nimport cv2\n\nclass SimpleInferenceDataset(torch.utils.data.Dataset):\n '''\n A dataset object that lists the image files in a directory for inference\n '''\n def __init__(self, dataset_path, file_extension='.png', downsample_scale=None, filter=None):\n\n image_files = []\n\n for filename in os.listdir(dataset_path):\n if filename.endswith(file_extension) and filter in filename:\n image_files.append(filename)\n\n self.image_files = image_files\n self.length = len(image_files)\n self.dataset_path = dataset_path\n self.downsample_scale = downsample_scale\n self.filter = filter\n\n def __getitem__(self, index):\n label = np.zeros(1)\n image_filename = self.image_files[index]\n x = self._process_input(image_filename)\n return x, label, image_filename\n\n def _process_input(self, image_filename):\n img_path = os.path.join(self.dataset_path, image_filename)\n img = cv2.imread(img_path)\n\n if self.downsample_scale is not None:\n scale = self.downsample_scale\n img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)\n\n # BGR to RGB\n img = img[...,::-1]\n\n img = img.astype(np.float32) / 255.\n # move from (x, y, c) to (c, x, y) PyTorch style\n img = np.moveaxis(img, -1, 0)\n\n return img\n\n\n def __len__(self):\n\n return self.length","repo_name":"yonkshi/urban_dl","sub_path":"experiment_manager/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"19468346969","text":"def increase_decrease(items, value):\n # increase every element that is less than or equal to the removed value by that value;\n # all other elements are decreased by it\n for x in range(len(items)):\n if items[x] <= value:\n items[x] += value\n else:\n items[x] -= value\n return items\n\n\ninput_list = list(map(int, input().split()))\ntotal_sum = 0\n\nwhile input_list:\n # When an index is received, remove the element at that index from the sequence\n current_index = int(input())\n\n # If the given index is less than 0, remove the first element of the sequence and copy the last element to its place\n # the 
elements must still be increased and decreased\n if current_index < 0:\n current_element_value = input_list.pop(0)\n total_sum += current_element_value\n input_list.insert(0, input_list[-1])\n input_list = increase_decrease(input_list, current_element_value)\n\n # If the given index is greater than the last index of the sequence, remove the last element from the sequence, and\n # copy the first element to its place\n # the elements must still be increased and decreased\n elif current_index >= len(input_list):\n current_element_value = input_list.pop(-1)\n total_sum += current_element_value\n input_list.append(input_list[0])\n input_list = increase_decrease(input_list, current_element_value)\n else:\n current_element_value = input_list.pop(current_index)\n total_sum += current_element_value\n input_list = increase_decrease(input_list, current_element_value)\n\nprint(total_sum)\n","repo_name":"azashev/Programming-Fundamentals-with-Python-Softuni","sub_path":"Lists Advanced/pokemon_dont_go.py","file_name":"pokemon_dont_go.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"74576327268","text":"from typing import Any, List, Optional\nfrom langchain.llms.huggingface_pipeline import HuggingFacePipeline, VALID_TASKS\nfrom langchain.callbacks.manager import CallbackManagerForLLMRun\nfrom langchain.llms.utils import enforce_stop_tokens\nfrom langchain.output_parsers import RegexParser\n\n\nVALID_TASKS = (*VALID_TASKS, \"question-answering\")\n\n\nclass ExtendedHuggingFacePipeline(HuggingFacePipeline):\n with_score: bool = False\n\n qa_input_parser = RegexParser(\n regex=r\"<Вопрос>: (.*?) <Контекст>: (.*)\",\n output_keys=[\"question\", \"context\"],\n default_output_key=\"question\",\n )\n def _call(\n self,\n prompt: str,\n stop: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForLLMRun] = None,\n **kwargs: Any,\n ) -> str:\n \n if self.pipeline.task == \"question-answering\":\n prompt = self.qa_input_parser.parse(prompt.replace('\\n', ' '))\n response = self.pipeline(prompt)\n\n if self.pipeline.task == \"text-generation\":\n # Text generation return includes the starter text.\n text = response[0][\"generated_text\"][len(prompt) :]\n elif self.pipeline.task == \"text2text-generation\":\n text = response[0][\"generated_text\"]\n elif self.pipeline.task == \"summarization\":\n text = response[0][\"summary_text\"]\n elif self.pipeline.task == \"question-answering\":\n if self.with_score:\n score = int(float(response['score']) * 100)\n text = f\": {response['answer']}\\n: {score}\"\n else:\n text = response['answer']\n else:\n raise ValueError(\n f\"Got invalid task {self.pipeline.task}, \"\n f\"currently only {VALID_TASKS} are supported\"\n )\n \n if stop:\n # This is a bit hacky, but I can't figure out a better way to enforce\n # stop tokens when making calls to huggingface_hub.\n text = enforce_stop_tokens(text, stop)\n return text\n","repo_name":"MakArtKar/LLM_QA_doc_corpus","sub_path":"src/utils/langchain.py","file_name":"langchain.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"73293380711","text":"from django.urls import path\nfrom .views import StockCreateView, StockListView,StockUpdateView,StockDeleteView,StockDetailView, home, details\n\nurlpatterns = [\n path('', home, name='home'),\n path('add/', StockCreateView.as_view(), name='add'),\n path('/update/', 
StockUpdateView.as_view(), name='update'),\n path('/', StockDetailView.as_view(), name='detail'),\n path('/delete/', StockDeleteView.as_view(), name='delete'),\n path('stock_list/', details, name='list'),\n]","repo_name":"kenmugy/stockmgt-unfinished","sub_path":"stckmgt/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
{"seq_id":"10700186970","text":"from collections import defaultdict\n\nimport networkx as nx\nimport pydotplus\n\nfrom .syntax import *\nfrom .structure import *\nfrom .analyze import *\n\n# These are used to make node scopes more visible in scope graphs\nnode_scope_colors = [\n 'blue',\n 'red',\n 'green',\n 'purple',\n 'lightblue',\n 'orange',\n 'cyan',\n 'magenta',\n]\n\nclass StmtGraph(object):\n \"\"\"Holds a directional graph of statements as nodes including the\n Entry and Exit nodes.\n\n Conditional edges are labeled with the attribute 'condition',\n whose value is either True or False.\n\n If the graph itself needs to be referenced it is held in the\n object property \"graph\".\n \"\"\"\n def __init__(self):\n self.graph = nx.DiGraph()\n\n @classmethod\n def from_section(cls, section):\n \"\"\"Translate a Cobol Section into a statement graph.\n \"\"\"\n graph = cls()\n\n for para in section.paras.values():\n for sentence in para.sentences:\n for stmt in sentence.stmts:\n if isinstance(stmt, SequentialStatement):\n graph._add_edge(stmt, stmt.next_stmt)\n\n elif isinstance(stmt, BranchStatement):\n graph._add_edge(stmt, stmt.true_stmt, condition=True)\n graph._add_edge(stmt, stmt.false_stmt, condition=False)\n\n elif isinstance(stmt, TerminatingStatement):\n graph._add_edge(stmt, Exit)\n\n else:\n raise RuntimeError('Unexpected statement type: {}'.format(stmt))\n\n graph._add_edge(Entry, section.get_first_stmt())\n\n return graph\n\n def _add_edge(self, src, dest, **attr):\n if dest is None:\n dest = Exit\n self.graph.add_edge(src, dest, attr)\n\n def reachable_subgraph(self):\n \"\"\"Return a new StmtGraph that only contains the nodes reachable from\n Entry.\n \"\"\"\n sub_graph = StmtGraph()\n sub_graph.graph.add_edges_from(nx.edge_dfs(self.graph, Entry))\n return sub_graph\n\n\n def print_stmts(self):\n stmts = self.graph.nodes()\n stmts.sort(key = lambda s: s.source.from_char)\n for stmt in stmts:\n print(stmt)\n\n\nclass StructureGraphBase(object):\n def __init__(self, debug=False):\n self.graph = nx.MultiDiGraph()\n self._debug = debug\n\n def print_nodes(self):\n nodes = self.graph.nodes()\n nodes.sort(key = lambda n: n.source.from_char)\n for node in nodes:\n print('{} [loop {}]'.format(node, node.scope))\n for n, next_node, data in self.graph.out_edges_iter(node, data=True):\n if data.get('condition') == True:\n print('True:')\n elif data.get('condition') == False:\n print('False:')\n\n for stmt in data['stmts']:\n print(stmt)\n\n print('-> {}'.format(next_node))\n\n\n def write_dot(self, output_path):\n \"\"\"Write a graphviz .dot representation of the graph to output_path.\n \"\"\"\n\n # Write the graph ourselves to output statements as edge labels\n\n dot = pydotplus.Dot('', graph_type='digraph', strict=False)\n dot.set_edge_defaults(labeljust='l')\n\n added_nodes = set()\n\n scope_colors = defaultdict(\n lambda: node_scope_colors[len(scope_colors) % len(node_scope_colors)])\n\n for src, dest, data in self.graph.edges_iter(data=True):\n src_id = str(id(src))\n dest_id = str(id(dest))\n\n if src_id not in added_nodes:\n 
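                # A note on the two guards in this loop: 'scope_colors' is a
                # defaultdict, so the first lookup for a previously unseen scope
                # assigns it the next entry of node_scope_colors (wrapping around
                # via the modulo), and 'added_nodes' makes sure each node is added
                # to the Dot graph only once, since the edge iteration can visit
                # the same endpoint many times.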
added_nodes.add(src_id)\n dot.add_node(pydotplus.Node(src_id, label=str(src), color=scope_colors[src.scope]))\n\n if dest_id not in added_nodes:\n added_nodes.add(dest_id)\n dot.add_node(pydotplus.Node(dest_id, label=str(dest), color=scope_colors[src.scope]))\n\n stmts = data['stmts']\n condition = data.get('condition')\n\n label = ''\n if condition is not None:\n label = 'if {}:\\n'.format(condition)\n\n label += '\\n'.join((str(s) for s in stmts))\n\n if label:\n dot.add_edge(pydotplus.Edge(src_id, dest_id, label=label))\n else:\n dot.add_edge(pydotplus.Edge(src_id, dest_id))\n\n with open(output_path, mode='wt', encoding='utf-8') as f:\n f.write(dot.to_string())\n\n\nclass CobolStructureGraph(StructureGraphBase):\n \"\"\"A MultiDiGraph representing the structure of a Cobol program.\n\n Instead of statements, the graph nodes are one of:\n\n ## Entry singleton:\n Start of execution. No in edges, one out edge.\n\n ## Exit singleton:\n End of execution. At least one in edge, no out edges.\n\n ## Branch instances:\n At least one in edge, two out edges. The out edges are identified by the attributes\n condition=True and condition=False, respectively.\n\n ## Join instances:\n At least two in edges, one out edge.\n\n The edges between the nodes holds the sequential cobol statements in the\n edge attribute 'stmts'. This may be an empty list, but it is always present.\n \"\"\"\n\n @classmethod\n def from_stmt_graph(cls, stmt_graph):\n cobol_graph = cls()\n\n branch_nodes = []\n join_nodes = []\n node_stmts = {}\n\n # Find all stmts that are branches or joins and wrap them\n for stmt in stmt_graph.graph:\n if isinstance(stmt, BranchStatement):\n n = Branch(stmt)\n branch_nodes.append(n)\n node_stmts[stmt] = n\n\n elif isinstance(stmt, TerminatingStatement):\n node_stmts[stmt] = Exit\n\n elif stmt is Exit:\n node_stmts[Exit] = Exit\n\n elif stmt_graph.graph.in_degree(stmt) > 1:\n n = Join(stmt)\n join_nodes.append(n)\n node_stmts[stmt] = n\n\n # Add statements from Entry node\n nbrs = stmt_graph.graph.successors(Entry)\n assert len(nbrs) == 1\n cobol_graph._add_branch_edge(stmt_graph, node_stmts, Entry, nbrs[0])\n\n # Add statements from each Branch node\n for node in branch_nodes:\n cobol_graph._add_branch_edge(stmt_graph, node_stmts, node, node.stmt.true_stmt, condition=True)\n cobol_graph._add_branch_edge(stmt_graph, node_stmts, node, node.stmt.false_stmt, condition=False)\n\n # Add statements from all join nodes,\n for node in join_nodes:\n # Temporarily drop it to avoid detecting false self-loop\n del node_stmts[node.stmt]\n cobol_graph._add_branch_edge(stmt_graph, node_stmts, node, node.stmt)\n node_stmts[node.stmt] = node\n\n return cobol_graph\n\n\n def _add_branch_edge(self, stmt_graph, node_stmts, source_node, start_stmt, **attrs):\n stmt = start_stmt or Exit\n stmts = []\n dest_node = None\n\n while stmt not in node_stmts:\n stmts.append(stmt)\n nbrs = stmt_graph.graph.successors(stmt)\n assert len(nbrs) == 1\n stmt = nbrs[0]\n\n if stmt is start_stmt:\n # This is a self-loop\n dest_node = source_node\n break\n\n if dest_node is None:\n dest_node = node_stmts[stmt]\n\n attrs['stmts'] = stmts\n self.graph.add_edge(source_node, dest_node, attr_dict=attrs)\n\n\n\nclass AcyclicStructureGraph(StructureGraphBase):\n \"\"\"Similar to a CobolStructureGraph, but loops are broken up by adding\n Loop and ContinueLoop nodes to produce a DAG.\n\n A Loop node identifies the start of a loop. 
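    (Loops are detected as strongly connected components. A minimal
    doctest-style sketch of that detection, using networkx directly and
    independent of this module's classes; output shown for networkx 2.x,
    where components are sets:

        >>> import networkx as nx
        >>> g = nx.MultiDiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
        >>> [c for c in nx.strongly_connected_components(g) if len(c) > 1]
        [{1, 2, 3}]

    The component {1, 2, 3} is the kind of cycle that is broken up with a
    Loop/ContinueLoop pair.)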
It replaces a Join\n node, and is put in front of other nodes.\n\n All edges that pointed to the original join node are replaced by an\n edge to a ContinueLoop node that references the Loop node object.\n\n Each node in a loop will have two attributes:\n - 'scopes': a set of all the Loop objects it belongs to\n - 'scope': the inner-most Loop object it belongs to\n\n The Loop object itself does not belong to the loop, since it will\n be replaced by a statement that wraps the loop statements.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(AcyclicStructureGraph, self).__init__(*args, **kwargs)\n self._loops = []\n\n\n @classmethod\n def from_cobol_graph(cls, cobol_graph):\n \"\"\"Identify loops in a CobolStructureGraph and break them by adding Loop\n and ContinueLoop nodes. Returns the resulting AcyclicStructureGraph.\n \"\"\"\n dag = cls()\n\n # Copy by way of edges, to avoid getting copies of the node objects\n dag.graph.add_edges_from(cobol_graph.graph.edges(keys=True, data=True))\n\n # Loops are strongly connected components, i.e. a set of nodes\n # which can all reach the other ones via some path through the\n # component.\n\n # Since loops can contain loops, this is done repeatedly until all\n # loops have been broken. At this stage single-node loops are ignored,\n # since nx.strongly_connected_components() returns components also\n # consisting of a single node without any self-looping edge.\n while True:\n components = [c for c in nx.strongly_connected_components(dag.graph)\n if len(c) > 1]\n if not components:\n break\n\n for component in components:\n dag._break_component_loop(component)\n\n # Finally find any remaining single-node loops\n for node in list(dag.graph):\n if dag.graph[node].get(node) is not None:\n dag._break_component_loop({node})\n\n return dag\n\n\n def _break_component_loop(self, component):\n\n start_node = self._find_loop_start(component)\n\n loop = Loop(start_node.stmt)\n loop.scope = start_node.scope\n self._loops.append(loop)\n\n continue_loop = ContinueLoop(loop)\n continue_loop.scope = loop\n loop.continue_loop = continue_loop\n\n for node in component:\n node.scope = loop\n\n # Break in-loop edges to the start node, and redirect out-of-loop edges to the loop node\n for edge in self.graph.in_edges(start_node, data=True, keys=True):\n src, dest, key, data = edge\n self.graph.remove_edge(src, dest, key)\n\n if src in component:\n self.graph.add_edge(src, continue_loop, key, data)\n else:\n self.graph.add_edge(src, loop, key, data)\n\n if isinstance(start_node, Join):\n # Replace Join node\n for edge in self.graph.out_edges(start_node, data=True, keys=True):\n src, dest, key, data = edge\n self.graph.remove_edge(src, dest, key)\n self.graph.add_edge(loop, dest, key, data)\n\n self.graph.remove_node(start_node)\n else:\n # Wire to first node in loop\n assert isinstance(start_node, (Loop, Branch))\n self.graph.add_edge(loop, start_node, stmts=[])\n\n\n def _find_loop_start(self, component):\n # The node with the most in edges from the rest of the graph\n # is considered the loop start\n return max(component, key = lambda node: sum((\n 1 for pred in self.graph.predecessors_iter(node)\n if pred not in component)))\n\n\nclass ScopeStructuredGraph(StructureGraphBase):\n \"\"\"A structured graph that has been analysed to isolate scopes\n (i.e. nested loops) by removing cross-scope edges. 
The purpose is\n to get a structured graph that can easily be flattened into linear\n code.\n\n This identifies conditional loops and replaces such Loop nodes\n with ConditionalLoop objects. A ConditionalLoop always have a\n condition=False edge leading to the LoopExit node, while the\n condition=True edge leads to the nodes in the loop.\n\n All cross-scope edges are processed to ensure they go to either a\n LoopExit or a GotoLabel node:\n\n - The best candidate node for the exit of a Loop scope is\n identified and the edges from the inner loop to this node is\n replaced by edges to a LoopExit node in the parent scope. This\n LoopExit node has a single edge to the target node.\n\n - ContinueLoop nodes that cross scopes are replaced by regular\n jumps.\n\n - Edges that crosses scopes (excluding edges from Loop nodes, or\n to Exit or LoopExit nodes) are replaced by edges to new GotoNode\n nodes in the source scope.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ScopeStructuredGraph, self).__init__(*args, **kwargs)\n\n # Map (scope, dest) node -> GotoNode objects\n self._goto_nodes = {}\n\n\n def flatten_block(self, keep_all_cobol_stmts=False):\n \"\"\"Translate the graph structure to a Block of CobolStatement or\n structure elements and return it.\n \"\"\"\n scope = RootReductionScope(self.graph, keep_all_cobol_stmts, debug=self._debug)\n block = scope.reduce()\n return block\n\n\n @classmethod\n def from_acyclic_graph(cls, acyclic_graph, debug=False):\n scope_graph = cls(debug=debug)\n\n # Copy by way of edges, to avoid getting copies of the node objects\n scope_graph.graph.add_edges_from(acyclic_graph.graph.edges(keys=True, data=True))\n\n # Find nodes that are LoopExits\n for loop in acyclic_graph._loops:\n if not scope_graph._find_conditional_loop(loop):\n scope_graph._find_loop_exit(loop)\n\n # Drop cross-scope ContinueLoops\n for node in scope_graph.graph.nodes():\n if isinstance(node, ContinueLoop):\n scope_graph._continue_to_goto(node)\n\n # Identify all cross-scope gotos\n for edge in scope_graph.graph.edges(keys=True, data=True):\n src, dest, key, data = edge\n if not (dest is Exit\n or isinstance(src, Loop)\n or isinstance(dest, LoopExit)\n or src.scope is dest.scope):\n scope_graph._goto_node(src, dest, key, data)\n\n return scope_graph\n\n\n def _find_conditional_loop(self, loop):\n \"\"\"Identify a conditional Loop and its LoopExit. Returns True if such\n a loop was found, False otherwise.\n\n To be a conditional loop the first node must be a Branch\n without any preceding statements, one edge of the branch must\n belong to the loop scope, while the other edge must leave the\n scope without any additional statements. 
The dest node of the\n edge leaving the loop will be the LoopExit.\n \"\"\"\n\n src, dest, data = out_edge(self.graph, loop)\n\n if not isinstance(dest, Branch):\n return False\n\n if suppress_statements(data['stmts']):\n return False\n\n branch = dest\n condition = branch.condition\n then_edge, else_edge = out_condition_edges(self.graph, branch)\n then_node = then_edge[1]\n else_node = else_edge[1]\n\n # Start checking the inverse and flip it if it might qualify\n if else_node.scope is loop and then_node.scope is not loop:\n then_edge, else_edge = else_edge, then_edge\n then_node, else_node = else_node, then_node\n condition = condition.invert()\n\n if not (then_node.scope is loop and else_node.scope is not loop):\n # Not a qualifying branch\n return False\n\n then_data = then_edge[2]\n else_data = else_edge[2]\n\n # There cannot be any statements in the else branch for this to be a while loop\n if suppress_statements(else_data['stmts']):\n return False\n\n loop.condition = condition\n\n loop_exit = LoopExit(loop)\n loop_exit.scope = else_node.scope\n loop.loop_exit = loop_exit\n\n # Remove the branch and move the edges to the loop node\n self.graph.remove_node(branch)\n self.graph.add_edge(loop, then_node, stmts=then_data['stmts'], condition=True)\n self.graph.add_edge(loop, loop_exit, stmts=[], condition=False)\n\n # Move any other loop scope edges to the else node to the loop exit node\n for src, dest, key, data in self.graph.in_edges(else_node, keys=True, data=True):\n if src.scope is loop:\n self.graph.remove_edge(src, dest, key)\n self.graph.add_edge(src, loop_exit, key, data)\n\n self.graph.add_edge(loop_exit, else_node, stmts=[])\n\n return True\n\n\n def _find_loop_exit(self, loop):\n # Map from exit nodes to their in edges from this loop scope\n exit_edges = defaultdict(list)\n\n for edge in self.graph.edges_iter(keys=True, data=True):\n src, dest, key, data = edge\n if dest is not Exit and src.scope is loop and dest.scope is loop.scope:\n exit_edges[dest].append(edge)\n\n if not exit_edges:\n # No loop exits\n return\n\n def exit_weight(kv):\n dest = kv[0]\n edges = kv[1]\n\n # Use mot popular destination\n weight = len(edges)\n\n # Prioritise non-jumps\n if not isinstance(dest, JumpNodeBase):\n weight *= 10\n\n # Break ties by jumping the shortest distance possible\n weight += 1.0 / abs(dest.source.from_char - loop.source.from_char)\n return weight\n\n exits = sorted(exit_edges.items(), key=exit_weight, reverse=True)\n exit_node = exits[0][0]\n edges = exits[0][1]\n\n loop_exit = LoopExit(loop)\n loop_exit.scope = exit_node.scope\n loop.loop_exit = loop_exit\n\n self.graph.add_edge(loop_exit, exit_node, stmts=[])\n\n for src, dest, key, data in edges:\n self.graph.remove_edge(src, dest, key)\n self.graph.add_edge(src, loop_exit, key, data)\n\n if self._debug:\n loop.stmt.comment = 'cobolsharp: loop exit candidates:\\n{}'.format(\n '\\n'.join([' {}'.format(x[0]) for x in exits]))\n\n\n def _continue_to_goto(self, continue_node):\n \"\"\"Change all in edges to a ContinueLoop that crosses scope to point\n directly to the loop instead. 
In the next step it will be\n turned into a GotoLabel edge.\n \"\"\"\n\n edges = self.graph.in_edges(continue_node, keys=True, data=True)\n for src, dest, key, data in edges:\n if src is not continue_node.loop and src.scope is not continue_node.loop:\n self.graph.remove_edge(src, dest, key)\n self.graph.add_edge(src, continue_node.loop, key, data)\n\n # Remove the continue if it was all turned into gotos\n if self.graph.in_degree(continue_node) == 0:\n continue_node.loop.continue_node = None\n self.graph.remove_node(continue_node)\n\n\n def _goto_node(self, src, dest, key, data):\n \"\"\"Change a cross-scope edge to a GotoNode().\n \"\"\"\n\n goto_node = self._goto_nodes.get((src.scope, dest))\n if goto_node is None:\n goto_node = GotoNode(dest)\n goto_node.scope = src.scope\n self._goto_nodes[(src.scope, dest)] = goto_node\n\n self.graph.remove_edge(src, dest, key)\n self.graph.add_edge(src, goto_node, key, data)\n","repo_name":"petli/cobol-sharp","sub_path":"src/CobolSharp/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":19340,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"71"} +{"seq_id":"40781596166","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 16 12:08:25 2022\r\n\r\n@author: paulm\r\n\"\"\"\r\n\r\n# Exercise for the online course : \r\n# \"Web Scraping in Python With BeautifulSoup & Selenium 2022\"\r\n\r\n# EX 2 : Scrap data from a Table (NFL Standings 2022 regular season)\r\n\r\n#-------------------------------------------------------------------\r\n\r\n##### setup \r\nimport requests\r\nfrom bs4 import BeautifulSoup \r\nimport pandas as pd\r\n\r\nurl = 'https://www.nfl.com/standings/league/2019/REG'\r\n\r\npage = requests.get(url)\r\n#print(page)\r\n\r\n\r\n##### get HTML code\r\nsoup = BeautifulSoup(page.text, 'lxml')\r\n#print(soup)\r\n\r\n\r\n##### get nested HTML (only the table)\r\ntable = soup.find('div', class_='d3-o-table--horizontal-scroll')\r\n#print(table)\r\n\r\n\r\n##### get columns names\r\nheaders = []\r\nfor th in table.find_all('th'):\r\n header = th.text\r\n headers.append(header)\r\n#print(headers)\r\n\r\n\r\n##### create dataframe\r\ndf = pd.DataFrame(columns = headers)\r\nfor tr in table.find_all('tr')[1:]: # not getting headers again \r\n raw_data = tr.find_all('td') # extract row data\r\n row = [el.text for el in raw_data] # extract each value for each column\r\n idx = len(df) # row index\r\n df.loc[idx] = row\r\n\r\nprint(df)\r\n\r\ndf.to_csv(r\"C:\\Users\\paulm\\OneDrive\\Documents\\WebScraping\\NFL_2k19_standings.csv\")\r\n","repo_name":"paulmaitre/WebScraping-Udemy","sub_path":"standings_NFL.py","file_name":"standings_NFL.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"70390752229","text":"# import collections\nclass Solution:\n def singleNumber(self, nums: List[int]) -> int:\n # count = collections.Counter(nums)\n # return [i for i in count if count[i]==1][0]\n \n # without extra memory\n res = 0\n for num in nums:\n res ^= num # XOR \n return res\n \n # 4 1 2 1 2\n # 100 001 010 001 010\n # res: 100 101 111 110 100","repo_name":"Jerrydepon/LeetCode","sub_path":"12_bit manipulation/easy/136. Single Number.py","file_name":"136. 
Single Number.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12812041593","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.views import View\n\n# Create your views here.\n\n\ndef login(request):\n print(request) # WSGIRequest类的实例化对象\n print(request.method)\n print(request.POST)\n print(request.GET) # request.GET.get('a') == 1\n print(request.path) # 获取当前请求路径\n print(request.get_full_path()) # 获取当前请求路径包含查询参数\n print(request.META) # 获取所有请求头的信息 {''HTTP_USER_AGENT':'asdfasdfasdf',....}\n # request.META 字典类型数据,所有的请求头的键前面都加上了一个HTTP_键名称\n # return HttpResponse('ok')\n if request.method == \"GET\":\n return render(request, 'login.html')\n else:\n uname = request.POST.get(\"username\")\n if uname == 'shiyuan':\n # return redirect('/home/') # redirect的参数是一个路径\n return render(request, 'home.html')\n\ndef home(request):\n book = \"测试\"\n return render(request, 'home.html', {'book': book})\n\ndef index(request):\n re = HttpResponse(\"xx\")\n # re = render(request, 'xxx')\n # re = redirect('/home/')\n re['name'] = 'gaodaao' # 添加响应头键值对\n re.status_code = 404 # 修改状态码\n return re\n\n\nclass BookView(View):\n # get, post\n # 通过反射获取到请求方法对应的类中的方法进行执行\n def get(self, request):\n return HttpResponse('ok')\n\n \"\"\"\n def dispatch(self, request, *args, **kwargs):\n # Try to dispatch to the right method; if a method doesn't exist,\n # defer to the error handler. Also defer to the error handler if the\n # request method isn't on the approved list.\n if request.method.lower() in self.http_method_names:\n handler = getattr(self, request.method.lower(), self.http_method_not_allowed)\n else:\n handler = self.http_method_not_allowed\n return handler(request, *args, **kwargs)\n \"\"\"\n\n\nfrom django.utils.decorators import method_decorator\n\ndef func(f):\n def inner(*args, **kwargs):\n print(\"111111\")\n ret = f(*args, **kwargs)\n print(\"222222\")\n return ret\n return inner\n\n\n# @method_decorator(func, name=\"get\") # CBV方法通过装饰器来进行扩展(方式三)\n# @method_decorator(func, name=\"post\")\nclass ArticlesView(View):\n\n # 重写dispatch方法来进行扩展\n # @method_decorator(func) # CBV方法通过装饰器来进行扩展(方式二)\n # def dispatch(self, request, *args, **kwargs):\n # print(\"111111\")\n # ret = super(ArticlesView, self).dispatch(request, *args, **kwargs)\n # print(\"222222\")\n # return ret\n\n @method_decorator(func) # CBV方法通过装饰器来进行扩展(方式一)\n def get(self, request, year):\n print(year)\n return render(request, 'articles.html')\n\n @method_decorator(func)\n def post(self, request, year):\n print(request.POST)\n return HttpResponse('ok')","repo_name":"liuzilong-github/PythonProject","sub_path":"5.django框架/day48/django_views/app01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5296649651","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom __future__ import print_function\nimport time\nimport hashlib\nimport requests\nimport json\n\n\ndef make_sign(token, key_values):\n\n args = []\n for k, v in key_values:\n args.append('%s=%s' % (k, v))\n\n a = '&'.join(args)\n print(\"a=\", a)\n\n b = hashlib.md5(a).hexdigest()\n print(\"b=\", b)\n\n return b.upper()\n\n\ndef main():\n\n #api_url = \"https://www.golden-gac.com/MT4_IIS/banker/getinit\"\n api_url = \"https://www.golden-gac.com/MT4_IIS/banker/user_meta\"\n\n post_data = {\n #\"version\": 136,\n #\"account\": 
12583079,\n \"account\": 12583134,\n #\"password\": \"ry52514j\",\n #\"login\":12583079,\n #\"groupname\":\"demoforex\",\n #\"device\":\"Model: Veriton M6620G (Acer), Name: DESKTOP-RPRFHR1, Type: Desktop \",\n }\n\n req = requests.post(api_url, data=json.dumps(post_data))\n a = req.content\n print(\"call\", req.status_code)\n\n if req.status_code == 200:\n print(\"response\", a)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"wutony76/pay_scripts","sub_path":"tony/gac/post_init.py","file_name":"post_init.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8298641233","text":"import json\nimport threading\nfrom time import sleep\nfrom datetime import datetime, timedelta\nfrom os import path\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom betfairlightweight import APIClient, filters\nfrom betfairlightweight.filters import price_projection\n\nfrom arb_search.apis.base_api import API_Instance, BaseAPI\nfrom arb_search.apis.utils import fixed_count_bin_packer\nfrom arb_search.sport_types import SportType\nfrom arb_search.user_event import UserBet, UserBookmaker, UserEvent\nfrom arb_search.utils import BookmakerStoredDict, StoredDict, team_name_matcher\n\nfrom betting_event import BetType\n\nfrom .endpoints.betting import Betting_Limitless\nfrom .utils import (betfair_to_datetime, datetime_to_betfair_format,\n price_projection_weight)\n\nMARKET_TYPE_CODES = {\n 1: ['MATCH_ODDS', 'MATCH_ODDS_UNMANAGED',\n 'ASIAN_HANDICAP', 'ODD_OR_EVEN', \n 'CORRECT_SCORE', #'CORRECT_SCORE2',\n 'OVER_UNDER_05', 'OVER_UNDER_15', 'OVER_UNDER_25', 'OVER_UNDER_35', 'OVER_UNDER_45', 'OVER_UNDER_55', 'OVER_UNDER_65', 'OVER_UNDER_75', 'OVER_UNDER_85',\n 'TEAM_A_OVER_UNDER_05', 'TEAM_A_OVER_UNDER_15', 'TEAM_A_OVER_UNDER_25', 'TEAM_A_OVER_UNDER_35', 'TEAM_B_OVER_UNDER_05', 'TEAM_B_OVER_UNDER_15', 'TEAM_B_OVER_UNDER_25', # 'TEAM_B_OVER_UNDER_35'\n # 'MATCH_ODDS_AND_OU_05', 'MATCH_ODDS_AND_OU_15', \n 'MATCH_ODDS_AND_OU_25', 'MATCH_ODDS_AND_OU_35',\n # TODO: figure out what these are 'TEAM_A_1', 'TEAM_A_2', 'TEAM_A_3', 'TEAM_B_1', 'TEAM_B_2', 'TEAM_B_3',\n # TODO: Implement bet class w/ ID 91: 'ALT_TOTAL_GOALS', 'COMBINED_TOTAL', 'TOTAL_GOALS' \n 'TEAM_A_WIN_TO_NIL', 'TEAM_B_WIN_TO_NIL',\n 'BOTH_TEAMS_TO_SCORE', 'MATCH_ODDS_AND_BTTS',\n # TODO: Implement ID 47? 
'DRAW_NO_BET',\n 'DOUBLE_CHANCE']\n # TODO merge ID 27 and 28 'CLEAN_SHEET'] #'HANDICAP'\n}\n\nclass Betfair(BaseAPI, APIClient):\n def __init__(self, bookmaker_table: BookmakerStoredDict, default_start_time_range: Optional[Tuple[datetime, datetime]] = None) -> None:\n\n APIClient.__init__(self, **json.load(open('settings/api_keys.json', 'r'))[\"betfair_api\"], lightweight= True)\n BaseAPI.__init__(self, \"betfair\", bookmaker_table)\n\n self.bookmaker_name = \"betfair_ex_uk\"\n if self.bookmaker_name not in bookmaker_table:\n bookmaker_table[self.bookmaker_name] = UserBookmaker(self.bookmaker_name)\n\n settings = json.load(open('settings/settings.json', 'r'))[\"apis\"][\"betfair\"]\n\n if default_start_time_range is None:\n default_start_time_range = (datetime.now(), datetime.now() + timedelta(hours=settings[\"default_hours_range\"]))\n self.default_start_time_range = default_start_time_range\n\n self._competitions_table = StoredDict('storage/betfair/competitions.pkl')\n\n self.betting = Betting_Limitless(self)\n self.login()\n\n def gather_events(self, sport_types: List[SportType], leagues: Optional[List[str]] = None, start_time_range: Optional[Tuple[datetime, datetime]] = None) -> List[UserEvent]:\n events_table: Dict[str, UserEvent] = {}\n market_type_codes: List[str] = []\n\n for sport_type in sport_types:\n market_type_codes += MARKET_TYPE_CODES[sport_type.value]\n market_type_codes = list(set(market_type_codes))\n\n if start_time_range is None:\n start_time_range = self.default_start_time_range\n\n market_filter = filters.market_filter(\n event_type_ids= [sport_type.value for sport_type in sport_types],\n market_type_codes= market_type_codes,\n market_start_time= {\n \"from\": datetime_to_betfair_format(start_time_range[0]),\n \"to\": datetime_to_betfair_format(start_time_range[1])\n }\n )\n\n if leagues is not None:\n competition_ids = [self._competitions_table[league] for league in leagues]\n market_filter.update(filters.market_filter(competition_ids= competition_ids))\n\n # all market projections\n # market_projection = ['COMPETITION', 'EVENT', 'EVENT_TYPE', 'MARKET_START_TIME', 'MARKET_DESCRIPTION', 'RUNNER_DESCRIPTION', 'RUNNER_METADATA']\n\n markets = self.betting.list_all_market_catalogue(filter=market_filter, market_projection=['EVENT', 'COMPETITION', 'RUNNER_DESCRIPTION'])\n\n for market in markets:\n if not market[\"event\"][\"id\"] in events_table:\n events_table[market[\"event\"][\"id\"]] = UserEvent(start_time= betfair_to_datetime(market[\"event\"][\"openDate\"]),\n bookmakers= [self.bookmaker_table[self.bookmaker_name]],\n api_specific_data= {self: {**market[\"event\"], **{\"markets\": {}, \"total_runner_count\": 0}}}\n ) #TODO: figure out no_draw\n\n\n if \"competition\" not in events_table[market[\"event\"][\"id\"]].api_specific_data[self] and \"competition\" in market:\n events_table[market[\"event\"][\"id\"]].api_specific_data[self][\"competition\"] = market[\"competition\"]\n\n events_table[market[\"event\"][\"id\"]].api_specific_data[self][\"markets\"][market[\"marketId\"]] = {\n \"marketName\": market[\"marketName\"],\n \"totalMatched\": market[\"totalMatched\"],\n \"runners\": market[\"runners\"]\n }\n\n events_table[market[\"event\"][\"id\"]].api_specific_data[self][\"total_runner_count\"] += len(market[\"runners\"])\n\n return self.update_events(list(events_table.values()))\n\n def update_bet_data(self, event: UserEvent, bet_indexes: List[int]) -> bool:\n new_bets: List[UserBet] = []\n bet_indexes.sort()\n updates_needed = False\n bets = [event.bets[index] 
for index in bet_indexes]\n market_books = self.betting.list_market_book(\n [bet.api_specific_data[self][\"market_id\"] for bet in bets if self in bet.api_specific_data and \"market_id\" in bet.api_specific_data[self]],\n price_projection= price_projection(price_data=['EX_BEST_OFFERS']),\n order_projection= 'EXECUTABLE')\n \n for market_book in market_books:\n for current_runner, new_runner in zip(event.api_specific_data[self][\"markets\"][market_book['marketId']][\"runners\"], market_book[\"runners\"]):\n assert current_runner[\"selectionId\"] == new_runner[\"selectionId\"]\n new_runner.update(current_runner)\n new_bets.extend(self._build_bets(event, market_book)) # type: ignore\n \n \n for old_index in bet_indexes[::-1]:\n old_bet = event.bets[old_index]\n if old_bet not in new_bets:\n updates_needed = True\n event.bets.remove(old_bet)\n else:\n new_bet = new_bets[new_bets.index(old_bet)]\n new_bet.wager = old_bet.wager\n new_bet.previous_wager = old_bet.previous_wager\n if old_bet.wager > new_bet.volume:\n updates_needed = True\n event.add_bet(new_bet)\n\n return updates_needed\n\n def update_events(self, events: List[UserEvent]) -> List[UserEvent]:\n price_projection_dict = price_projection(price_data=['EX_BEST_OFFERS'])\n market_id_runners_table: Dict[str, int] = {}\n\n for event in events:\n for market_id in event.api_specific_data[self][\"markets\"].keys():\n market_id_runners_table[market_id] = event.api_specific_data[self][\"total_runner_count\"]\n\n all_market_books = self.betting.list_all_market_book(market_id_runners_table, price_projection= price_projection_dict, order_projection= 'EXECUTABLE', lightweight= True)\n\n for event in events:\n new_bets = []\n for market_id in event.api_specific_data[self][\"markets\"].keys():\n market_book = all_market_books.pop(market_id)\n for current_runner, new_runner in zip(event.api_specific_data[self][\"markets\"][market_book['marketId']][\"runners\"], market_book[\"runners\"]):\n assert current_runner[\"selectionId\"] == new_runner[\"selectionId\"]\n new_runner.update(current_runner)\n # for new_bet in self._build_bets(event, market_book):\n # event.add_bet(new_bet)\n new_bets.extend(self._build_bets(event, market_book))\n\n if event.bets == []:\n event.bets = new_bets\n else:\n for new_bet in new_bets:\n event.add_bet(new_bet)\n\n return events\n \n def read_event_comparison_data(self, event: UserEvent) -> Tuple[str, str, str, str]:\n result = [self.name]\n result += [name.strip() for name in event.api_specific_data[self][\"name\"].split(' v ')]\n result += [event.api_specific_data[self][\"competition\"][\"name\"]]\n if len(result) != 4:\n raise ValueError(\"Expected 4 values in result, but got {}\".format(len(result)))\n return tuple(result) # type: ignore\n\n def _build_bets(self, event: UserEvent, market_book: dict) -> List[UserBet]:\n bets_list = []\n market_data = event.api_specific_data[self][\"markets\"][market_book['marketId']]\n\n _home_team, _away_team = event.api_specific_data[self][\"name\"].split(' v ')\n\n team_table = {\n _home_team.lower(): \"home\",\n _away_team.lower(): \"away\",\n \"the draw\": \"draw\",\n \"draw\": \"draw\"\n }\n\n team_names = list(team_table.keys())\n\n if not market_data[\"marketName\"] == \"Asian Handicap\" and any(runner[\"handicap\"] != 0.0 for runner in market_data[\"runners\"]):\n raise ValueError(f\"Non-zero handicap in non-asian handicap market {market_data['marketName']}\")\n\n if market_data[\"marketName\"].startswith(\"Over/Under \") and market_data[\"marketName\"].endswith(\" Goals\"):\n 
bet_type = BetType.Goals_OverUnder\n bet_values = [runner[\"runnerName\"][:-6].lower() for runner in market_data[\"runners\"]]\n # exchanges = [runner[\"ex\"] for runner in market_book[\"runners\"]]\n\n elif market_data[\"marketName\"] == \"Match Odds\":\n bet_type = BetType.MatchWinner\n bet_values = [team_table[team_name_matcher(runner[\"runnerName\"], team_names)] for runner in market_data[\"runners\"]]\n\n elif market_data[\"marketName\"] == \"Double Chance\":\n bet_type = BetType.DoubleChance\n bet_values = [runner[\"runnerName\"].lower().replace(' or ', '/') for runner in market_data[\"runners\"]]\n\n elif market_data[\"marketName\"] == \"Correct Score\":\n bet_type = BetType.ExactScore\n bet_values = [runner[\"runnerName\"].replace(' - ', ':') for runner in market_data[\"runners\"] if not runner[\"runnerName\"].startswith('Any')] #TODO: don't discard 'Any Other Score'\n\n elif market_data[\"marketName\"] == \"Asian Handicap\":\n bet_type = BetType.AsianHandicap\n bet_values = [f'{team_table[team_name_matcher(runner[\"runnerName\"], team_names)]} {runner[\"handicap\"]}' for runner in market_data[\"runners\"]]\n # remove values that start with \"draw\"\n bet_value = [value for value in bet_values if not value.startswith(\"draw\")] # TODO: figure out how to implement this for betfair\n\n elif market_data[\"marketName\"] == \"Both teams to Score?\":\n bet_type = BetType.BothTeamsToScore\n bet_values = [runner[\"runnerName\"].lower() for runner in market_data[\"runners\"]]\n\n # elif market_data[\"marketName\"].lower().endswith(\" win to nil\"):\n # bet_type = BetType.Team_WinToNil\n # bet_values = [f'{team_table[team_name_matcher(market_data[\"marketName\"][:-11].lower(), team_names)]} {runner[\"runnerName\"].lower()}' for runner in market_data[\"runners\"]]\n\n elif market_data[\"marketName\"] == \"Match Odds and Both teams to Score\":\n bet_type = BetType.Result_BothTeamsScore\n bet_values = [team_table[team_name_matcher(runner[\"runnerName\"][:runner[\"runnerName\"].index(\"/\")].lower(), team_names)] + runner[\"runnerName\"][runner[\"runnerName\"].index(\"/\"):].lower() for runner in market_data[\"runners\"]]\n\n elif market_data[\"marketName\"] == \"Total Goals Odd/Even\":\n bet_type = BetType.OddEven\n bet_values = [runner[\"runnerName\"].lower() for runner in market_data[\"runners\"]]\n\n elif market_data[\"marketName\"].startswith(\"Match Odds and Over/Under\") and market_data[\"marketName\"].endswith(\" Goals\"):\n bet_type = BetType.Result_OverUnder\n bet_values = [f'{team_table[team_name_matcher(runner[\"runnerName\"][:runner[\"runnerName\"].index(\"/\")].lower(), team_names)]}/{runner[\"runnerName\"][runner[\"runnerName\"].index(\"/\") + 1:-6].lower()}' for runner in market_data[\"runners\"]]\n\n elif market_data[\"marketName\"].lower().startswith(_home_team.lower()) or market_data[\"marketName\"].lower().startswith(_away_team.lower()):\n if market_data[\"marketName\"].lower().startswith(_home_team.lower()):\n rest_of_name = market_data[\"marketName\"][len(_home_team) + 1:]\n team = \"home\"\n else:\n rest_of_name = market_data[\"marketName\"][len(_away_team) + 1:]\n team = \"away\"\n\n if rest_of_name.startswith(\"Over/Under \") and rest_of_name.endswith(\" Goals\"):\n bet_type = BetType.Team_OverUnder\n bet_values = [f\"{team} {runner['runnerName'][:-6].lower()}\" for runner in market_data[\"runners\"]]\n\n elif rest_of_name.lower() == \"win to nil\":\n bet_type = BetType.Team_WinToNil\n bet_values = [f'{team} {runner[\"runnerName\"].lower()}' for runner in 
market_data[\"runners\"]]\n\n else:\n raise ValueError(f\"Unknown market type '{market_data['marketName']}'\")\n\n else:\n raise ValueError(f\"Unknown market type '{market_data['marketName']}'\")\n\n for runner_index, (bet_value, ex) in enumerate(zip(bet_values, [runner[\"ex\"] for runner in market_book[\"runners\"]])):\n for market_key in ex:\n if market_key not in [\"availableToBack\", \"availableToLay\"]:\n continue #TODO: add previous_wager here\n for price in ex[market_key]:\n bets_list.append(\n UserBet(\n bet_type= bet_type,\n value= bet_value,\n odds= price[\"price\"],\n bookmaker= self.bookmaker_table[self.bookmaker_name],\n lay= (market_key == \"availableToLay\"),\n volume= price[\"size\"],\n api_specific_data= {\n self: {\n \"market_id\": market_book['marketId'],\n \"selection_id\": market_book['runners'][runner_index]['selectionId']\n }\n }\n )\n )\n\n return bets_list\n","repo_name":"dannyray44/arb_search","sub_path":"arb_search/apis/betfair/betfair.py","file_name":"betfair.py","file_ext":"py","file_size_in_byte":15190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"10644525490","text":"def humanise_duration(seconds):\n duration_string = ''\n if seconds >= 3600:\n duration_string += '%dh' % int(seconds/3600)\n seconds %= 3600\n if seconds >= 60:\n duration_string += '%dm' % int(seconds/60)\n seconds %= 60\n if seconds > 0:\n duration_string += '%ds' % round(seconds)\n return duration_string\n\n\ndef humanise_size(size):\n size_table = [\n (1024**4, 'TiB'),\n (1024**3, 'GiB'),\n (1024**2, 'MiB'),\n (1024**1, 'KiB'),\n (0, 'B')\n ]\n\n for size_base, size_unit in size_table:\n if size >= size_base:\n if size_base > 0:\n size_amount = int(float(size) / float(size_base))\n else:\n size_amount = float(size)\n return size_amount, size_unit\n raise Exception(\"This can't happen: size=%r\" % size)\n\n\ndef humanise_speed(size, duration):\n speed_table = [\n (1024**3, 'GiB/s'),\n (1024**2, 'MiB/s'),\n (1024**1, 'KiB/s'),\n (0, 'B/s')\n ]\n\n speed = float(size) / duration\n for speed_base, speed_unit in speed_table:\n if speed >= speed_base:\n if speed_base > 0:\n speed_amount = speed / speed_base\n else:\n speed_amount = speed\n return speed_amount, speed_unit\n raise Exception(\n \"This can't happen: size=%r duration=%r\" % (size, duration))\n","repo_name":"obnam-mirror/obnam","sub_path":"obnamlib/humanise.py","file_name":"humanise.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"71"} +{"seq_id":"13869451002","text":"from flask import Flask, request, render_template\nfrom pull import animalGroupMap\n\nmappings = animalGroupMap()\napp = Flask(__name__)\n\n\n@app.route('/')\ndef helloWorld():\n return render_template('index.html')\n\n\n@app.route('/collective', methods=['GET'])\ndef collective():\n subject = request.args.get('subject', '')\n try:\n m = mappings.get(subject)\n return ','.join(m)\n except:\n return ''\n","repo_name":"sirrah23/Animals","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"22275867782","text":"import warnings\nfrom typing import List, Optional, Union\n\nimport pandas as pd\n\nfrom feature_engine._base_transformers.mixins import GetFeatureNamesOutMixin\nfrom feature_engine._docstrings.fit_attributes import (\n _feature_names_in_docstring,\n _n_features_in_docstring,\n 
_variables_attribute_docstring,\n)\nfrom feature_engine._docstrings.init_parameters.all_trasnformers import (\n _missing_values_docstring,\n _variables_categorical_docstring,\n)\nfrom feature_engine._docstrings.init_parameters.encoders import _ignore_format_docstring\nfrom feature_engine._docstrings.substitute import Substitution\nfrom feature_engine.dataframe_checks import _check_optional_contains_na, check_X\nfrom feature_engine.encoding.base_encoder import (\n CategoricalInitMixinNA,\n CategoricalMethodsMixin,\n)\n\n\n@Substitution(\n ignore_format=_ignore_format_docstring,\n missing_values=_missing_values_docstring,\n variables=_variables_categorical_docstring,\n variables_=_variables_attribute_docstring,\n feature_names_in_=_feature_names_in_docstring,\n n_features_in_=_n_features_in_docstring,\n)\nclass MatchCategories(\n CategoricalInitMixinNA, CategoricalMethodsMixin, GetFeatureNamesOutMixin\n):\n \"\"\"\n MatchCategories() ensures that categorical variables are encoded as pandas\n `'categorical'` dtype, instead of generic python `'object'` or other dtypes.\n\n Under the hood, `'categorical'` dtype is a representation that maps each\n category to an integer, thus providing a more memory-efficient object\n structure than, e.g., 'str', and allowing faster grouping, mapping, and similar\n operations on the resulting object.\n\n MatchCategories() remembers the encodings or levels that represent each\n category, and can thus can be used to ensure that the correct encoding gets\n applied when passing categorical data to modeling packages that support this\n dtype, or to prevent unseen categories from reaching a further transformer\n or estimator in a pipeline, for example.\n\n More details in the :ref:`User Guide `.\n\n Parameters\n ----------\n {variables}\n\n {ignore_format}\n\n {missing_values}\n\n Attributes\n ----------\n category_dict_:\n Dictionary with the category encodings assigned to each variable.\n\n {variables_}\n\n {feature_names_in_}\n\n {n_features_in_}\n\n Methods\n -------\n fit:\n Learn the encodings or levels to use for each variable.\n\n fit_transform:\n Fit to the data. Then transform it.\n\n get_feature_names_out:\n Get output feature names for transformation.\n\n get_params:\n Get parameters for this estimator.\n\n set_params:\n Set the parameters of this estimator.\n\n transform:\n Enforce the type of categorical variables as dtype `categorical`.\n\n Examples\n --------\n\n >>> import pandas as pd\n >>> from feature_engine.preprocessing import MatchCategories\n >>> X_train = pd.DataFrame(dict(x1 = [\"a\",\"b\",\"c\"], x2 = [4,5,6]))\n >>> X_test = pd.DataFrame(dict(x1 = [\"c\",\"b\",\"a\",\"d\"], x2 = [5,6,4,7]))\n >>> mc = MatchCategories(missing_values=\"ignore\")\n >>> mc.fit(X_train)\n >>> mc.transform(X_train)\n x1 x2\n 0 a 4\n 1 b 5\n 2 c 6\n >>> mc.transform(X_test)\n x1 x2\n 0 c 5\n 1 b 6\n 2 a 4\n 3 NaN 7\n \"\"\"\n\n def __init__(\n self,\n variables: Union[None, int, str, List[Union[str, int]]] = None,\n ignore_format: bool = False,\n missing_values: str = \"raise\",\n ) -> None:\n\n super().__init__(variables, missing_values, ignore_format)\n\n def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):\n \"\"\"\n Learn the encodings or levels to use for representing categorical variables.\n\n Parameters\n ----------\n X: pandas dataframe of shape = [n_samples, n_features]\n The training dataset. Can be the entire dataframe, not just the\n variables to be transformed.\n\n y: pandas Series, default = None\n y is not needed in this encoder. 
You can pass y or None.\n \"\"\"\n X = check_X(X)\n variables_ = self._check_or_select_variables(X)\n\n if self.missing_values == \"raise\":\n _check_optional_contains_na(X, variables_)\n\n self.category_dict_ = dict()\n for var in variables_:\n self.category_dict_[var] = pd.Categorical(X[var]).categories\n\n self.variables_ = variables_\n self._get_feature_names_in(X)\n return self\n\n def transform(self, X: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Encode categorical variables as pandas categorical dtype.\n\n Parameters\n ----------\n X: pandas dataframe of shape = [n_samples, n_features].\n The dataset to encode.\n\n Returns\n -------\n X_new: pandas dataframe of shape = [n_samples, n_features].\n The dataframe with the variables encoded as pandas categorical dtype.\n \"\"\"\n X = self._check_transform_input_and_state(X)\n\n if self.missing_values == \"raise\":\n _check_optional_contains_na(X, self.variables_)\n\n for feature, levels in self.category_dict_.items():\n X[feature] = pd.Categorical(X[feature], levels)\n\n self._check_nas_in_result(X)\n return X\n\n def _check_nas_in_result(self, X: pd.DataFrame):\n # check if NaN values were introduced by the encoding\n if X[self.category_dict_.keys()].isnull().sum().sum() > 0:\n\n # obtain the name(s) of the columns that have null values\n nan_columns = (\n X[self.category_dict_.keys()]\n .columns[X[self.category_dict_.keys()].isnull().any()]\n .tolist()\n )\n\n if len(nan_columns) > 1:\n nan_columns_str = \", \".join(nan_columns)\n else:\n nan_columns_str = nan_columns[0]\n\n if self.missing_values == \"ignore\":\n warnings.warn(\n \"During the encoding, NaN values were introduced in the feature(s) \"\n f\"{nan_columns_str}.\"\n )\n elif self.missing_values == \"raise\":\n raise ValueError(\n \"During the encoding, NaN values were introduced in the feature(s) \"\n f\"{nan_columns_str}.\"\n )\n","repo_name":"feature-engine/feature_engine","sub_path":"feature_engine/preprocessing/match_categories.py","file_name":"match_categories.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","stars":1587,"dataset":"github-code","pt":"71"} +{"seq_id":"70351276710","text":"s = 'AAAAAAAAAAAAA'\ni = 0\nj = 10\na = set()\nwhile j <= len(s):\n s1 = s[i:j]\n if s.count(s1) > 1:\n a.add(s1)\n i += 1\n j += 1\nprint(a)\nprint(123)","repo_name":"coding-Studio-vbit/CP-Tenure-4","sub_path":"Ashish/Week 2/RepeatedDNA.py","file_name":"RepeatedDNA.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"74577564710","text":"def solution(abilities, k):\n answer = 0\n\n # 우선권을 써야하는 경우는 1순위와 2순위의 능력치가 가장 차이가 많이 날 때다.\n if len(abilities) % 2 != 0:\n abilities.append(0)\n\n n_abilities = sorted(abilities)[::-1]\n differences = []\n for i in range(0, len(abilities) - 1, 2):\n differences.append([n_abilities[i] - n_abilities[i + 1], i])\n\n differences.sort()\n # k를 써야하는 index는 그럼으로 아래와 같이 구할 수 있다.\n k_orders = []\n for i in range(len(differences) - 1, -1, -1):\n if k != 0:\n k_orders.append(differences[i][1])\n k -= 1\n\n else:\n break\n\n # 이제 index를 돌면서 현재 index가 우선권을 써야하는 상황이면 현재 index를 가져가고,\n # 아닐 경우에는 index + 1을 가져간다.\n for i in range(0, len(n_abilities) - 1, 2):\n if i in k_orders:\n answer += n_abilities[i]\n else:\n answer += n_abilities[i + 1]\n\n return 
answer","repo_name":"Valentino1994/dayAlgorithm","sub_path":"onedayAlgorithm/2022/03_March/26th/s5.py","file_name":"s5.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12731825789","text":"\"\"\"\n스도쿠는 매우 간단한 숫자 퍼즐이다.\n9×9 크기의 보드가 있을 때, 각 행과 각 열, 그리고 9 개의 3×3 크기의 보드에 1부터 9까지의 숫자가 중복 없이 나타나도록 보드를 채우면 된다.\n\n스도쿠를 정확하게 푼 경우는.\n각 행에 1부터 9까지의 숫자가 중복 없이 나오 고,\n각 열에 1부터 9까지의 숫자가 중복 없이 나오고,\n각 3×3짜리 사각형(9개이며, 위에서 색 깔로 표시되었다)에 1부터 9까지의 숫자가 중복 없이 나오는 경우이다.\n완성된 9×9 크기의 수도쿠가 주어지면 정확하게 풀었으면 “YES\", 잘 못 풀었으면 ”NO\"를 출 력하는 프로그램을 작성하세요.\n\n▣ 입력설명\n첫 번째 줄에 완성된 9×9 스도쿠가 주어집니다.\n\n▣ 출력설명\n첫째 줄에 “YES\" 또는 ”NO\"를 출력하세요.\n\n\"\"\"\n\n# 3x3 마다 외벽을 0으로 초기화 떄려버림\n# 그 리스트는 쓰고 버려\n\"\"\"\nlistN = [list(map(int, input().split())) for _ in range(9)]\nextraList = []\na = \"YES\"\nfor i in range(0, 9, 3):\n for j in range(0, 9):\n extraList.append(listN[i][j])\n extraList.append(listN[i + 1][j])\n extraList.append(listN[i + 2][j])\n if (j+1) % 3 == 0 :\n if len(extraList) != len(set(extraList)):\n a = \"NO\"\n extraList = []\n\nprint(a)\n\"\"\"\n\n\"\"\"\n체크리스트가 3가지 필요함\n행 ch 1~9 를 만들어놓고 돌면서 체크함 중복의 경우 ==1 로 넣기때문에 누적이아니라 그냥 1들어감\n열 ch 1~9 를 만들어놓고 돌면서 체크함\nsum(ch) == 9 리스트가 다 체크가 되어있으면 합산이 9여야함\n\n\"\"\"\n\n\ndef check(a):\n for i in range(9):\n ch1 = [0] * 10\n ch2 = [0] * 10\n for j in range(9):\n ch1[a[i][j]] = 1\n ch2[a[j][i]] = 1\n if sum(ch1) != 9 or sum(ch2) != 9:\n return False\n for i in range(3):\n for j in range(3):\n ch3 = [0] * 10\n for k in range(3):\n for s in range(3):\n ch3[a[i * 3 + k][j * 3 + s]] = 1\n if sum(ch3) != 9:\n return False\n return True\n\n\nlistN = [list(map(int, input().split())) for _ in range(9)]\nif check(listN):\n print(\"YES\")\nelse:\n print(\"NO\")\n\n\"\"\"\n정리\n일단 내가 문제를 잘못 풀음\n1. 나는 그룹 체크만 했음 근데 전체 행 열 체크를 해야됬었던것\n\n핵심정리\n1. ch1 체크리스트를 활용해서 값을 1씩 넣은뒤에 sum을 활용하여 합산이 9인지를 체크\n2. 행과 열을 체크\n3. 
4중 반복문을 활용해서 그룹 체크를 하는데 이부분이 중요함\n3-1 일단 ch3 체크리스트 하나 확인\n3-2\n첫번쨰 반복문 i 는 아래로 3단위로 내려가는 숫자\n0\n1\n2\n--- 이렇게 3단위로 아래로 내려가는 i값\nj는 옆으로 3단위로 늘려주는 값\n0 1 2, 3 4 5, 6 7 8\ni값을 3단위로 늘리니까 그 내부에서 3단위 내에서 0 1 2 돌아야 되고 j가 3단위로 돌거기 때문에 그 3안에서 0 1 2 돌아야 하기 때문에\n반복문이 4가지나 필요한것 \n\n3-2 : 즉 3단위로 행과 열을 돌기위해 i 와 j 가 필요하고 그 3단위 안에서 0 1 2 를 각각 돌기 위해서 k와 s가 필요한 것\nj값이 바뀌고나서 리스트는 초기화\n그러면 \n왼쪽 위부터 3x3단위로 탐색을 수행한다.\n\n\"\"\"","repo_name":"christopher3810/algorithm_study","sub_path":"Kim's_Python_Test/Section3/10.Sudoku.py","file_name":"10.Sudoku.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"25138554932","text":"import datetime\nimport re\n\nfrom core.sources.base import Source\nfrom core.connectors.googleads import GoogleAdsConnector\nfrom core.utils import datetime_to_wivo_format_with_tz, datetime_utc_to_timezone\n\nfrom utils.logger import logger\n\n\nclass GoogleAdsSource(Source):\n CONNECTOR = GoogleAdsConnector\n\n CAMPAIGN_SERVICE_NAME = 'CampaignService'\n AD_GROUP_SERVICE_NAME = 'AdGroupService'\n AD_GROUP_AD_SERVICE_NAME = 'AdGroupAdService'\n\n LABEL_NAME_PATTERN = re.compile('^(product_id:[0-9]+)$')\n\n CAMPAIGN_STATUS_VALUES = {'REMOVED': 0, 'ENABLED': 1, 'PAUSED': 2}\n AD_GROUP_STATUS_VALUES = {'REMOVED': 0, 'ENABLED': 1, 'PAUSED': 2}\n AD_STATUS_VALUES = {'DISABLED': 0, 'ENABLED': 1, 'PAUSED': 2}\n\n @classmethod\n def settings(cls):\n import os\n client_id = os.environ['ADWORDS_CLIENT_ID']\n client_secret = os.environ['ADWORDS_CLIENT_SECRET']\n refresh_token = os.environ['ADWORDS_REFRESH_TOKEN']\n developer_token = os.environ['ADWORDS_DEVELOPER_TOKEN']\n client_customer_id = os.environ['ADWORDS_CUSTOMER_CLIENT_ID']\n storage_path = os.environ['STORAGE_PATH']\n timezone_name = os.environ['TIMEZONE_NAME']\n\n return {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': refresh_token,\n 'developer_token': developer_token,\n 'client_customer_id': client_customer_id,\n 'storage_path': storage_path,\n 'timezone_name': timezone_name,\n }\n\n def process(self, **kwargs):\n logger.info('Process google ads...')\n timezone_name = kwargs['timezone_name']\n conn = self.CONNECTOR.get_instance(**kwargs)\n all_data = []\n\n date_time = datetime.datetime.now()\n string_datetime = datetime_to_wivo_format_with_tz(date_time, timezone_name)\n dt_tz = datetime_utc_to_timezone(date_time, timezone_name)\n\n for campaign in self.campaigns(conn):\n campaign_id = campaign['id']\n campaign_name = campaign['name']\n\n logger.info('Processing campaign {}'.format(campaign_name))\n\n for group in self.ad_groups_by_campaign(conn, campaign_id):\n group_id = group['id']\n group_name = group['name']\n\n logger.info('Processing ad groups {}'.format(group_name))\n\n for ad in self.ads_by_group(conn, group_id):\n logger.info('Processing ad ID {}'.format(ad['ad']['id']))\n\n if 'labels' in ad:\n for label in ad['labels']:\n name = label['name']\n\n if self.LABEL_NAME_PATTERN.match(name):\n\n logger.info('Processing Label {}'.format(name))\n value = self.calculate_value(campaign['status'], group['status'], ad['status'])\n\n product_id = re.split(':', name)[1]\n ad_id = str(ad['ad']['id'])\n\n ad_name = '{}_{}_{}'.format(campaign_name, group_name, ad_id)\n\n data = {\n 'datetime': string_datetime,\n 'value': value,\n 'product_id': str(product_id),\n 'googleadwordscampaign_id': str(campaign_id),\n 'googleadwordscampaign_name': campaign_name,\n 'googleadwordsgroup_id': str(group_id),\n 
'googleadwordsgroup_name': group_name,\n 'googleadwordsad_id': ad_id,\n 'googleadwordsad_name': ad_name\n }\n\n all_data.append(data)\n\n else:\n logger.warning('Label \"{}\" with wrong format - ignored'.format(name))\n else:\n logger.warning('Ad ID {} without label'.format(ad['ad']['id']))\n\n return [{'metric': 'googleadwordsadstate', 'date': dt_tz.date(), 'records': all_data}]\n\n def calculate_value(self, campaign_status, group_status, ad_status):\n value = self.AD_STATUS_VALUES.get(ad_status, -1)\n if value is 1:\n group_value = self.AD_GROUP_STATUS_VALUES.get(group_status, -1)\n if group_value is 1:\n value = self.CAMPAIGN_STATUS_VALUES.get(campaign_status, -1)\n else:\n value = group_value\n\n return value\n\n def campaigns(self, connector):\n campaign_service = connector.service(self.CAMPAIGN_SERVICE_NAME)\n fields = ['Id', 'Name', 'Status']\n\n return self.get_pages(campaign_service, fields, [])\n\n def ad_groups_by_campaign(self, connector, campaing):\n ad_groups_service = connector.service(self.AD_GROUP_SERVICE_NAME)\n fields = ['Id', 'Name', 'Status']\n predicates = [\n {\n 'field': 'CampaignId',\n 'operator': 'EQUALS',\n 'values': campaing\n }\n ]\n\n return self.get_pages(ad_groups_service, fields, predicates)\n\n def ads_by_group(self, connector, ad_group):\n ads_service = connector.service(self.AD_GROUP_AD_SERVICE_NAME)\n fields = ['Id', 'Name', 'Status', 'Labels']\n predicates = [\n {\n 'field': 'AdGroupId',\n 'operator': 'EQUALS',\n 'values': ad_group\n }\n ]\n\n return self.get_pages(ads_service, fields, predicates)\n\n def get_pages(self, ads_service, fields, predicates):\n pages = []\n\n offset = 0\n page_size = 500\n\n selector = {\n 'fields': fields,\n 'predicates': predicates,\n 'paging': {\n 'startIndex': str(offset),\n 'numberResults': str(page_size)\n }\n }\n\n more_pages = True\n while more_pages:\n page = ads_service.get(selector)\n\n pages.extend(self.get_entries(page))\n\n offset += page_size\n selector['paging']['startIndex'] = str(offset)\n more_pages = offset < int(page['totalNumEntries'])\n\n return pages\n\n @staticmethod\n def get_entries(page):\n if 'entries' in page:\n return page['entries']\n else:\n return []\n","repo_name":"angelgc82/b2b-maui","sub_path":"core/sources/googleads.py","file_name":"googleads.py","file_ext":"py","file_size_in_byte":6511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3549394936","text":"class Solution:\n def XXX(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n temp = 0\n for i in range(len(nums)):\n if temp:\n i -= temp\n if nums[i] == 0:\n nums.pop(i)\n nums.insert(0, 0)\n elif nums[i] == 2:\n temp += 1\n nums.pop(i)\n nums.append(2)\n\n","repo_name":"kkcookies99/UAST","sub_path":"Dataset/Leetcode/train/75/131.py","file_name":"131.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"} +{"seq_id":"20584237067","text":"import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom glob import glob\nimport os\nfrom PIL import Image\nimport numpy as np\nimport cv2 as cv\n\n\nclass ClassificationDataset(Dataset):\n def __init__(self, dataset_dir, image_size=(336, 120), data_transform=None):\n self.super_class_folders = glob(dataset_dir + \"/*/\")\n self.images_with_class = []\n self.data_transform = data_transform\n self.image_size = 
image_size\n super_class_id = 0\n class_id = 0\n for super_folder in self.super_class_folders:\n sub_folders = glob(super_folder + \"/*/\")\n #print(f\"sub-folders: {sub_folders}\")\n for folder in sub_folders:\n png_paths = glob(f\"{folder}*.png\")\n jpg_paths = glob(f\"{folder}*.jpg\")\n all_images = []\n all_images.extend(png_paths)\n all_images.extend(jpg_paths)\n for img_path in all_images:\n self.images_with_class.append((img_path, super_class_id, class_id))\n #print(f\"image in dataset: {self.images_with_class[-1]}\")\n class_id += 1\n super_class_id += 1\n\n def __len__(self):\n return len(self.images_with_class)\n\n def __getitem__(self, idx):\n img_path, super_class_id, class_id = self.images_with_class[idx]\n image = Image.open(img_path)\n if self.data_transform:\n image = self.data_transform(image)\n else:\n image = transforms.ToTensor()(image)\n image = F.interpolate(image.unsqueeze(0), self.image_size).squeeze(0)\n return image, super_class_id, class_id\n\n\nclass TripletDataset(Dataset):\n def __init__(self, dataset_dir, images_per_class=2000, image_size=(336, 120), data_transform=None):\n self.super_class_folders = glob(dataset_dir + \"/*/\")\n self.images_grouped_by_class = []\n self.flattened_class_with_images = []\n self.images_per_class = images_per_class\n self.image_size = image_size\n self.data_transform = data_transform\n self.triplets = []\n class_id = 0\n for super_folder in self.super_class_folders:\n sub_folders = glob(super_folder + \"/*/\")\n all_images = []\n for folder in sub_folders:\n png_paths = glob(f\"{folder}*.png\")\n jpg_paths = glob(f\"{folder}*.jpg\")\n all_images.extend(png_paths)\n all_images.extend(jpg_paths)\n for img_path in all_images:\n self.flattened_class_with_images.append((img_path, class_id))\n self.images_grouped_by_class.append((class_id, all_images))\n class_id += 1\n self.num_classes = len(self.super_class_folders)\n print(f\"{class_id}\")\n self.sample_triplets()\n\n def sample_triplets(self):\n self.triplets = []\n for id in range(self.num_classes):\n cid, class_images = self.images_grouped_by_class[id]\n class_images = np.array(class_images).copy()\n other_images = [r[1] for r in self.images_grouped_by_class if r[0] != cid]\n other_images = [item for sub_list in other_images for item in sub_list]\n other_images = np.array(other_images)\n np.random.shuffle(other_images)\n np.random.shuffle(class_images)\n for i in range(self.images_per_class):\n anchor = class_images[0]\n class_images = class_images[1:]\n positive = np.random.choice(class_images, 1)[0]\n #print(f\"a: {anchor}, p: {positive}, n: {negative}\")\n self.triplets.append((cid, anchor, positive))\n\n def __len__(self):\n return len(self.triplets)\n\n def __getitem__(self, idx):\n cid, a_path, p_path = self.triplets[idx]\n a_img = Image.open(a_path)\n p_img = Image.open(p_path)\n\n if self.data_transform:\n a_img = self.data_transform(a_img)\n p_img = self.data_transform(p_img)\n else:\n a_img = transforms.ToTensor()(a_img)\n p_img = transforms.ToTensor()(p_img)\n\n a_img = F.interpolate(a_img.unsqueeze(0), self.image_size).squeeze(0)\n p_img = F.interpolate(p_img.unsqueeze(0), self.image_size).squeeze(0)\n\n return cid, a_img, p_img\n","repo_name":"EmilRyberg/P7Tiago-PersonFollowing","sub_path":"src/person_detector/person_detector/feature_extractor/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"2794174838","text":"from django.db 
import models\nfrom django.utils.timezone import now\n# noinspection PyUnresolvedReferences\nfrom user.models import User\n# noinspection PyUnresolvedReferences\nfrom merchant.models import Merchant\n\ntype_choices = (('default', '自定义活动'), ('food', '吃吃喝喝'),\n ('movie', '看电影'), ('game', '游戏'),\n ('outdoor', '户外活动'))\nstatus_choices = (('not_start', '还未开始'), ('in_progress', '正在进行'),\n ('finished', '已经结束'))\n\n\nclass Activity(models.Model):\n # 活动发起人\n initiator = models.ForeignKey(User, on_delete=models.CASCADE,\n related_name='initiated_activities')\n # 活动的标题\n title = models.CharField(max_length=36)\n # 活动涉及的商家\n merchant = models.ForeignKey(Merchant, on_delete=models.CASCADE,\n related_name='activities', null=True)\n # 活动的参与人员\n participants = models.ManyToManyField(User, related_name='participated_activities')\n # 活动是否对非相关用户可见\n is_private = models.BooleanField(default=False)\n # 活动的类型\n type = models.CharField(choices=type_choices, default=type_choices[0][0],\n max_length=16)\n # 活动的状态\n status = models.CharField(choices=status_choices, default=status_choices[0][0],\n max_length=16)\n # 活动描述\n introduction = models.TextField(max_length=512)\n # 活动开始的时间\n start_time = models.DateTimeField()\n # 活动结束的时间\n end_time = models.DateTimeField()\n # 发起活动的时间\n create_time = models.DateTimeField(default=now)\n\n\nclass Comment(models.Model):\n # 评论所属的活动\n activity = models.ForeignKey(Activity, on_delete=models.CASCADE,\n related_name='comments')\n # 评论用户昵称\n nickname = models.CharField(max_length=16)\n # 评论用户名\n username = models.CharField(max_length=16)\n # 评论内容\n content = models.CharField(max_length=140)\n # 评论时间\n create_time = models.DateTimeField(default=now)\n","repo_name":"zgagdfd/RealFriend","sub_path":"server/activity/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30036176063","text":"class Solution:\n def hasValidPath(self, grid: List[List[int]]) -> bool:\n if len(grid) == 1 and len(grid[0]) ==1 :\n return True\n paths= {\n 1: [(0,-1),(0,1)],\n 2: [(1,0),(-1,0)],\n 3: [(0,-1),(1,0)],\n 4: [(1,0), (0,1)],\n 5: [(0,-1), (-1,0)],\n 6: [(-1,0), (0,1)],\n }\n visited= set()\n def inbound(i, j):\n return i >= 0 and j >= 0 and i list[str]:\n with open(file_name) as infile:\n return [line.rstrip() for line in infile.readlines()]\n\n\ndef to_crate_stacks(crate_stack_lines: list[str]) -> dict[int, list[str]]:\n \"\"\"Crate stacks is a dictionary with keys for column indices and values being lists representing the stack.\"\"\"\n crate_stacks = {idx: [] for idx in range(N_STACKS)}\n for line in crate_stack_lines[::-1][1:]:\n chunk_size = 4\n line_chunked = [line[i:i + chunk_size] for i in range(0, len(line), chunk_size)]\n for col_idx, entry in enumerate(line_chunked):\n if not entry.isspace():\n crate_stacks[col_idx].append(entry.strip()[1:-1])\n return crate_stacks\n\n\n@dataclass\nclass MoveOperation:\n amount: int\n destination: int # as given in the input file, is index + 1\n target: int # as given in the input file, is index + 1\n\n\ndef to_move_operations(plan_lines: list[str]) -> list[MoveOperation]:\n move_operations = []\n for line in plan_lines:\n split_line = line.split(' ')\n move_operations.append(MoveOperation(int(split_line[1]), int(split_line[3]), int(split_line[5])))\n return move_operations\n\n\ndef apply_move_operations_task1_style(crate_stacks: dict[int, list[str]], move_plan: list[MoveOperation]) -> dict[int, list[str]]:\n for move in 
move_plan:\n for _ in range(move.amount):\n item = crate_stacks[move.destination - 1].pop()\n crate_stacks[move.target - 1].append(item)\n return crate_stacks\n\n\ndef apply_move_operations_task2_style(crate_stacks: dict[int, list[str]], move_plan: list[MoveOperation]) -> dict[int, list[str]]:\n for move in move_plan:\n items = crate_stacks[move.destination - 1][-move.amount:]\n for _ in range(move.amount):\n crate_stacks[move.destination - 1].pop()\n crate_stacks[move.target - 1] += items\n return crate_stacks\n\n\ndef get_top_items(crate_stacks: dict[int, list[str]]) -> str:\n return ''.join([stack[-1] for stack in crate_stacks.values()])\n\n\nif __name__ == '__main__':\n crate_stack_lines_ = load_lines('crate_stack.txt')\n plan_lines_ = load_lines('move_plan.txt')\n\n # Task 1\n print(f'')\n crate_stacks_ = to_crate_stacks(crate_stack_lines_)\n move_operations_ = to_move_operations(plan_lines_)\n final_crate_stacks = apply_move_operations_task1_style(crate_stacks_, move_operations_)\n print(f'Task 1 top items: {get_top_items(final_crate_stacks)}')\n\n # Task 2\n print(f'')\n crate_stacks_ = to_crate_stacks(crate_stack_lines_)\n move_operations_ = to_move_operations(plan_lines_)\n final_crate_stacks = apply_move_operations_task2_style(crate_stacks_, move_operations_)\n print(f'Task 2 top items: {get_top_items(final_crate_stacks)}')\n","repo_name":"iptch/2022-advent-of-code","sub_path":"mhr/day5/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"71476167271","text":"from django.urls import path\nfrom main_app.views import ( ProjectListView, ProjectDetailView, create_page, \n CreateMainView, CreateChildView, CreateFrontView, \n CreateCarouselView, UpdateMainView, DeleteMainView, \n CreateMainDetailView, DeleteChildView, UpdateCarouselView,\n DeleteCarouselView, UpdateFrontView, DeleteFrontView,\n CreateProductView, UpdateProductView, DeleteProductView )\n\n\napp_name = 'main_app'\n\nurlpatterns = [\n path('', ProjectListView.as_view(), name='project_list'),\n path('/', ProjectDetailView.as_view(), name='project_detail'),\n path('/create_main_detail/', CreateMainDetailView.as_view(), name='create_main_detail'),\n path('/update/', UpdateMainView.as_view(), name='update_main'),\n path('/update_carousel/', UpdateCarouselView.as_view(), name='update_carousel'),\n path('/update_product/', UpdateProductView.as_view(), name='update_product'),\n path('/update_front/', UpdateFrontView.as_view(), name='update_front'),\n path('/delete/', DeleteMainView.as_view(), name='delete_main'),\n path('/delete_carousel/', DeleteCarouselView.as_view(), name='delete_carousel'),\n path('/delete_front/', DeleteFrontView.as_view(), name='delete_front'),\n path('/delete_child/', DeleteChildView.as_view(), name='delete_child'),\n path('/delete_product/', DeleteProductView.as_view(), name='delete_product'),\n path('create/', create_page, name='create_page'),\n path('create/create_main/', CreateMainView.as_view(), name='create_main' ),\n path('create/create_child/', CreateChildView.as_view(), name='create_child' ),\n path('create/create_front/', CreateFrontView.as_view(), name='create_front' ),\n path('create/create_carousel/', CreateCarouselView.as_view(), name='create_carousel' ),\n path('create/create_product/', CreateProductView.as_view(), name='create_product' 
),\n]\n\n\n","repo_name":"rlcayaco/salt-ions-website","sub_path":"website_project/main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33935718325","text":"from odoo.addons.website.tests.test_performance import UtilPerf\n\n\nclass TestPerformance(UtilPerf):\n def test_10_perf_sql_website_controller_minimalist(self):\n url = '/empty_controller_test'\n select_tables_perf = {\n 'base_registry_signaling': 1,\n }\n self._check_url_hot_query(url, 1, select_tables_perf)\n self.assertEqual(self._get_url_hot_query(url, cache=False), 1)\n","repo_name":"odoo/odoo","sub_path":"addons/test_website/tests/test_performance.py","file_name":"test_performance.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"} +{"seq_id":"74373066788","text":"# importing packages\nfrom flask import Flask ,render_template, redirect, url_for, session, request, logging\nimport requests\nimport json\nimport dbquery\nimport numpy\napp = Flask(__name__) #app initialisation\n\n@app.route('/', methods=['GET','POST']) #landing page intent\ndef home():\n sql = \"SELECT TIME_STAMP FROM DATA WHERE USERID =1 AND DATA_DATE = CURDATE();\"\n data = dbquery.fetchall(sql)\n List = list()\n #converting from tuple to string\n for t in data:\n List.append(int(t[0]))\n List.sort()\n data = {x:List.count(x) for x in List}\n print(data)\n \n return render_template(\"index.html\",data=data) #display the html template\n\n\n\n#method to insert data to db\n@app.route('/insert_to_db', methods=['POST']) #landing page intent\ndef data_from_model():\n if request.method == 'POST':\n x= request.get_json()\n time = x['time']\n userid =x['userid']\n sql=\"INSERT INTO DATA VALUES (%s ,NOW(),%s);\"%(userid,time)\n dbquery.inserttodb(sql)\n return \"200\"\n return \"Error\"\n\n\n\n\n\n\nif __name__=='__main__':\n\tapp.run(debug=True,host=\"0.0.0.0\",port=8000) \n #use threaded=True instead of debug=True for production\n # use port =80 for using the http port\n\n\n\n#sample code for form data recieve\n# request.form['name']\n# Sample Code for JSON send data to api\n\n#url = 'URL_FOR_API'\n#data = {'TimeIndex':time1 ,'Name':name,'PhoneNumber':phone}\n#headers = {'content-type': 'application/json'}\n#r=requests.post(url, data=json.dumps(data), headers=headers)\n#data = r.json()\n#print(data)\n\n\n#Sample code for JSON recieve data from API\n\n#url = 'URL_FOR_API'\n#headers = {'content-type': 'application/json'}\n#r=requests.get(url, headers=headers)\n#data = r.json()\n#count = data['Count']","repo_name":"IODevelopers/harp","sub_path":"website-backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"33817535867","text":"import requests\nfrom pprint import pprint\n\n\ndef credits(title):\n\n URL = f'https://api.themoviedb.org/3/search/movie'\n \n param= {\n 'api_key' : '6a5ece7778e61cb35c55c953b8743b0d',\n 'language' : 'ko-KR',\n 'region' : 'KR',\n 'query' : title,\n 'page' : 1\n }\n \n r = requests.get(URL, params = param).json().get('results')\n\n if not r:\n return None\n\n else:\n ids = r[0]['id']\n\n credit_URL = f'https://api.themoviedb.org/3/movie/{ids}/credits'\n\n credit_URL_param = {\n 'api_key' : '6a5ece7778e61cb35c55c953b8743b0d',\n 'language' : 'ko-KR',\n 'region' : 'KR',\n }\n\n credit_movie = 
requests.get(credit_URL, params = credit_URL_param).json()\n \n credit_movie_cast = credit_movie['cast']\n credit_movie_crew = credit_movie['crew'] \n\n cast_dic = {\n 'cast' : [],\n 'directing' : []\n }\n for cr in credit_movie_crew:\n if cr['department'] == 'Directing':\n cast_dic['directing'] += [cr['name']]\n\n for ca in credit_movie_cast:\n if ca['known_for_department'] == 'Acting':\n if ca['cast_id'] < 10:\n cast_dic['cast'] += [ca['name']]\n\n return cast_dic\n\n\n\n\n\n# 아래의 코드는 수정하지 않습니다.\nif __name__ == '__main__':\n \"\"\"\n 제목에 해당하는 영화가 있으면 해당 영화 id를 통해 영화 상세정보를 검색하여 주연배우 목록(cast)과 스태프(crew) 중 연출진 목록을 반환\n 영화 id 검색에 실패할 경우 None을 반환\n \"\"\"\n pprint(credits('기생충'))\n # {'cast': ['Song Kang-ho', 'Lee Sun-kyun', ..., 'Jang Hye-jin'], 'crew': ['Bong Joon-ho', 'Park Hyun-cheol', ..., 'Yoon Young-woo']}\n pprint(credits('검색할 수 없는 영화'))\n # None\n","repo_name":"YuJinUk/PJT","sub_path":"pjt-master-02/pjt-master/02_pjt/code/problem_e.py","file_name":"problem_e.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"43456537547","text":"#!/usr/bin/python3 -W ignore\n# -*- coding: utf-8 -*-\n\nimport sys\nimport requests\nimport argparse\nimport logging\n\n\nlogging.basicConfig(format='[%(asctime)s] %(levelname)s:%(name)s:%(message)s', datefmt='%X', level=logging.ERROR)\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-H\", \"--host\", help=\"Hostname or IP address of the node to check, e.g. 127.0.0.1, domain.com\")\nparser.add_argument(\"-v\", \"--verbose\", help=\"Set verbosity level\", action='count')\nargs = parser.parse_args()\nif args.host is None:\n logging.error(\"Server is not set, exiting.\")\n sys.exit(2)\nif args.verbose:\n logger = logging.getLogger()\n levels = {\n 0 : logging.ERROR,\n 1 : logging.WARNING,\n 2 : logging.INFO,\n 3 : logging.DEBUG,\n }\n try:\n level = levels[args.verbose]\n except KeyError:\n level = logging.DEBUG\n logger.setLevel(level)\n logging.info('Setting logging level to %s', logging.getLevelName(level))\n\nhost=args.host\n\nURL_prizmNodeState = 'https://'+host+':9976/prizm?requestType=getState&includeCounts=false&random=0.461040019'\ntry:\n response = requests.get(URL_prizmNodeState, verify=False, timeout=2)\nexcept Exception as ex:\n logging.exception('Failed to get response from %s', URL_prizmNodeState)\n print(\"CRITICAL - %s\" % str(ex))\n sys.exit(2)\n\ntry:\n result_host=response.json()\n blockchainState=result_host['blockchainState']\n numberOfBlocks=result_host['numberOfBlocks']\n logging.info('%s: blockchainState: %s', host, blockchainState)\n logging.info('%s: numberOfBlocks: %d', host, numberOfBlocks)\nexcept Exception as ex:\n logging.exception('Failed to parse output from host %s', host)\n print('CRITICAL - %s', str(ex))\n sys.exit(2)\n\nhost_prizmApi = 'blockchain.prizm.space'\nURL_prizmState = 'https://' + host_prizmApi + '/prizm?requestType=getState&includeCounts=false&random=0.461040019047640'\ntry:\n response = requests.get(URL_prizmState, verify=False)\nexcept Exception as ex:\n logging.exception('Failed to get response from %s', URL_prizmNodeState)\n print(\"CRITICAL - \" + host_prizmApi + str(ex))\n sys.exit(2)\n\ntry:\n result_api=response.json()\n apinumberOfBlocks=result_api['numberOfBlocks']\n logging.info('%s: numberOfBlocks: %d', host_prizmApi, numberOfBlocks)\nexcept:\n logging.exception('Failed to parse output from host %s', host)\n print('CRITICAL - %s', str(ex))\n sys.exit(2)\n\nDIFF=apinumberOfBlocks - 
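# NOTE: in the threshold logic below, `if DIFF >= 10 or DIFF < 19:` is true
# for every DIFF, so it cannot express the intended 10..19 WARNING band --
# `and` was almost certainly meant. Self-contained sketch of the usual Nagios
# mapping (classify_lag is an invented name, not this script's):
def classify_lag(diff, warn=10, crit=20):
    # 0 = OK, 1 = WARNING, 2 = CRITICAL, matching this script's exit codes
    if diff >= crit:
        return 2
    if diff >= warn:
        return 1
    return 0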
numberOfBlocks\n\nif apinumberOfBlocks > numberOfBlocks:\n logging.info('%s: lagging behind, diff is %d', host, DIFF)\n if blockchainState != 'UP_TO_DATE':\n if DIFF >= 20:\n print(\"CRITICAL - BlockState:\" + str(blockchainState) + \", \" + str(DIFF) + \" blocks missed!\")\n sys.exit(2)\n if DIFF >= 10 or DIFF < 19:\n print(\"WARNING - BlockState:\" + str(blockchainState) + \", \" + str(DIFF) + \" blocks missed!\")\n sys.exit(1)\n else:\n print(\"OK - BlockState:\" + str(blockchainState) + \", \" + str(DIFF) + \" blocks missed!\")\n sys.exit(0)\n logging.info('%s: blockchainState: %s', host, blockchainState)\nelse:\n logging.info('%s: diff is %d', host, DIFF)\n print(\"OK - \" + str(DIFF) + \" blocks missed\")\n sys.exit(0)\n\n","repo_name":"AndreiKozak/nagios_scripts","sub_path":"check_prizm_node.py","file_name":"check_prizm_node.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"4707843624","text":"#\n# Voxel Builder Updater\n#\nimport os\nimport sys\nimport shutil\nfrom io import BytesIO\nfrom urllib.request import urlopen\nfrom zipfile import ZipFile\n\n\nVBUILDER = 'https://github.com/nimadez/voxel-builder/archive/refs/heads/main.zip'\nEXCLUDE = [ \"voxel-builder-main\", \"python_embed\", \"electron\" ]\nrun_bat = \"\"\"@echo off\ntitle Voxel Builder\nstart \"\" electron\\electron .\n\"\"\"\n\ncwd = os.getcwd()\n\n\ndef main():\n DIR_SRC = cwd + '/voxel-builder-main'\n DIR_DST = cwd\n\n remove_directory(DIR_SRC)\n\n try:\n print(\"Connecting to GitHub...\")\n downloadZip(VBUILDER, DIR_DST)\n os.system(\"cls\")\n except:\n input(\"Error: Unable to fetch GitHub repository, check your internet connection.\")\n sys.exit(0)\n\n print(' --------------------------------')\n print(' Voxel Builder Updater')\n print(' --------------------------------')\n if input(\" Begin Update (Y/N)? \").upper() != \"Y\":\n remove_directory(DIR_SRC)\n sys.exit(0)\n\n # clear previous installation\n if os.path.exists(DIR_DST):\n os.chdir(DIR_DST)\n for item in os.listdir(os.getcwd()):\n if item not in EXCLUDE:\n if os.path.isfile(item):\n os.remove(item)\n elif os.path.isdir(item):\n shutil.rmtree(item, ignore_errors=True)\n\n # extract repository\n print('\\nSetting up voxel-builder...')\n for f in os.listdir(DIR_SRC):\n shutil.move(os.path.join(DIR_SRC, f), DIR_DST)\n os.rmdir(DIR_SRC)\n\n with open(DIR_DST + \"/run.bat\", \"w\") as f:\n f.write(run_bat)\n print('Done')\n\n\ndef downloadZip(url, destdir):\n with urlopen(url) as zip:\n with ZipFile(BytesIO(zip.read())) as zf:\n zf.extractall(destdir)\n\n\ndef remove_directory(dir):\n if os.path.exists(dir):\n os.chdir(dir)\n for item in os.listdir(os.getcwd()):\n if os.path.isfile(item):\n os.remove(item)\n elif os.path.isdir(item):\n shutil.rmtree(item, ignore_errors=True)\n os.chdir(cwd)\n os.rmdir(dir)\n\n\nif __name__== \"__main__\":\n main()\n print(\"\\nUpdate complete.\")\n input()\n","repo_name":"nimadez/voxel-builder","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"71"} +{"seq_id":"12041746455","text":"# Write a program to handle an error if the user entered the number more than four digits\n# it should return “Please length is too short/long !!! 
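# NOTE: the task comment above asks for exactly four digits, but the check
# just below uses len(number) <= 4, which also blesses 0-3 digit input with
# "Perfect". Minimal sketch of an exact check (value is a stand-in for the
# real input() call):
value = "1234"
if len(value) == 4 and value.isdigit():
    print("Perfect")
else:
    print("Length is too short/long! Please provide only four digits")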
Please provide only four digits”\n\ntry:\n number = input(\"Enter the number : \")\n if len(number) <= 4:\n print(\"Perfect\")\n else:\n raise Exception\nexcept Exception:\n print(\"Length is too short/long! Please provide only four digits\")","repo_name":"DevanshiPatel12/PythonAssignmentCA2020","sub_path":"Task-5/Qn-3.py","file_name":"Qn-3.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36131455221","text":"import pytest\nfrom requests.exceptions import RequestException\nfrom django.contrib.auth import get_user_model\n\nfrom creator.jobs.models import Job\nfrom creator.studies.models import Study\nfrom creator.studies.factories import StudyFactory\nfrom creator.studies.dataservice import sync_dataservice_studies\n\nMOCK_RESP = {\n \"_links\": {\"next\": \"/studies?after=1529089066.003078\", \"self\": \"/studies\"},\n \"_status\": {\"code\": 200, \"message\": \"success\"},\n \"limit\": 10,\n \"results\": [\n {\n \"_links\": {\n \"collection\": \"/studies\",\n \"investigator\": \"/investigators/IG_JBKNYBM3\",\n \"participants\": \"/participants?study_id=SD_9PYZAHHE\",\n \"self\": \"/studies/SD_9PYZAHHE\",\n \"study_files\": \"/study-files?study_id=SD_9PYZAHHE\",\n },\n \"attribution\": \"https://www.ncbi.nlm.nih.gov/\",\n \"created_at\": \"2018-05-22T21:12:42.999818+00:00\",\n \"data_access_authority\": \"dbGaP\",\n \"external_id\": \"phs001168\",\n \"kf_id\": \"SD_9PYZAHHE\",\n \"modified_at\": \"2019-08-07T14:30:22.131584+00:00\",\n \"name\": None,\n \"release_status\": \"Pending\",\n \"short_name\": \"Kids First: Orofacial Cleft - European Ancestry\",\n \"version\": \"v2.p2\",\n \"visible\": True,\n },\n {\n \"_links\": {\n \"collection\": \"/studies\",\n \"investigator\": \"/investigators/IG_4RTENGEW\",\n \"participants\": \"/participants?study_id=SD_DYPMEHHF\",\n \"self\": \"/studies/SD_DYPMEHHF\",\n \"study_files\": \"/study-files?study_id=SD_DYPMEHHF\",\n },\n \"attribution\": \"https://www.ncbi.nlm.nih.gov/\",\n \"created_at\": \"2018-06-11T13:26:50.673622+00:00\",\n \"data_access_authority\": \"dbGaP\",\n \"external_id\": \"phs001436\",\n \"kf_id\": \"SD_DYPMEHHF\",\n \"modified_at\": \"2019-10-01T15:23:31.308197+00:00\",\n \"name\": \"Discovering the Genetic Basis of Human Neuroblastoma\",\n \"release_status\": \"Pending\",\n \"short_name\": \"Kids First: Neuroblastoma\",\n \"version\": \"v1.p1\",\n \"visible\": True,\n },\n ],\n}\n\n\ndef test_sync_dataservice_studies_new_studies(db, mocker):\n \"\"\"\n Test that when new studies are found in the Data Service, a warning is\n thrown and no studies are created.\n \"\"\"\n req_mock = mocker.patch(\"creator.studies.dataservice.requests\")\n req_mock.get().json.return_value = MOCK_RESP\n logger = mocker.patch(\"creator.studies.dataservice.logger\")\n\n sync_dataservice_studies()\n\n assert Study.objects.count() == 0\n assert logger.warning.call_count == 2\n\n\ndef test_sync_dataservice_studies_deleted(db, mocker):\n \"\"\"\n Test that studies that have been deleted in the Dataservice are marked\n in the Study Creator\n \"\"\"\n req_mock = mocker.patch(\"creator.studies.dataservice.requests\")\n req_mock.get().json.return_value = MOCK_RESP\n\n study_1 = StudyFactory(kf_id=\"SD_DYPMEHHF\")\n study_2 = StudyFactory(kf_id=\"SD_9PYZAHHE\")\n\n DEL_RESP = MOCK_RESP.copy()\n DEL_RESP[\"results\"] = DEL_RESP[\"results\"][:1]\n req_mock.get().json.return_value = DEL_RESP\n\n sync_dataservice_studies()\n assert Study.objects.count() == 2\n 
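# NOTE: the count stays at 2 deliberately -- sync_dataservice_studies
# soft-deletes by flipping Study.deleted instead of removing rows, which is
# exactly what the two assertions that follow pin down.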
assert Study.objects.get(kf_id=\"SD_DYPMEHHF\").deleted is True\n assert Study.objects.get(kf_id=\"SD_9PYZAHHE\").deleted is False\n\n\ndef test_sync_dataservice_studies_request_error(db, mocker):\n \"\"\"\n Test that errors in request are handled\n \"\"\"\n req_mock = mocker.patch(\"creator.studies.dataservice.requests\")\n req_mock.get.side_effect = RequestException\n\n with pytest.raises(RequestException):\n sync_dataservice_studies()\n\n\ndef test_sync_dataservice_studies_parse_error(db, mocker):\n \"\"\"\n Test that errors in parsing request are handled\n \"\"\"\n req_mock = mocker.patch(\"creator.studies.dataservice.requests\")\n req_mock.get().json.side_effect = Exception(\"parse error\")\n\n with pytest.raises(Exception) as err:\n sync_dataservice_studies()\n assert err == \"parse error\"\n","repo_name":"kids-first/kf-api-study-creator","sub_path":"tests/studies/test_dataservice.py","file_name":"test_dataservice.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"18759510449","text":"from aocd import data\n\nDAY = '14'\nPART = 'a'\n\n\ndef draw_cave(lines):\n cave = [['.'] * 200 for _ in range(200)]\n for line in lines:\n line_coordinates = [(int(coordinate.split(',')[1]), int(coordinate.split(',')[0])) for coordinate in line.split(' -> ')]\n last_point_row_idx, last_point_col_idx = line_coordinates[0]\n for next_point_row_idx, next_point_col_idx in line_coordinates[1:]:\n points_in_line = []\n if last_point_row_idx != next_point_row_idx:\n step = 1 if last_point_row_idx <= next_point_row_idx else -1\n for row_idx in range(last_point_row_idx, next_point_row_idx + step, step):\n points_in_line.append((row_idx, next_point_col_idx))\n else:\n step = 1 if last_point_col_idx <= next_point_col_idx else -1\n for col_idx in range(last_point_col_idx, next_point_col_idx + step, step):\n points_in_line.append((next_point_row_idx, col_idx))\n for row_idx, col_idx in points_in_line:\n cave[row_idx][col_idx - 400] = '#'\n last_point_row_idx, last_point_col_idx = next_point_row_idx, next_point_col_idx\n return cave\n\n\ndef pour_sand(cave):\n sand_start_row_idx, sand_start_col_idx = 0, 500\n while True:\n sand_current_row_idx, sand_current_col_idx = (sand_start_row_idx, sand_start_col_idx)\n try:\n while True:\n if cave[sand_current_row_idx + 1][sand_current_col_idx - 400] == '.':\n sand_current_row_idx, sand_current_col_idx = sand_current_row_idx + 1, sand_current_col_idx\n elif cave[sand_current_row_idx + 1][sand_current_col_idx - 1 - 400] == '.':\n sand_current_row_idx, sand_current_col_idx = sand_current_row_idx + 1, sand_current_col_idx - 1\n elif cave[sand_current_row_idx + 1][sand_current_col_idx + 1 - 400] == '.':\n sand_current_row_idx, sand_current_col_idx = sand_current_row_idx + 1, sand_current_col_idx + 1\n else:\n cave[sand_current_row_idx][sand_current_col_idx - 400] = 'o'\n break\n except IndexError:\n print('Sand is flowing into the void, we can stop now.')\n break\n return cave\n\n\ndef solve(lines):\n cave = draw_cave(lines)\n cave = pour_sand(cave)\n return len([value for row in cave for value in row if value == 'o'])\n\n\ndef main():\n print(f'Advent of Code 2022 --- Day {DAY} --- Part {PART}')\n lines = data.splitlines()\n result = solve(lines)\n print(f'There are a total of {str(result)} units of sand in the cave.')\n\n\nif __name__ == '__main__':\n 
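# NOTE: the day-14 solver above fixes the cave at 200x200 with a hard-coded
# x-offset of 400, i.e. it assumes every input column lies in [400, 599].
# Deriving bounds from the parsed points is the safer general form; sketch
# (bounds is an invented helper, not part of this record):
def bounds(points):
    xs = [x for x, _ in points]
    ys = [y for _, y in points]
    return min(xs), max(xs), min(ys), max(ys)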
main()\n","repo_name":"iptch/2022-advent-of-code","sub_path":"mzu/14/year2022_day14a.py","file_name":"year2022_day14a.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"31670682019","text":"#! python3\n# extrema - a simple roguelike dungeoncrawler\n# last changes:\t\n# \nimport pygame, random, copy, time, logging, pygInterface as pyIF, utility as util, world\nfrom pygame.locals import *\nfrom utility import pos\n\n\n#################################################################################\n### create some constants #######################################################\n#################################################################################\n\n####################################################################\n# identifiers\n\n\nUP = 1\nDOWN = 3\nLEFT = 2\nRIGHT = 0\nNONE = None\nDIRECTIONS = [RIGHT, UP, LEFT, DOWN]\nHORIZONTALLY = 'horizontally'\nPERPENDICULAR = 'perpendidular'\n\nMOVING = 'moving'\nATTACKING = 'attacking'\nLOOKING = 'looking'\n\nX = 0 # use it for tuples which\nY = 1\t\t# contain x, y coordinates\nTUPEL = type(())\nDICT = type({})\n\n\npyIF.setupPygame()\n\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')\n\ndef main():\n\n ####################################################################\n # set up the game state variables\n\n moved = True\n lvlchange = 0\n mpo = 1\t\t# movement priority order\n actionMode = MOVING\n intendedDirection = False\n objects = []\t\t# objects in the players room\n monsters = []\t\t# monsters in the players room\n roomfilled = True\t# determines weather monster and objects are spawned in the current room\n currentLevelNr = 0\n isLandscape = False\n\n\n levels = [world.Level(levelnr, 3, 3, 2) for levelnr in range(9)]\n overworld = world.Level(11)\n overworld.createLandscape()\n level = overworld\n for i in range(len(levels)):\n oldLevel = level\n level = levels[i]\n level.create()\n level.currentRoom = level.specialRooms[world.LADDER_UP]\n room = level.currentRoom\n print(i)\n\n if i != 0:\n oldRoom = oldLevel.specialRooms[world.LADDER_DOWN]\n exitPoint = oldRoom.attributeObjs[world.LADDER_DOWN]\n else:\n exitPoint = overworld.specialRooms[world.LADDER_DOWN].attributeObjs[world.LADDER_DOWN]\n entryPoint = room.attributeObjs[world.LADDER_UP]\n exitPoint.startRoom = level.specialRooms[world.LADDER_UP]\n exitPoint.otherEnd = entryPoint\n exitPoint.nextLevel = level\n entryPoint.otherEnd = exitPoint\n entryPoint.nextLevel = oldLevel\n\n level = levels[0]\n\n\n player = world.Object(world.PLAYER, world.randomPositionIn(level.currentRoom), False)\n print()\n print(player.position)\n\n\n ########################################################################\n ### the main game loop #################################################\n ########################################################################\n\n while True:\n\n room = level.currentRoom\n ########################################################################\t\n # update gamestate #####################################################\n ########################################################################\n\n direction = None\n for event in pygame.event.get():\n if event.type == QUIT:\n pyIF.terminate()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n pyIF.terminate()\n elif event.key == K_UP:\n direction = UP\n elif event.key == K_DOWN:\n direction = DOWN\n elif event.key == K_RIGHT:\n direction = 
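# NOTE: the K_UP/K_DOWN/K_LEFT/K_RIGHT elif chain here is a natural dict
# lookup; sketch using this module's own constants (assumes the K_* names
# from pygame.locals imported above, so left commented):
#     KEY_DIRECTIONS = {K_UP: UP, K_DOWN: DOWN, K_LEFT: LEFT, K_RIGHT: RIGHT}
#     direction = KEY_DIRECTIONS.get(event.key, direction)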
RIGHT\n elif event.key == K_LEFT:\n direction = LEFT\n elif event.key == K_SPACE:\n moved = True\n\n # player actions\t\t\t\n elif event.key == K_a:\n actionMode = ATTACKING\n\n # TODO: after adding the create method to level-class-objects ship this code to function, in order to tidy up the main game loop\n # descent\n\n elif event.key in (K_d, K_k):\n if event.key == K_k:\n entry = world.LADDER_DOWN\n ext = world.LADDER_UP\n lvlchange = -1\n elif event.key == K_d:\n entry = world.LADDER_UP\n ext = world.LADDER_DOWN\n lvlchange = 1\n if (ext in room.attributeObjs) and player.position == room.attributeObjs[ext].position:\n exitPoint = room.attributeObjs[ext]\n level = exitPoint.nextLevel\n level.currentRoom = level.specialRooms[entry]\n player.position = exitPoint.otherEnd.position\n else:\n print((not (ext in room.attributeObjs)))\n print(\"Attribute Objs\", *[obj for key, obj in room.attributeObjs.items()])\n try:\n print(player.position != room.attributeObjs[ext].position)\n except KeyError:\n pass\n print(\"Player Position at {0}\".format(player.position))\n print('No ladder')\n\n elif event.key == K_l:\n actionMode = LOOKING\n\n if actionMode == MOVING and direction != NONE:\n world.updatePositionalData(player, level)\t# gets hallway moving options as well as the overall position when leaving a room\n world.movePlayer(player, direction, level.currentRoom)\n\n direction = None\n moved = True\n\n elif actionMode == LOOKING and direction != None:\n print(\"Looking at {0}\".format(level.giveInfoOn(util.toPosition(direction)+player.position)))\n actionMode = MOVING\n\n\n elif actionMode == ATTACKING:\n print('attack')\n actionMode = MOVING\n\n if moved:\n\n\n #######################################################\n # spawning and deleting enemies and treasures when entering/leaving a room\n\n if level.currentRoom and not roomfilled:\n monsters = level.currentRoom.monsters\n roomfilled = True\n\n if not level.currentRoom and roomfilled:\n monsters = []\n roomfilled = False\n\n #######################################################\n # enemy AI\n\n for monster in monsters:\n pass\n\n mpo = util.count(mpo, 2)\n\n # TODO: make the AI circumvent obstacles withoutil.too mutil.h trial and error.\n\n # TODO: build a simple fighting AI\n\n moved = False\n\n ###########################################################\n # space for console commentary\n\n pyIF.drawBackground()\n pyIF.drawGrid()\n if level.isLandscape:\n\n for tile in util.iter2D(level.landscape):\n if tile.objects:\n pyIF.drawObject(tile.objects[0])\n else:\n pyIF.drawObject(tile.base)\n else:\n for hallway in level.hallways:\n for i in range(len(hallway.geometry) - 2):\n hallwayPartNr = i + 1\n pyIF.drawHallway(hallway.geometry[hallwayPartNr])\n for r in util.iter2D(level.rooms):\n if r:\t# check weather there is a room to draw\n pyIF.drawRoom(r)\n pyIF.drawRoomContent(r)\n\n for monster in monsters:\n pyIF.drawObject(monster)\n# for x in range(len(level.landscape)):\n# for y in range(len(level.landscape[x])):\n# if not level.landscape[x][y].base.moveable:\n# pyIF.paintBlock(pos(x, y))\n pyIF.drawObject(player)\n pyIF.endFrame()\n\n ########################################################################\n # main game loop end ###################################################\n ########################################################################\t\t\t\t\n\n return\n\n\n\n\n\n\nif __name__ == 
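# NOTE: the loop above follows the standard pygame frame shape; stripped down:
#     while True:
#         for event in pygame.event.get():
#             handle(event)   # handle/update/draw are placeholders,
#         update()            # not functions of this module
#         draw()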
'__main__':\n\tmain()\n","repo_name":"chryms0n/extrema","sub_path":"extrema.py","file_name":"extrema.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19856893352","text":"from datetime import datetime\n\nfrom aiocsv import AsyncWriter\nimport aiofiles\nimport aiohttp\nimport asyncio\nimport logging\nfrom bs4 import BeautifulSoup\nfrom typing import List\n\n\nlogging.basicConfig(level=logging.INFO)\n\n\nasync def request_data(url: str) -> BeautifulSoup:\n async with aiohttp.ClientSession() as session:\n async with session.get(url=url) as r:\n logging.info(f'Parse {url} at {datetime.now()}')\n return BeautifulSoup(await r.text(encoding='utf-8'), 'lxml')\n\n\nasync def parse_soup(name, description, price, url) -> None:\n soup = await request_data(url)\n name.extend([x.text.strip() for x in soup.find_all('a', class_='name_item')])\n description.extend([x.text.split('\\n') for x in soup.find_all('div', class_='description')])\n price.extend([x.text for x in soup.find_all('p', class_='price')])\n\n\nasync def write_data(name: List[str], description: List[str], price: List[str], link: str, filename: str = 'res') -> None:\n async with aiofiles.open(f'{filename}.csv', 'w', encoding='utf-8-sig', newline='') as file:\n for item, descr, price in zip(name, description, price,):\n flatten = item, *[x.split(':')[1].strip() for x in descr if x], price\n print(flatten)\n writer = AsyncWriter(file, delimiter=';')\n await writer.writerow(flatten)\n logging.info(f'Файл создан для ссылки {link} at {datetime.now()}')\n\n\nasync def main(url,\n filename):\n tasks = []\n soup = await request_data(url + 'index1_page_1.html')\n category = [f'{link.get(\"href\")}' for link in soup.find('div', class_='nav_menu').find_all('a')]\n pagen = []\n for link in category:\n soup = await request_data(url + link)\n pagen.extend([f'{link.get(\"href\")}' for link in soup.find('div', class_='pagen').find_all('a')])\n\n name, description, price = [], [], []\n\n for link in pagen:\n tasks.append(asyncio.create_task(parse_soup(name, description, price, url + link)))\n\n await asyncio.gather(*tasks)\n\n await write_data(name, description, price, url, filename)\n\nif __name__ == '__main__':\n url_host = 'https://parsinger.ru/html/'\n out_file_name = 'result_all'\n asyncio.run(main(url_host, out_file_name))\n","repo_name":"Feraclin/WebParsingStudy","sub_path":"BS4/bs4_parse_4.8.5.py","file_name":"bs4_parse_4.8.5.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19495411148","text":"import re\nimport os\nimport json\nimport math\nimport nltk\nimport uuid\nimport pickle\nimport numpy as np\n# import textdistance\nimport pandas as pd\nfrom collections import Counter\n\nnltk.data.path.append('app/lib/models/wordnet') \nfrom nltk.tokenize import RegexpTokenizer, word_tokenize\nfrom nltk.stem import WordNetLemmatizer,PorterStemmer\nfrom nltk.corpus import stopwords\nfrom nltk.corpus import wordnet as wn\nfrom nltk.corpus import genesis\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.stem import SnowballStemmer\nfrom nltk.stem.lancaster import LancasterStemmer\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nlemmatizer = WordNetLemmatizer()\nstemmer = PorterStemmer()\ngenesis_ic = wn.ic(genesis, False, 0.0)\n \n# Load data from 
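# NOTE: the five pickle.load calls below differ only in the model name; a
# loop keeps names and paths in sync. Sketch reusing this record's own paths
# (the models dict name is invented):
import pickle
models = {}
for name in ('cEXT', 'cNEU', 'cAGR', 'cCON', 'cOPN'):
    with open('app/lib/models/{}.p'.format(name), 'rb') as fh:
        models[name] = pickle.load(fh)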
pickle files\ncEXT = pickle.load( open( \"app/lib/models/cEXT.p\", \"rb\"))\ncNEU = pickle.load( open( \"app/lib/models/cNEU.p\", \"rb\"))\ncAGR = pickle.load( open( \"app/lib/models/cAGR.p\", \"rb\"))\ncCON = pickle.load( open( \"app/lib/models/cCON.p\", \"rb\"))\ncOPN = pickle.load( open( \"app/lib/models/cOPN.p\", \"rb\"))\nvectorizer_31 = pickle.load( open( \"app/lib/models/vectorizer_31.p\", \"rb\"))\nvectorizer_30 = pickle.load( open( \"app/lib/models/vectorizer_30.p\", \"rb\"))\n\ndef preprocess(sentence):\n sentence=str(sentence)\n sentence = sentence.lower()\n sentence=sentence.replace('{html}',\"\") \n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', sentence)\n rem_url=re.sub(r'http\\S+', '',cleantext)\n rem_num = re.sub('[0-9]+', '', rem_url)\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(rem_num) \n filtered_words = [w for w in tokens if len(w) > 2 if not w in stopwords.words('english')]\n stem_words=[stemmer.stem(w) for w in filtered_words]\n lemma_words=[lemmatizer.lemmatize(w) for w in stem_words]\n return \" \".join(filtered_words)\n\ndef apply_nlp(data):\n data = data.fillna('')\n dirty_text = data.iloc[:,[4,21,22,23,24,25]].copy()\n dirty_text = dirty_text.applymap(lambda s:preprocess(s))\n data['words']=dirty_text.sum(axis=1).astype(str)\n return data\n\ndef padding(data):\n fellow = data.loc[data['Role:'] == \"Fellow\"]\n coach = data.loc[data['Role:'] == \"Coach\"]\n num_fellow = len(fellow)\n num_coach = len(coach)\n\n diff = math.floor(num_fellow/num_coach)\n rem = num_fellow%num_coach\n\n c_diff = math.floor(num_coach/num_fellow)\n c_rem = num_coach%num_fellow\n \n \n if(num_fellow > num_coach):\n coach = pd.concat([coach]*diff, ignore_index=True)\n \n if(rem>=1):\n last = coach.iloc[:rem]\n coach = coach.append([last], ignore_index=True)\n data = pd.concat([coach, fellow], ignore_index= \"true\")\n data['UID'] = ''\n uid = []\n for i in range(len(data['UID'])):\n x=uuid.uuid4()\n uid.append(x)\n data['UID']= pd.DataFrame(uid, columns=['UID'])\n\n elif(num_coach > num_fellow): \n fellow = pd.concat([fellow]*c_diff, ignore_index=True)\n\n if(c_rem>=1):\n last = fellow.iloc[:c_rem]\n fellow = fellow.append([last], ignore_index=True)\n data = pd.concat([coach, fellow], ignore_index= \"true\")\n data['UID'] = ''\n uid = []\n for i in range(len(data['UID'])):\n x=uuid.uuid4()\n uid.append(x)\n data['UID']= pd.DataFrame(uid, columns=['UID'])\n \n return data\n\ndef df_column_uniquify(df):\n df_columns = df['Full Name (First Middle Last)']\n new_columns = []\n for item in df_columns:\n counter = 0\n newitem = item\n while newitem in new_columns:\n counter += 1\n newitem = \"{}_{}\".format(item, counter)\n new_columns.append(newitem)\n df['Full Name (First Middle Last)'] = new_columns\n return df\n\ndef unpickle(filename):\n open_first = open(filename, \"rb\")\n Dic1 = pickle.load(open_first)\n open_again = open(filename, \"rb\")\n Dic2 = pickle.load(open_again)\n open_first.close()\n open_again.close()\n return Dic1,Dic2\n\ndef predict_personality(text):\n sentences = re.split(\"(?<=[.!?]) +\", text)\n text_vector_31 = vectorizer_31.transform(sentences)\n text_vector_30 = vectorizer_30.transform(sentences)\n EXT = cEXT.predict(text_vector_31)\n NEU = cNEU.predict(text_vector_30)\n AGR = cAGR.predict(text_vector_31)\n CON = cCON.predict(text_vector_31)\n OPN = cOPN.predict(text_vector_31)\n return [EXT[0], NEU[0], AGR[0], CON[0], OPN[0]]\n\ndef big5(data):\n score = []\n for row in data['words']:\n score.append(predict_personality(row))\n 
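# NOTE: preprocess() above computes stem_words and lemma_words and then
# returns the unstemmed filtered_words, so the stemming/lemmatisation passes
# are dead code as written; flagged rather than changed, since the pickled
# vectorizers were presumably fitted on the unstemmed form.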
data[['EXT', 'NEU', 'AGR', 'CON', 'OPN']]= pd.DataFrame(score, columns=[['EXT', 'NEU', 'AGR', 'CON', 'OPN']])\n data = data.astype(str)\n return data\n\ndef KNN_dictionary(data):\n final = data\n text = final[['Briefly describe a leader or person you admire and why.', 'What do you hope to gain/learn as a result of the fellow-coach relationship?']].applymap(lambda s:preprocess(s))\n text = text['Briefly describe a leader or person you admire and why.'].map(str) + \" \" + text['What do you hope to gain/learn as a result of the fellow-coach relationship?'].map(str) \n word_kw = text.to_frame()\n UID_kw = final['UID'] \n keywords_hash= dict()\n for i in range(len(word_kw)):\n keywords_hash[UID_kw[i]] = word_kw[0][i]\n data = calculate_keyword(keywords_hash, UID_kw, final)\n return data\n\ndef calculate_keyword(keywords_hash, UID_kw, final):\n x = KNN_NLC_Classifer()\n common_words = []\n for i in range(len(keywords_hash)):\n split_it = keywords_hash[UID_kw[i]].split()\n CounterVariable = Counter(split_it)\n most_occur = CounterVariable.most_common(1)\n if most_occur[0][0] not in common_words:\n common_words.append(most_occur[0][0])\n\n centroid = \" \".join(common_words)\n print(common_words)\n\n origin_hash= dict()\n for i in range(len(keywords_hash)):\n origin_hash[UID_kw[i]] = 1-x.document_similarity(centroid, keywords_hash[UID_kw[i]]) #distance from all participants and origin\n \n final[\"distance\"] = \"\"\n for i in range(len(origin_hash)):\n final[\"distance\"][i] = origin_hash[UID_kw[i]].astype(float)\n return final\n\nclass KNN_NLC_Classifer():\n def __init__(self, k=1, distance_type = 'path'):\n self.k = k\n self.distance_type = distance_type\n\n # This function is used for training\n def fit(self, x_train, y_train):\n self.x_train = x_train\n self.y_train = y_train\n\n # This function runs the K(1) nearest neighbour algorithm and\n # returns the label with closest match. 
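# NOTE: document_similarity further below averages the two one-way WordNet
# scores, sim(a, b) = (s(a, b) + s(b, a)) / 2, where s matches each synset of
# the first document to its best counterpart in the second -- that asymmetry
# is why both orderings are computed.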
\n def predict(self, x_test):\n self.x_test = x_test\n y_predict = []\n\n for i in range(len(x_test)):\n max_sim = 0\n max_index = 0\n for j in range(self.x_train.shape[0]):\n temp = self.document_similarity(x_test[i], self.x_train[j])\n if temp > max_sim:\n max_sim = temp\n max_index = j\n y_predict.append(self.y_train[max_index])\n return y_predict\n def convert_tag(self, tag):\n \"\"\"Convert the tag given by nltk.pos_tag to the tag used by wordnet.synsets\"\"\"\n tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}\n try:\n return tag_dict[tag[0]]\n except KeyError:\n return None\n\n\n def doc_to_synsets(self, doc):\n tokens = word_tokenize(doc+' ')\n\n l = []\n tags = nltk.pos_tag([tokens[0] + ' ']) if len(tokens) == 1 else nltk.pos_tag(tokens)\n\n for token, tag in zip(tokens, tags):\n syntag = self.convert_tag(tag[1])\n syns = wn.synsets(token, syntag)\n if (len(syns) > 0):\n l.append(syns[0])\n return l \n def similarity_score(self, s1, s2, distance_type = 'path'):\n s1_largest_scores = []\n\n for i, s1_synset in enumerate(s1, 0):\n max_score = 0\n for s2_synset in s2:\n if distance_type == 'path':\n score = s1_synset.path_similarity(s2_synset, simulate_root = False)\n else:\n score = s1_synset.wup_similarity(s2_synset) \n if score != None:\n if score > max_score:\n max_score = score\n \n if max_score != 0:\n s1_largest_scores.append(max_score)\n \n mean_score = np.mean(s1_largest_scores)\n \n return mean_score \n def document_similarity(self,doc1, doc2):\n \"\"\"Finds the symmetrical similarity between doc1 and doc2\"\"\"\n\n synsets1 = self.doc_to_synsets(doc1)\n synsets2 = self.doc_to_synsets(doc2)\n \n return (self.similarity_score(synsets1, synsets2) + self.similarity_score(synsets2, synsets1)) / 2","repo_name":"Maroon-White-Matching/BackendServer","sub_path":"app/utils/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27589212090","text":"#from mdr import MDR\nimport time\nimport numpy as np\nimport pandas as pd\n#from sklearn.model_selection import cross_val_score\n\ninput_file = '/home/ansohn/Python/data/gametes-data/loc2_filtered/a5000/Her04/a_5000s_2000her_0.4__maf_0.2_EDM-1_01.txt'\ndata = pd.read_csv(input_file, sep='\\t')\n\nt1 = time.time()\n\ndata = data.sample(frac=1)\nfeatures = data.drop('Class', axis=1)\npred_feat = features.ix[:, ['M0P0','M0P1']]\nprint(pred_feat.columns)\n#pred_feat = pred_feat.values\n#labels = data['Class'].values\n\n#mymdr = MDR()\n#mymdr.fit(pred_feat, labels)\n#pred_scores = cross_val_score(mymdr, pred_feat, labels, cv=10)\n#print('mdr_pred scores: ', np.mean(pred_scores))\n \nt2 = time.time()\n\n","repo_name":"sohnam/backup-scripts","sub_path":"mdr_predictive.py","file_name":"mdr_predictive.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2970270222","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"AutoDiff-Exponentials\",\n version=\"0.0.1\",\n author=\"Galit\",\n author_email=\"glukin@mit.edu\",\n description=\"Foward mode implemented.\",\n long_description=\"supports vectors, operators, and various trig functions\",\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/CSExponentials/cs207-FinalProject\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 
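# NOTE re the mdr_predictive.py record above: DataFrame.ix was removed in
# pandas 1.0, so features.ix[:, ['M0P0','M0P1']] no longer runs; the
# label-based equivalent is:
#     pred_feat = features.loc[:, ['M0P0', 'M0P1']]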
3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"CSExponentials/cs207-FinalProject","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26294257030","text":"#!/usr/bin/env python3\nfrom __future__ import print_function\nimport numpy as np\n\nimport sys\nimport rospy\nimport cv2\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport message_filters\n\n\nclass ImageDepthSynchroniser:\n def __init__(self):\n self.image_pub = rospy.Publisher(\"image_topic_2\", Image)\n\n self.bridge = CvBridge()\n self.image_sub = message_filters.Subscriber(\"/camera/color/image_raw\", Image)\n self.depth_sub = message_filters.Subscriber(\"/camera/aligned_depth_to_color/image_raw\", Image)\n self.image_depth_sync = message_filters.TimeSynchronizer([self.image_sub, self.depth_sub], 10)\n self.image_depth_sync.registerCallback(self.callback)\n\n def callback(self, image_data, depth_data):\n # rospy.loginfo(\"{time_difference}\".format(dt=image_data.header.stamp-depth_data.header.stamp))\n cv_image = np.frombuffer(image_data.data, dtype=np.uint8).reshape(image_data.height, image_data.width, -1)\n try:\n cv_image = self.bridge.imgmsg_to_cv2(image_data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n # try:\n # cv_depth = self.bridge.imgmsg_to_cv2(depth_data, \"16UC1\")\n # except CvBridgeError as e:\n # print(e)\n\n (rows, cols, channels) = cv_image.shape\n if cols > 60 and rows > 60:\n cv2.circle(cv_image, (50, 50), 10, 255)\n try:\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)\n\n\ndef main(args):\n ids = ImageDepthSynchroniser()\n rospy.init_node('image_depth_synchronise', anonymous=True)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"JinraeKim/myros","sub_path":"image_depth_synchroniser.py","file_name":"image_depth_synchroniser.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"39220301425","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nclass Ui_Form(object):\r\n def setupUi(self,Form,wid,lay,main_grid,h_lay,v_lay):\r\n self.model_all_widget = QtWidgets.QWidget(wid)\r\n self.model_all_widget.setStyleSheet(\"background-color: rgb(11, 11, 11);\")\r\n self.model_all_widget.setObjectName(\"model_all_widget\")\r\n self.all_grid_Layout = QtWidgets.QGridLayout(self.model_all_widget)\r\n self.all_grid_Layout.setContentsMargins(0, 0, 0, 0)\r\n self.all_grid_Layout.setObjectName(\"all_grid_Layout\")\r\n self.model_widget = QtWidgets.QWidget(self.model_all_widget)\r\n self.model_widget.setStyleSheet(\"\\n\"\r\n\"\\n\"\r\n\"background-color: rgb(11,11,11);\")\r\n self.model_widget.setObjectName(\"model_widget\")\r\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.model_widget)\r\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\r\n self.verticalLayout_2.setSpacing(0)\r\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\r\n self.horizontalLayout = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout.setSpacing(0)\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n spacerItem3 = QtWidgets.QSpacerItem(5, 40, 
QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout.addItem(spacerItem3)\r\n self.label = QtWidgets.QLabel(self.model_widget)\r\n self.label.setLayoutDirection(QtCore.Qt.LeftToRight)\r\n self.label.setStyleSheet(\"font: 75 16pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n\"color: rgb(255, 255, 255);\\n\"\r\n\"border-bottom: 3px solid rgb(72, 72, 72);\")\r\n self.label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label.setObjectName(\"label\")\r\n self.horizontalLayout.addWidget(self.label)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout)\r\n spacerItem4 = QtWidgets.QSpacerItem(20, 50, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)\r\n self.verticalLayout_2.addItem(spacerItem4)\r\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\r\n self.label_2 = QtWidgets.QLabel(self.model_widget)\r\n self.label_2.setStyleSheet(\"font: 75 12pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n\"color: rgb(255, 255, 255);\\n\"\r\n\"\")\r\n self.label_2.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_2.setObjectName(\"label_2\")\r\n self.horizontalLayout_3.addWidget(self.label_2)\r\n self.verticalLayout_2.addLayout(self.horizontalLayout_3)\r\n self.gridLayout = QtWidgets.QGridLayout()\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.gridLayout.addItem(spacerItem5, 0, 4, 1, 1)\r\n spacerItem6 = QtWidgets.QSpacerItem(20, 500, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)\r\n self.gridLayout.addItem(spacerItem6, 2, 2, 1, 1)\r\n spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.gridLayout.addItem(spacerItem7, 0, 0, 1, 1)\r\n self.cd_btn = QtWidgets.QPushButton(self.model_widget)\r\n self.cd_btn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.cd_btn.setStyleSheet(\"QPushButton{\\n\"\r\n\"\\n\"\r\n\"background-color: rgb(11,11,11);\\n\"\r\n\"color: rgb(255, 255, 255);\\n\"\r\n\"border-style:outset;\\n\"\r\n\"border-width:4.5px;\\n\"\r\n\"border-radius:12px;\\n\"\r\n\"border-color: rgb(182, 182, 182);\\n\"\r\n\"padding:6px\\n\"\r\n\"}\\n\"\r\n\"QPushButton:hover{\\n\"\r\n\"background-color: rgb(106, 106, 106);\\n\"\r\n\"}\\n\"\r\n\"\\n\"\r\n\"\")\r\n self.cd_btn.setText(\"\")\r\n icon = QtGui.QIcon()\r\n icon.addPixmap(QtGui.QPixmap(\"icon/cd.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n self.cd_btn.setIcon(icon)\r\n self.cd_btn.setIconSize(QtCore.QSize(150, 150))\r\n self.cd_btn.setObjectName(\"cd_btn\")\r\n self.gridLayout.addWidget(self.cd_btn, 0, 3, 1, 1)\r\n spacerItem8 = QtWidgets.QSpacerItem(20, 250, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)\r\n self.gridLayout.addItem(spacerItem8, 0, 2, 1, 1)\r\n self.internet_btn = QtWidgets.QPushButton(self.model_widget)\r\n self.internet_btn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\r\n self.internet_btn.setStyleSheet(\"\\n\"\r\n\"\\n\"\r\n\"QPushButton{\\n\"\r\n\"\\n\"\r\n\"background-color: rgb(11,11,11);\\n\"\r\n\"color: rgb(255, 255, 255);\\n\"\r\n\"border-style:outset;\\n\"\r\n\"border-width:4.5px;\\n\"\r\n\"border-radius:12px;\\n\"\r\n\"border-color: rgb(182, 182, 182);\\n\"\r\n\"padding:6px\\n\"\r\n\"}\\n\"\r\n\"QPushButton:hover{\\n\"\r\n\"background-color: rgb(106, 106, 106);\\n\"\r\n\"}\\n\"\r\n\"\\n\"\r\n\"\\n\"\r\n\"/*\\n\"\r\n\"QPushButton{\\n\"\r\n\"background-color:rgb(11, 11, 11);\\n\"\r\n\"color: 
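# NOTE: the same QPushButton stylesheet is pasted into each button of this
# form; hoisting one constant keeps them in sync. Sketch (BUTTON_QSS is an
# invented name):
#     BUTTON_QSS = "QPushButton { ... } QPushButton:hover { ... }"
#     self.cd_btn.setStyleSheet(BUTTON_QSS)
#     self.internet_btn.setStyleSheet(BUTTON_QSS)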
rgb(255, 255, 255);\\n\"\r\n\"border-style:solid ;\\n\"\r\n\"border-width:5px;\\n\"\r\n\"border-radius:0px;\\n\"\r\n\"border-color:rgb(11, 11, 11);\\n\"\r\n\"\\n\"\r\n\"}\\n\"\r\n\"QpushButton:focus{\\n\"\r\n\"\\n\"\r\n\" border-color: rgb(106, 106, 106);\\n\"\r\n\"}\\n\"\r\n\"*/\")\r\n self.internet_btn.setText(\"\")\r\n icon1 = QtGui.QIcon()\r\n icon1.addPixmap(QtGui.QPixmap(\"icon/internet.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\r\n self.internet_btn.setIcon(icon1)\r\n self.internet_btn.setIconSize(QtCore.QSize(150, 150))\r\n self.internet_btn.setObjectName(\"internet_btn\")\r\n self.gridLayout.addWidget(self.internet_btn, 0, 1, 1, 1)\r\n self.label_3 = QtWidgets.QLabel(self.model_widget)\r\n self.label_3.setStyleSheet(\"font: 75 10pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n\"color: rgb(255, 255, 255);\\n\"\r\n\"\")\r\n self.label_3.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_3.setObjectName(\"label_3\")\r\n self.gridLayout.addWidget(self.label_3, 1, 1, 1, 1)\r\n self.label_4 = QtWidgets.QLabel(self.model_widget)\r\n self.label_4.setStyleSheet(\"font: 75 10pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n\"color: rgb(255, 255, 255);\\n\"\r\n\"\")\r\n self.label_4.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_4.setObjectName(\"label_4\")\r\n self.gridLayout.addWidget(self.label_4, 1, 3, 1, 1)\r\n self.verticalLayout_2.addLayout(self.gridLayout)\r\n self.all_grid_Layout.addWidget(self.model_widget, 0, 0, 1, 1)\r\n lay.addWidget(self.model_all_widget, 0, 0, 1, 1)\r\n main_grid.addLayout(lay, 1, 0, 1, 1)\r\n h_lay.addWidget(wid)\r\n v_lay.addLayout(h_lay)\r\n self.retranslateUi()\r\n # self.retranslateUi(Form)\r\n # QtCore.QMetaObject.connectSlotsByName(Form)\r\n\r\n def retranslateUi(self):\r\n _translate = QtCore.QCoreApplication.translate\r\n # Form.setWindowTitle(_translate(\"Form\", \"Form\"))\r\n self.label.setText(_translate(\"Form\", \"Import new Models / Videos...\"))\r\n self.label_2.setText(_translate(\"Form\", \"How would you like to import new Models?\"))\r\n self.label_3.setText(_translate(\"Form\", \"Internet\"))\r\n self.label_4.setText(_translate(\"Form\", \"CD / USB\"))\r\n\r\n","repo_name":"princejagani/3d-software-gui-","sub_path":"3d/import_module.py","file_name":"import_module.py","file_ext":"py","file_size_in_byte":6918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"70782549989","text":"import httplib2\nimport json\nimport base64\nimport tempfile\nimport datetime\nfrom shutil import copyfile\nimport os, sys\nfrom multiprocessing.pool import ThreadPool\nfrom urllib.parse import urlencode\n\nparent_module = sys.modules['.'.join(__name__.split('.')[:-1]) or '__main__']\nif __name__ == '__main__' or parent_module.__name__ == '__main__':\n import config\nelse:\n from . 
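# NOTE: the parent_module lookup here lets the file run both as a script and
# as a package member; the plainer stdlib-only fallback with the same effect:
#     try:
#         from . import config
#     except ImportError:
#         import config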
import config\n\n_credentials = None\n_auth = \"\"\n\ndef _DecodeJson(s):\n try:\n #if (sys.version_info >= (3, 0)):\n # return json.loads(s.decode())\n #else:\n # return json.loads(s)\n return json.loads(s.decode())\n except:\n return s\n\ndef get_datetime(instance):\n # Function to get date and time of an instance.\n # CAREFUL, datetime orders the instances for analysis\n date_var = \"\"\n time_var = \"\"\n try:\n date_var = instance[\"ContentDate\"]\n time_var = instance[\"ContentTime\"]\n except:\n try:\n date_var = instance[\"AcquisitionDate\"]\n time_var = instance[\"AcquisitionTime\"]\n except:\n try:\n date_var = instance[\"StudyDate\"]\n time_var = instance[\"StudyTime\"]\n except:\n date_var = \"Unknown\" # Do not change, or else you will break the code!\n time_var = \"Unknown\"\n if date_var == \"\":\n date_var = \"Unknown\"\n if time_var == \"\":\n time_var = \"Unknown\"\n\n return date_var, time_var\n\n\ndef order_instance_datetime(instance_list):\n # This function gets instances with time stamp for further ordering\n\n instance_datetime = []\n instance_datetime_order = []\n epoch = datetime.datetime.utcfromtimestamp(0)\n instance_num = []\n for ii in instance_list:\n try:\n date_var, time_var = get_datetime(ii) # Get raw from REST (Dicom)\n date_str = datetime.datetime.strptime(date_var, \"%Y%m%d\").strftime(\"%Y-%m-%d\") # To string\n try:\n time_str = datetime.datetime.strptime(time_var, \"%H%M%S\").strftime(\"%H:%M:%S\")\n except:\n time_str = datetime.datetime.strptime(time_var, \"%H%M%S.%f\").strftime(\"%H:%M:%S.%f\")\n time_str = time_str.split('.')[0]\n inst_datetime_temp = date_str + \" \" + time_str\n instance_datetime.append(inst_datetime_temp)\n #try:\n # instance_datetime_order.append(int((datetime.datetime.strptime(inst_datetime_temp, \"%Y/%m/%d | %H:%M:%S.%f\") - epoch).total_seconds()*1000))\n #except:\n instance_datetime_order.append(int((datetime.datetime.strptime(inst_datetime_temp, \"%Y-%m-%d %H:%M:%S\") - epoch).total_seconds()*1000))\n except:\n instance_datetime.append(\"Unknown\")\n instance_datetime_order.append(int((epoch - epoch).total_seconds()*1000))\n\n try:\n manufact = ii[\"Manufacturer\"]\n except:\n manufact = \"Undefined\"\n\n if manufact in [\"Varian Medical Systems\"]:\n show_instance_label = \"RTImageLabel\"\n else:\n show_instance_label = \"InstanceNumber\"\n\n try:\n pp = ii[show_instance_label]\n instance_num.append(pp)\n except:\n instance_num.append(\"Undefined\")\n return instance_datetime, instance_datetime_order, instance_num\n\n\ndef SetCredentials(username, password):\n global _credentials, _auth\n _credentials = (username, password)\n _auth = base64.encodebytes( (_credentials[0] + ':' + _credentials[1] ).encode())\n\ndef _SetupCredentials(h):\n global _credentials, _auth\n if _credentials != None:\n h.add_credentials(_credentials[0], _credentials[1])\n\ndef DoGet(uri, data = {}, interpretAsJson = True):\n d = ''\n if len(data.keys()) > 0:\n d = '?' 
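# NOTE: base64.encodebytes (used by SetCredentials above) wraps at 76 chars
# and appends a trailing newline, so _auth.decode() smuggles '\n' into every
# Authorization header. Newline-free sketch with placeholder credentials:
import base64
token = base64.b64encode('user:password'.encode()).decode()
header_value = 'Basic ' + token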
+ urlencode(data)\n\n h = httplib2.Http()\n _SetupCredentials(h)\n\n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization' : 'Basic ' + _auth.decode()}\n resp, content = h.request(uri + d, 'GET', headers=headers)\n h.close()\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n return content.decode()\n else:\n return _DecodeJson(content)\n\n\ndef _DoPutOrPost(uri, method, data, contentType):\n h = httplib2.Http()\n _SetupCredentials(h)\n\n if isinstance(data, str):\n body = data\n if len(contentType) != 0:\n headers = { 'content-type' : contentType,\n 'Authorization' : 'Basic ' + _auth.decode() }\n else:\n headers = { 'content-type' : 'text/plain',\n 'Authorization' : 'Basic ' + _auth.decode()}\n else:\n body = json.dumps(data)\n headers = { 'content-type' : 'application/json',\n 'Authorization' : 'Basic ' + _auth.decode()}\n\n resp, content = h.request(\n uri, method,\n body = body,\n headers = headers)\n\n if not (resp.status in [ 200, 302 ]):\n raise Exception(resp.status)\n else:\n return _DecodeJson(content)\n\n\ndef DoDelete(uri):\n h = httplib2.Http()\n _SetupCredentials(h)\n headers = { 'Authorization' : 'Basic ' + _auth.decode() }\n\n resp, content = h.request(uri, 'DELETE', headers=headers)\n\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n else:\n return _DecodeJson(content)\n\n\ndef DoPut(uri, data = {}, contentType = ''):\n return _DoPutOrPost(uri, 'PUT', data, contentType)\n\n\ndef DoPost(uri, data = {}, contentType = ''):\n return _DoPutOrPost(uri, 'POST', data, contentType)\n\n\ndef GetPatientIds(uri, interpretAsJson = True):\n\n h = httplib2.Http()\n _SetupCredentials(h)\n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization' : 'Basic ' + _auth.decode()}\n\n resp, content = h.request(uri + \"/patients\", 'GET', headers=headers)\n\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n return content.decode()\n else:\n return _DecodeJson(content)\n\ndef GetPatientData(uri, patient_id, interpretAsJson = True):\n\n patient_properties = []\n h = httplib2.Http()\n _SetupCredentials(h)\n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization' : 'Basic ' + _auth.decode(),\n 'Connection': 'keep-alive'}\n for p in patient_id:\n resp, content = h.request(uri + \"/patients/\"+p, 'GET', headers=headers)\n #h.close()\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n patient_properties.append(content.decode())\n else:\n patient_properties.append(_DecodeJson(content))\n h.close()\n return patient_properties\n\n\ndef GetStudies(uri, studies, interpretAsJson = True):\n study_data = []\n h = httplib2.Http()\n _SetupCredentials(h)\n for p in studies:\n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization' : 'Basic ' + _auth.decode(),\n 'Connection': 'keep-alive'}\n\n resp, content = h.request(uri + \"/studies/\"+p, 'GET', headers=headers)\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n study_data.append(content.decode())\n else:\n study_data.append(_DecodeJson(content))\n h.close()\n return study_data\n\ndef GetSeries(uri, series, interpretAsJson = True):\n series_data = []\n h = httplib2.Http()\n _SetupCredentials(h)\n for p in series:\n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 
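# NOTE: GetPatientData / GetStudies / GetSeries around here are one loop with
# a different URL segment; a single helper would cover them. Sketch (get_many
# is an invented name; DoGet is this module's function, hence left commented):
#     def get_many(uri, kind, ids):
#         return [DoGet('{}/{}/{}'.format(uri, kind, i)) for i in ids]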
'Authorization' : 'Basic ' + _auth.decode(),\n 'Connection': 'keep-alive'}\n\n resp, content = h.request(uri + \"/series/\"+p, 'GET', headers=headers)\n\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n series_data.append(content.decode())\n else:\n series_data.append(_DecodeJson(content))\n h.close()\n return series_data\n\ndef GetInstances(uri, instances, interpretAsJson = True):\n instance_data = []\n h = httplib2.Http()\n _SetupCredentials(h)\n for p in instances:\n \n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization' : 'Basic ' + _auth.decode(),\n 'Connection': 'keep-alive'}\n\n resp, content = h.request(uri + \"/instances/\"+p+\"/simplified-tags\", 'GET', headers=headers)\n\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n instance_data.append(content.decode())\n else:\n instance_data.append(_DecodeJson(content))\n h.close()\n return instance_data\n\ndef GetImageDescription(uri, instance, interpretAsJson = True):\n # Function that is used to get RTimagedescription when it is too long\n h = httplib2.Http()\n _SetupCredentials(h)\n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization' : 'Basic ' + _auth.decode(),\n 'Connection': 'Close'}\n\n resp, content = h.request(uri + \"/instances/\"+instance+\"/content/3002-0004\", 'GET', headers=headers)\n h.close()\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n instance_data = content.decode()\n else:\n instance_data = _DecodeJson(content)\n return instance_data\n\ndef GetSeries2Subfolders_helperf(args):\n i = args[0] # instance\n uri = args[1]\n if i != None:\n temp_folder = tempfile.mkdtemp(prefix=i+\"_\", dir=config.TEMP_DCM_FOLDER)\n temp_file1 = tempfile.NamedTemporaryFile(delete=False, prefix=i+\"_\", suffix=\".DCM\", dir=temp_folder)\n temp_file2 = tempfile.NamedTemporaryFile(delete=False, prefix=i+\"_second_\", suffix=\".DCM\", dir=temp_folder)\n file = DoGet(uri + \"/instances/\" + i + \"/file\")\n\n with open(temp_file1.name, 'wb') as dst:\n dst.write(file)\n copyfile(temp_file1.name, temp_file2.name)\n temp_file1.close()\n temp_file2.close()\n return temp_folder\n else:\n return \"None\"\n\n\ndef GetSeries2Subfolders(uri, list_instances, pickinstances, interpretAsJson = True):\n # Get filepaths, ordered in sequence of datetime! Ordering is important for analysis (Winston Lutz etc.)\n\n # If image was not chose put None into instance list:\n for p in range(0, len(list_instances)):\n if not pickinstances[p]:\n list_instances[p] = None\n \n # Now save each file twice in a subfolder\n arguments = []\n for i in range(len(list_instances)):\n arguments.append([list_instances[i], uri])\n\n p = ThreadPool(4)\n file_paths = p.map(GetSeries2Subfolders_helperf, arguments)\n p.close()\n p.join()\n \n return file_paths\n\ndef GetSeries2Folder(uri, instances, pickinstances, interpretAsJson = True):\n # Get filepaths, ordered in sequence of datetime! 
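# NOTE: the ThreadPool(4) / map / close / join sequence used above is the
# pre-context-manager idiom; the modern form (with the caveat that the
# with-block terminates the pool on exit):
#     with ThreadPool(4) as pool:
#         results = pool.map(worker, arguments)  # worker is a placeholder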
But this time save in one folder\n # and use numeric image names!\n # series is a list of series, for only one series = [\"...\"]\n # For winston lutz \"pylinac\" method\n file_paths_final = []\n file_paths_full = []\n temp_folder = tempfile.mkdtemp(prefix=instances[0]+\"_\", dir=config.TEMP_DCM_FOLDER)\n\n # If image was not chose put None into instance list:\n for p in range(0, len(instances)):\n if not pickinstances[p]:\n instances[p] = None\n\n # Now save each file\n for i in range(0, len(instances), 1):\n if instances[i] != None:\n temp_file = os.path.join(temp_folder, \"img\"+str(i+1)+\".dcm\")\n file = DoGet(uri + \"/instances/\" + instances[i] + \"/file\")\n with open(temp_file, 'wb') as dst:\n dst.write(file)\n dst.close()\n file_paths_final.append(temp_file)\n file_paths_full.append(temp_file)\n else:\n file_paths_full.append(\"None\")\n return temp_folder, file_paths_final, file_paths_full\n\ndef GetSeries2Folder2_helperf(args):\n instance = args[0]\n temp_folder = args[1]\n uri = args[2]\n\n temp_file = tempfile.NamedTemporaryFile(delete=False, prefix=instance+\"_\", suffix=\".DCM\", dir=temp_folder)\n file = DoGet(uri + \"/instances/\" + instance + \"/file\")\n with open(temp_file.name, 'wb') as dst:\n dst.write(file)\n dst.close()\n temp_file.close()\n\n\ndef GetSeries2Folder2(uri, series, interpretAsJson = True):\n h = httplib2.Http()\n _SetupCredentials(h)\n headers = {'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization' : 'Basic ' + _auth.decode(),\n 'Connection': 'Close'}\n\n resp, content = h.request(uri + \"/series/\" + series, 'GET', headers=headers)\n h.close()\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n data = content.decode()\n else:\n data = _DecodeJson(content)\n instances = data[\"Instances\"]\n\n temp_folder = tempfile.mkdtemp(prefix=series+\"_\", dir=config.TEMP_DCM_FOLDER)\n \n arguments = []\n for i in range(len(instances)):\n arguments.append([instances[i], temp_folder, uri])\n\n p = ThreadPool(4)\n p.map(GetSeries2Folder2_helperf, arguments)\n p.close()\n p.join()\n\n return temp_folder\n\ndef GetSingleDcm(uri, instance, interpretAsJson = True):\n #Get one single dicom file\n #Used, for example, for the starshot module'''\n temp_folder = tempfile.mkdtemp(prefix=instance+\"_\", dir=config.TEMP_DCM_FOLDER)\n temp_file = tempfile.NamedTemporaryFile(delete=False, prefix=\"Dicom_\", suffix=\".DCM\", dir=temp_folder)\n file = DoGet(uri + \"/instances/\" + instance + \"/file\")\n with open(temp_file.name, 'wb') as dst:\n dst.write(file)\n dst.close()\n temp_file.close()\n return temp_folder, temp_file.name\n\ndef DoGet_image(uri, data = {}, interpretAsJson = True):\n d = ''\n if len(data.keys()) > 0:\n d = '?' 
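# NOTE: DoGet_image here repeats DoGet apart from the Accept header; one
# parameterised getter removes the copy. Sketch (do_get is an invented name):
#     def do_get(uri, data=None, accept='application/json'):
#         headers = {'Accept': accept,
#                    'Authorization': 'Basic ' + _auth.decode()}
#         ...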
+ urlencode(data)\n\n h = httplib2.Http()\n _SetupCredentials(h)\n\n headers = {'Accept': 'image/png',\n 'Content-Type': 'image/png',\n 'Authorization' : 'Basic ' + _auth.decode() }\n resp, content = h.request(uri + d, 'GET', headers=headers)\n h.close()\n if not (resp.status in [ 200 ]):\n raise Exception(resp.status)\n elif not interpretAsJson:\n return content.decode()\n else:\n return _DecodeJson(content)\n","repo_name":"brjdenis/pyqaserver","sub_path":"pyqaserver/RestToolbox_modified.py","file_name":"RestToolbox_modified.py","file_ext":"py","file_size_in_byte":14781,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"71"} +{"seq_id":"28048072382","text":"import os\nimport subprocess\n\nfrom proksee.parser.read_quality_parser import parse_read_quality_from_fastp\nfrom proksee.reads import Reads\n\n\nclass ReadFilterer():\n \"\"\"\n A class for filtering reads.\n\n ATTRIBUTES\n reads (Reads): the reads to filter\n output_directory (str): the file location of the output directory for writing files\n \"\"\"\n\n LOGFILE_FILENAME = \"fastp.log\"\n FWD_FILTERED_FILENAME = \"fwd_filtered.fastq\"\n REV_FILTERED_FILENAME = \"rev_filtered.fastq\"\n\n def __init__(self, reads, output_directory):\n \"\"\"\n Initializes the read filterer.\n\n PARAMETERS\n reads (Reads): the reads to filter\n output_directory (str): the file location of the output directory for writing files\n \"\"\"\n\n self.reads = reads\n self.output_directory = output_directory\n\n def __build_fastp_command(self):\n \"\"\"\n Builds the command for running the FASTP program.\n\n RETURNS\n command (str): a string for running the FASTP program\n \"\"\"\n\n forward_reads = self.reads.forward\n reverse_reads = self.reads.reverse\n\n self.forward_filtered = os.path.join(self.output_directory, self.FWD_FILTERED_FILENAME)\n self.reverse_filtered = os.path.join(self.output_directory,\n self.REV_FILTERED_FILENAME) if reverse_reads else None\n json = os.path.join(self.output_directory, 'fastp.json')\n html = os.path.join(self.output_directory, 'fastp.html')\n\n '''Creating fastp command based on absence/presence of reverse read'''\n if reverse_reads is None:\n command = 'fastp -i ' + forward_reads + ' -o ' + \\\n self.forward_filtered + ' -j ' + json + ' -h ' + html\n else:\n command = 'fastp -i ' + forward_reads + ' -I ' + reverse_reads + \\\n ' -o ' + self.forward_filtered + ' -O ' + self.reverse_filtered + ' -j ' + json + ' -h ' + html\n\n return command\n\n def __run_fastp(self):\n \"\"\"\n Runs the FASTP program in order to perform filtering on reads.\n \"\"\"\n\n logfile_location = open(os.path.join(self.output_directory, self.LOGFILE_FILENAME), 'w+')\n command = self.__build_fastp_command()\n\n try:\n subprocess.check_call(command, shell=True, stderr=logfile_location)\n\n except subprocess.CalledProcessError as e:\n raise e\n\n filtered_reads = Reads(self.forward_filtered, self.reverse_filtered)\n return filtered_reads\n\n def filter_reads(self):\n \"\"\"\n Filters reads in order to improve their quality.\n\n RETURNS\n filtered_reads (Reads): the filtered reads\n\n POST\n The FASTP program will be run and related files will be written into the output directory.\n \"\"\"\n\n if not os.path.isfile(self.reads.forward):\n raise FileNotFoundError(\"Read input file not found: \" + str(self.reads.forward))\n\n if self.reads.reverse is not None and not os.path.isfile(self.reads.reverse):\n raise FileNotFoundError(\"Read input file not found: \" + str(self.reads.reverse))\n\n filtered_reads = 
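# NOTE: __build_fastp_command above assembles the fastp call by string
# concatenation and runs it with shell=True; the list-argv form sidesteps
# quoting problems with odd file names. Sketch using this record's own flags
# (variable names are placeholders):
#     cmd = ['fastp', '-i', forward_reads, '-o', fwd_out,
#            '-j', json_path, '-h', html_path]
#     subprocess.check_call(cmd, stderr=logfile)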
self.__run_fastp()\n return filtered_reads\n\n def summarize_quality(self):\n \"\"\"\n Summarizes the quality of the filtered reads. This function should be run after filtering reads.\n\n RETURNS\n read_quality (ReadQuality): quality statistics of filtered reads\n \"\"\"\n\n json_file = os.path.join(self.output_directory, \"fastp.json\")\n\n if os.path.isfile(json_file):\n read_quality = parse_read_quality_from_fastp(json_file)\n\n return read_quality\n","repo_name":"proksee-project/proksee-cmd","sub_path":"proksee/read_filterer.py","file_name":"read_filterer.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"71"} +{"seq_id":"3886823342","text":"\"\"\"Test grib.\"\"\"\nimport json\n\nimport pytest\n\nfrom pysurfex.cache import Cache\nfrom pysurfex.datetime_utils import as_datetime\nfrom pysurfex.grib import Grib, Grib1Variable, Grib2Variable\nfrom pysurfex.read import ConvertedInput, Converter\n\n\n@pytest.fixture()\ndef converter_config(lambert_t2m_grib1, lambert_t1_grib2):\n config = {\n \"grib1\": {\"fcint\": 10800, \"file_inc\": 3600, \"offset\": 0},\n \"grib2\": {\"fcint\": 10800, \"file_inc\": 3600, \"offset\": 0},\n \"t2m\": {\n \"grib1\": {\n \"converter\": {\n \"none\": {\n \"parameter\": 11,\n \"type\": 105,\n \"level\": 2,\n \"tri\": 0,\n \"filepattern\": lambert_t2m_grib1,\n }\n }\n }\n },\n \"t1\": {\n \"grib2\": {\n \"converter\": {\n \"none\": {\n \"discipline\": 0,\n \"parameterCategory\": 0,\n \"parameterNumber\": 0,\n \"levelType\": 103,\n \"typeOfStatisticalProcessing\": -1,\n \"level\": 2,\n \"filepattern\": lambert_t1_grib2,\n }\n }\n }\n },\n }\n return config\n\n\ndef get_var(edition, conf):\n kwargs = conf[\"none\"]\n if edition == 1:\n parameter = kwargs[\"parameter\"]\n typ = kwargs[\"type\"]\n level = kwargs[\"level\"]\n tri = kwargs[\"tri\"]\n var = Grib1Variable(parameter, typ, level, tri)\n return var\n elif edition == 2:\n discipline = kwargs[\"discipline\"]\n parameter_category = kwargs[\"parameterCategory\"]\n parameter_number = kwargs[\"parameterNumber\"]\n level_type = kwargs[\"levelType\"]\n level = kwargs[\"level\"]\n type_of_statistical_processing = kwargs[\"typeOfStatisticalProcessing\"]\n var = Grib2Variable(\n discipline,\n parameter_category,\n parameter_number,\n level_type,\n level,\n type_of_statistical_processing,\n )\n return var\n\n\ndef write_json_file(fname, keys):\n with open(fname, mode=\"w\", encoding=\"utf-8\") as fhandler:\n json.dump(keys, fhandler)\n\n\n@pytest.mark.usefixtures(\"_mockers\")\ndef test_grib1_from_converter(converter_config, conf_proj_domain):\n \"\"\"Test grib1 from converter.\"\"\"\n # Grib 1\n fileformat = \"grib1\"\n var = \"t2m\"\n print(var, fileformat)\n defs = converter_config[fileformat]\n converter_conf = converter_config[var][fileformat][\"converter\"]\n\n var = get_var(1, converter_conf)\n validtime = as_datetime(\"2020111306\")\n cache = Cache(7200)\n initial_basetime = validtime\n converter = Converter(\"none\", initial_basetime, defs, converter_conf, fileformat)\n ConvertedInput(conf_proj_domain, var, converter).read_time_step(validtime, cache)\n\n\n@pytest.mark.usefixtures(\"_mockers\")\ndef test_grib2_from_converter(converter_config, conf_proj_domain):\n \"\"\"Test grib2 from converter.\"\"\"\n fileformat = \"grib2\"\n var = \"t1\"\n print(var, fileformat)\n defs = converter_config[fileformat]\n converter_conf = converter_config[var][fileformat][\"converter\"]\n\n var = get_var(2, converter_conf)\n validtime = 
as_datetime(\"2020111306\")\n cache = Cache(7200)\n initial_basetime = validtime\n converter = Converter(\"none\", initial_basetime, defs, converter_conf, fileformat)\n ConvertedInput(conf_proj_domain, var, converter).read_time_step(validtime, cache)\n\n\n@pytest.mark.usefixtures(\"_mockers\")\ndef test_read_rotated_ll_grib1(converter_config, rotated_ll_t2m_grib1):\n\n converter_conf = converter_config[\"t2m\"][\"grib1\"][\"converter\"]\n var = get_var(1, converter_conf)\n grib_file = Grib(rotated_ll_t2m_grib1)\n assert not var.is_accumulated()\n var.print_keys()\n validtime = as_datetime(\"2020111306\")\n grib_file.field(var, validtime)\n\n\n@pytest.mark.usefixtures(\"_mockers\")\ndef test_read_rotated_ll_grib2(converter_config, rotated_ll_t1_grib2):\n\n converter_conf = converter_config[\"t1\"][\"grib2\"][\"converter\"]\n var = get_var(2, converter_conf)\n grib_file = Grib(rotated_ll_t1_grib2)\n assert not var.is_accumulated()\n var.print_keys()\n validtime = as_datetime(\"2020111306\")\n grib_file.field(var, validtime)\n\n\n@pytest.mark.usefixtures(\"_mockers\")\ndef test_read_regular_ll_grib1(converter_config, regular_ll_t2m_grib1):\n\n converter_conf = converter_config[\"t2m\"][\"grib1\"][\"converter\"]\n var = get_var(1, converter_conf)\n\n grib_file = Grib(regular_ll_t2m_grib1)\n assert not var.is_accumulated()\n var.print_keys()\n validtime = as_datetime(\"2020111306\")\n grib_file.field(var, validtime)\n\n\n@pytest.mark.usefixtures(\"_mockers\")\ndef test_read_regular_ll_grib2(converter_config, regular_ll_t1_grib2):\n\n converter_conf = converter_config[\"t1\"][\"grib2\"][\"converter\"]\n var = get_var(2, converter_conf)\n\n grib_file = Grib(regular_ll_t1_grib2)\n assert not var.is_accumulated()\n var.print_keys()\n validtime = as_datetime(\"2020111306\")\n grib_file.field(var, validtime)\n","repo_name":"metno/pysurfex","sub_path":"tests/unit/test_grib.py","file_name":"test_grib.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"} +{"seq_id":"73189979108","text":"\"\"\"\nThis is temporary script to remove header line from parsed files and merge info in one giant file.\n\"\"\"\n# import libraries\n# import csv\nimport logging\nimport os\nfrom lib import my_env\nfrom os import listdir\n\n\ncfg = my_env.init_env(\"vdab\", __file__)\nlogging.info(\"Start Application\")\n\n# Set directories\nparsed_dir = cfg[\"LogFiles\"][\"parsed_dir\"]\nmerged_fn = cfg[\"LogFiles\"][\"merged_fn\"]\n\n# Get list of filenames in scandir, collect file full path names.\nwith open(merged_fn, \"w\", encoding='utf-8') as fout:\n fi = my_env.LoopInfo(\"MergeFiles\", 10)\n for f in listdir(parsed_dir):\n fi.info_loop()\n with open(os.path.join(parsed_dir, f), mode=\"r\", encoding='utf-8') as fin:\n for line in fin:\n fout.write(line)\n fi.end_loop()\nlogging.info(\"End Application\")\n","repo_name":"dirkhpe/vdcs","sub_path":"isamparsing/20_merge_logs.py","file_name":"20_merge_logs.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31513440671","text":"\"\"\"\nThis module is responsible of indexing a corpus of text inside a folder\nThe filenames to index must be unique integers\n\nIt relies on the `shelve` module to provide a on-disk persistent dictionary\nLargely facilitating the indexing of the documents. As it's on disk, it only\nuse a very limited (and raisonnable) amount of memory. 
Once the computing of\nthe index is done, the `shelf` is reduced using (c)Pickle to the specified files:\none for the dictionary and one for the postings\n\nnote: The shelve version is now commented out, replaced by an in-memory version\nbecause according to the newest post on IVLE, in-memory indexing is perfectly\nfine. In further versions, better on-disk indexing (like SPIMI) could be interesting.\n\"\"\"\n#!/usr/bin/python\n\nimport getopt\nimport os\nimport shelve\nimport sys\nimport time\nimport re\nfrom tuple_type import Entry\nfrom typing import Dict, Iterable, List, Set, Union\n\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\ntry:\n    import cPickle as pickle\nexcept ImportError:\n    import pickle\n\nDELIMITER = '/'\nSTEMMER = PorterStemmer()\n\n\ndef get_file_list(directory: str) -> Iterable[int]:\n    \"\"\"\n    Create a sorted list of the files inside a directory\n\n    *params*:\n        - directory: The directory to list\n    *return*:\n        - A sorted Iterable[int] containing the filenames\n    \"\"\"\n    return sorted(map(int, os.listdir(directory)))\n\n\ndef generate_token(in_file: str) -> Set[str]:\n    \"\"\"\n    Generate the tokens for a specified file to be added in the Dictionary/Postings.\n    Reads the file then applies tokenization (sentence, word), case folding and stemming\n\n    *params*:\n        - in_file: The file to generate the tokens for\n    *return*:\n        - A Set[str] of the unique elements to be added\n    \"\"\"\n    with open(in_file, encoding=\"utf8\") as file:\n        return {STEMMER.stem(w.lower()) for w in\n                {word for sent in sent_tokenize(file.read())  # (re.sub(\"[-']\", \" \", file.read()))\n                 for word in word_tokenize(sent)}}\n\n\ndef add_token(dictionary: Union[shelve.Shelf, Dict[str, List[int]]], token: str, in_file: int):\n    \"\"\"\n    Add a specified token from a specified file in the in-memory dictionary\n\n    *params*:\n        - dictionary: The in-memory dictionary to add into\n        - token: The token to add into the dictionary\n        - in_file: The file to which the token belongs\n    \"\"\"\n    if token not in dictionary:\n        dictionary[token] = [in_file]\n    else:\n        temp = dictionary[token]\n        temp.append(in_file)\n        dictionary[token] = temp\n\n\ndef cleanup(tempFilename: str):\n    \"\"\"\n    cleanup the temporary shelf\n\n    *params*:\n        - tempFilename: The filename of the shelf\n    \"\"\"\n    try:\n        os.remove(tempFilename + \".bak\")\n        os.remove(tempFilename + \".dat\")\n        os.remove(tempFilename + \".dir\")\n    except:\n        print(\"ERROR: Couldn't remove all Shelf files\")\n\n\ndef index(directory: str, dict_file: str, post_file: str):\n    \"\"\"\n    Core of the module. Index all the files of a specified directory into a couple\n    of Dictionary and Postings. The Dictionary stores the list of all the documents\n    and of the entries (with their frequency, offset in the postings and size (in bytes))\n    whereas the Postings file stores the list of lists of documents. 
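A consumer can recover one\n    postings list later with the stored (offset, size) pair, e.g. (sketch, assuming\n    the `Entry` fields can be addressed as `entry.offset`/`entry.size`):\n\n        with open(post_file, mode=\"rb\") as postings_file:\n            postings_file.seek(entry.offset)\n            docs = pickle.loads(postings_file.read(entry.size))\n\n    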
The postings are useless\n without the Dictonary.\n\n *params*:\n - directory: The directory containing the file to index\n - dict_file: The file that will contain the Dictionary\n - post_file: The file that will contain the Postings\n \"\"\"\n # tempFilename = 'tmp' + str(int(time.time()))\n # with shelve.open(tempFilename, flag=\"n\") as shelf:\n shelf = dict()\n file_list = get_file_list(directory) # [:10]\n shelf['__all__'] = file_list\n # Generate Dict\n for in_file in file_list:\n for token in generate_token(directory + str(in_file)):\n add_token(shelf, token, in_file)\n\n # Write Postings\n dictionary = dict()\n with open(post_file, mode=\"wb\") as postings_file:\n for key, value in shelf.items():\n offset = postings_file.tell()\n size = postings_file.write(pickle.dumps(value))\n dictionary[key] = Entry(len(value), offset, size)\n\n # Write Dictionary\n with open(dict_file, mode=\"wb\") as dictionary_file:\n pickle.dump(dictionary, dictionary_file)\n\n # cleanup(tempFilename)\n\n\ndef usage():\n \"\"\"\n Print the usage of `index`\n \"\"\"\n print(\"usage: \" +\n sys.argv[0] + \" -i directory-of-documents -d dictionary-file -p postings-file\")\n\n\ndef main():\n \"\"\"\n Main fonction of the module, check the argv and pass them to the index function\n \"\"\"\n directory = dict_file = post_file = None\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')\n except getopt.GetoptError as err:\n usage()\n sys.exit(2)\n for o, a in opts:\n if o == '-i':\n directory = a\n elif o == '-d':\n dict_file = a\n elif o == '-p':\n post_file = a\n else:\n assert False, \"unhandled option\"\n if directory is None or dict_file is None or post_file is None:\n usage()\n sys.exit(2)\n\n if not directory.endswith(DELIMITER):\n directory += DELIMITER\n index(directory, dict_file, post_file)\n\nif __name__ == '__main__':\n main()\n","repo_name":"tvaucher/CS3245-InfoRetrieval","sub_path":"Assignment2/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"} +{"seq_id":"6105780037","text":"#!/usr/bin/env python3\n\nimport locale\nimport curses\nimport curses.textpad\nimport collections\nimport os\nimport pickle\nimport sys\n\nfrom copy import deepcopy\n\n\nclass mapElement:\n pass\n\n\nclass mapEditor():\n\n kMapWinWidth = 20\n kMapWinHeight = 12\n mapWinWidth = 70\n mapWinHeight = 20\n kLowerTop = kMapWinHeight+4\n kScrollMargin = 2\n\n kDisplayCharacters = [\n # ---------- dungeon tiles ---------------\n ['.', 'D space/floor'], # 0 : space/floor\n [u\"\\u25c6\", 'D item'], # 1 : item = diamond\n # 2 : vertical line (door)\n [u\"\\u007C\", 'D vertical door'],\n # 3 : horizontal line (door)\n [u\"\\u2015\", 'D horizontal door'],\n [u\"\\u2588\", 'D wall'], # 4 : wall = solid block\n # ------------ outdoor tiles -------------\n [',', 'O grass'], # 5 : grass\n ['%', 'O sand'], # 6 : sand\n ['#', 'O stone path'], # 7 : stone path\n ['t', 'O small trees'], # 8 : small trees\n ['T', 'O large trees'], # 9 : large trees\n # 10 : small water\n ['w', 'O small water'],\n ['W', 'O large water'], # 11 : lg water\n # 12 : sm mountain\n ['^', 'O hills'],\n # 13 : lg mountain\n ['M', 'O mountains'],\n ['c', 'O village'], # 14 : village\n ['C', 'O castle'], # 15 : castle\n ['i', 'O inn'], # 16 : inn\n ['d', 'O dungeon'], # 17 : dungeon\n ['b', 'O bridge'] # 18 : bridge\n ]\n\n def setupEmptyMap(self):\n\n self.map = []\n self.feels = [\"\"]\n self.routines = [[0, 0, 0, 0, 0, 0, 0, 0]]\n 
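# NOTE (added sketch, behaviour unchanged): every cell below defaults to\n        # tile 4 (the solid wall), invisible and impassable. A dataclass could\n        # state the same defaults declaratively, e.g. (hypothetical):\n        #\n        #     @dataclass\n        #     class Cell:\n        #         mapElementID: int = 4          # wall\n        #         initiallyVisible: bool = False\n        #         impassable: bool = True\n        #         startOpcodeIndex: int = 0\n        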
self.copyMapElement = 0\n\n for y in range(self.mapWidth):\n self.map.append([])\n for x in range(self.mapHeight):\n newMapElement = mapElement()\n newMapElement.mapElementID = 4\n newMapElement.initiallyVisible = False\n newMapElement.impassable = True\n newMapElement.startOpcodeIndex = 0\n self.map[y].append(newMapElement)\n\n def __init__(self, outwin):\n\n self.stdscr = outwin\n\n self.cursorX = 0\n self.cursorY = 0\n self.originX = 0\n self.originY = 0\n self.startX = 0\n self.startY = 0\n\n self.mapWidth = 32\n self.mapHeight = 32\n self.currentFilename = sys.argv[1]\n\n\n curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)\n\n # init map structure\n self.setupEmptyMap()\n\n def resizeWindows(self):\n\n height,width = self.stdscr.getmaxyx()\n self.rawMapWinHeight = height-4\n self.rawMapWinWidth = width\n\n if self.rawMapWinHeight > self.mapHeight+2:\n self.rawMapWinHeight = self.mapHeight+2\n\n if self.rawMapWinWidth > self.mapWidth+2:\n self.rawMapWinWidth = self.mapWidth+2\n\n self.kMapWinWidth = self.rawMapWinWidth-2\n self.kMapWinHeight = self.rawMapWinHeight-2\n\n self.mapwin = self.stdscr.subwin(\n self.rawMapWinHeight, self.rawMapWinWidth, 1, 0)\n\n self.topwin = self.stdscr.subwin(\n 1, width,0,0\n )\n\n self.bottomwin = self.stdscr.subwin(\n 3, width, self.rawMapWinHeight+1,0\n )\n \n\n self.mapwin.keypad(True)\n\n\n# self.helpwin = self.stdscr.subwin(\n# self.kMapWinHeight+2, 0, 3, self.kMapWinWidth+4)\n\n# except:\n# curses.curs_set(0)\n# self.stdscr.addstr(\"No room for UI. Aborting.\\n\"\n# \"Please provide a screen with at least 80x24 characters.\\n\\n\"\n# \"- press any key -\")\n# self.stdscr.getch()\n# exit(127)\n\n\n def refreshStatus(self):\n e = self.getCurrentMapEntry()\n self.bottomwin.erase()\n self.topwin.move(0, 0)\n self.topwin.clrtoeol()\n self.topwin.addstr(0, 0, \"maped 1.0 <\")\n self.topwin.addstr(self.currentFilename)\n self.topwin.addstr(\"> \")\n self.topwin.noutrefresh()\n\n x = self.originX + self.cursorX - 1\n y = self.originY + self.cursorY - 1\n self.bottomwin.move(0,0)\n self.bottomwin.addstr(\"x,y: \"+str(x)+\",\"+str(y))\n self.bottomwin.addstr(\" startX,Y: \"+str(self.startX)+\",\"+str(self.startY))\n self.bottomwin.addstr(\" imp: \"+str(e.impassable))\n self.bottomwin.noutrefresh()\n\n def checkScrollMap(self):\n if self.cursorX > self.kMapWinWidth-self.kScrollMargin:\n if self.originX + self.kMapWinWidth < self.mapWidth:\n self.originX += 1\n self.cursorX -= 1\n if self.cursorY > self.kMapWinHeight-self.kScrollMargin:\n if self.originY + self.kMapWinHeight < self.mapHeight:\n self.originY += 1\n self.cursorY -= 1\n if self.cursorX < self.kScrollMargin+1:\n if self.originX > 0:\n self.originX -= 1\n self.cursorX += 1\n if self.cursorY < self.kScrollMargin+1:\n if self.originY > 0:\n self.originY -= 1\n self.cursorY += 1\n\n def checkBounds(self):\n if self.cursorX < 1:\n self.cursorX = 1\n elif self.cursorX > self.kMapWinWidth:\n self.cursorX = self.kMapWinWidth\n if self.cursorY < 1:\n self.cursorY = 1\n elif self.cursorY > self.kMapWinHeight:\n self.cursorY = self.kMapWinHeight\n\n def refreshMap(self):\n for x in range(self.kMapWinWidth):\n for y in range(self.kMapWinHeight):\n gX = x + self.originX\n gY = y + self.originY\n if gX 0:\n e.mapElementID -= 1\n\n def copyElem(self):\n self.copyMapElement = deepcopy(self.getCurrentMapEntry())\n\n def pasteElem(self):\n if self.copyMapElement != 0:\n self.getCurrentMapEntry().mapElementID = self.copyMapElement.mapElementID\n 
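# (added note) copyElem() stored a deepcopy above; keeping a plain reference\n            # instead (hypothetical: self.copyMapElement = self.getCurrentMapEntry())\n            # would alias the live cell, so later edits would also change the clipboard.\n            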
self.getCurrentMapEntry().initiallyVisible = self.copyMapElement.initiallyVisible\n            self.getCurrentMapEntry().impassable = self.copyMapElement.impassable\n            self.cursorX += 1\n\n    def goto(self):\n        self.bottomwin.erase()\n        self.bottomwin.move(0, 0)\n        inCoords = self.getUserInput(\"goto (x,y):\")\n        try:\n            splitInCoords = inCoords.decode().split(',')  # python3, you SUCK!\n            self.cursorX = int(splitInCoords[0])+1\n            self.cursorY = int(splitInCoords[1])+1\n            self.bottomwin.clear()\n        except:\n            pass\n\n    def loadMap(self):\n        loadFilename = self.currentFilename\n        infile = open(loadFilename, \"br\")\n        mdata = pickle.load(infile)\n        self.mapWidth = mdata[\"width\"]\n        self.mapHeight = mdata[\"height\"]\n        self.startX = mdata[\"startX\"]\n        self.startY = mdata[\"startY\"]\n        self.map = mdata[\"map\"]\n        self.resizeWindows()\n        infile.close()\n\n    def loadOrCreateMap(self):\n        if (os.path.exists(self.currentFilename)):\n            self.loadMap()\n        else:\n            self.newMap()\n\n    def _saveMap(self, fname=\"\"):\n        if (fname):\n            saveFilename = fname\n        else:\n            saveFilename = self.getUserInput(\"Save file:\")\n        self.stdscr.addstr(\"\\nSaving...\")\n        mdata = {\n            \"width\": self.mapWidth,\n            \"height\": self.mapHeight,\n            \"startX\": self.startX,\n            \"startY\": self.startY,\n            \"map\": self.map,\n        }\n        outfile = open(saveFilename, \"bw\")\n        pickle.dump(mdata, outfile)\n        outfile.close()\n        self.bottomwin.clear()\n        self.bottomwin.addstr(0, 0,\n                              \"\\nfile saved.\\n- press any key -\")\n        self.bottomwin.refresh()\n        self.bottomwin.getch()\n        self.redrawStdEditorScreen()\n\n    def saveMap(self):\n        if (self.currentFilename):\n            self._saveMap(self.currentFilename)\n        else:\n            self._saveMap(\"\")\n\n    def saveMapAs(self):\n        self._saveMap(\"\")\n\n    def fillMap(self):\n        elem = self.getCurrentMapEntry()\n        for y in range(self.mapWidth):\n            for x in range(self.mapHeight):\n                self.map[y][x].initiallyVisible = elem.initiallyVisible\n                self.map[y][x].mapElementID = elem.mapElementID\n                self.map[y][x].impassable = elem.impassable\n                self.map[y][x].startOpcodeIndex = elem.startOpcodeIndex\n\n    def trimMap(self):\n        width = 0\n        height = 0\n        while (width < 16 or width > self.mapWidth or height < 16 or height > self.mapHeight):\n            self.stdscr.erase()\n            self.stdscr.addstr(\"\\n\\n** trim map **\\n\")\n            self.stdscr.addstr(\"new width (16-\"+str(self.mapWidth)+\"): \")\n            self.stdscr.refresh()\n            curses.echo()\n            width = int(self.stdscr.getstr(4))\n            self.stdscr.addstr(\"new height (16-\"+str(self.mapHeight)+\"): \")\n            height = int(self.stdscr.getstr(4))\n            curses.noecho()\n\n        newMap = []\n        for y in range(width):\n            newRow = []\n            for x in range(height):\n                newRow.append(self.map[y][x])\n            newMap.append(newRow)\n        self.map = newMap\n        self.mapHeight = height\n        self.mapWidth = width\n        self.redrawStdEditorScreen()\n\n    def newMap(self):\n        width = 0\n        height = 0\n        while (width < 16 or width > 128 or height < 16 or height > 128):\n            self.stdscr.erase()\n            self.stdscr.addstr(\"\\n\\n** new map \\\"\" +\n                               self.currentFilename+\"\\\" **\\n\")\n            self.stdscr.addstr(\"Width (16-128): \")\n            self.stdscr.refresh()\n            curses.echo()\n            try:\n                width = int(self.stdscr.getstr(4))\n                self.stdscr.addstr(\"Height (16-128): \")\n                height = int(self.stdscr.getstr(4))\n            except:\n                curses.noecho()\n                self.stdscr.addstr(\"-- aborted. 
press any key --\")\n self.stdscr.getch()\n exit(0)\n curses.noecho()\n self.mapHeight = height\n self.mapWidth = width\n self.setupEmptyMap()\n self.resizeWindows()\n\n\n def userStartup(self):\n self.stdscr.erase()\n self.stdscr.addstr(\"### maped v0.1a ###\\n\"\n \"stephan kleinert, 7turtles software, 2019\\n\\n\"\n \"n)ew map or l)oad existing map?\")\n self.stdscr.refresh()\n choice = \"\"\n while choice != ord('l') and choice != ord('n'):\n choice = self.stdscr.getch()\n if choice == ord('l'):\n self.loadMap()\n else:\n self.newMap()\n\n def redrawStdEditorScreen(self):\n self.stdscr.erase()\n self.mapwin.border()\n self.mapwin.addstr(0, 1, \"Map\")\n #self.showHelp()\n\n def showHelp(self):\n self.helpwin.erase()\n self.helpwin.addstr(\"moves:\\n\"\n \"[c] copy [SPC] paste [+/-] inc/dec \\n\"\n \"toggles:\\n\"\n \"[g] impassable [v] visible\\n\"\n \"io:\\n\"\n \"[l] load [s] save [S] saveAs\\n\"\n \"misc:\\n\"\n \"[p] start pos [F] fill [G] goto\")\n self.helpwin.refresh()\n\n def runEditor(self):\n\n edcmds = {\n 259: self.cursorUp,\n 258: self.cursorDown,\n 260: self.cursorLeft,\n 261: self.cursorRight,\n 10: self.nextLine,\n '+': self.increaseCurrentElementID,\n '-': self.decreaseCurrentElementID,\n ' ': self.pasteElem,\n 'c': self.copyElem,\n 's': self.saveMap,\n 'S': self.saveMapAs,\n 'l': self.loadMap,\n 'v': self.toggleInitiallyVisible,\n 'g': self.toggleImpassable,\n 'p': self.setStartPosition,\n 't': self.trimMap,\n 'F': self.fillMap,\n 'G': self.goto\n }\n\n stopEd = 0\n self.cursorX = 1\n self.cursorY = 1\n\n self.loadOrCreateMap()\n self.redrawStdEditorScreen()\n\n while 0 == stopEd:\n self.refreshMap()\n self.refreshStatus()\n self.mapwin.move(self.cursorY, self.cursorX)\n elem = self.kDisplayCharacters[self.getCurrentMapEntry(\n ).mapElementID]\n elemChar = elem[0]\n elemDesc = elem[1]\n self.bottomwin.move(1, 0)\n self.bottomwin.clrtoeol()\n self.bottomwin.addstr(1, 0, '>'+elemChar+'< '+elemDesc)\n self.bottomwin.noutrefresh()\n curses.doupdate()\n\n c = self.mapwin.getch()\n if c== curses.KEY_RESIZE:\n self.resizeWindows()\n self.redrawStdEditorScreen()\n else:\n func = edcmds.get(c, 0)\n if (func == 0):\n func = edcmds.get(chr(c), 0)\n if (func != 0):\n func()\n self.checkScrollMap()\n self.checkBounds()\n # self.stdscr.addstr(18, 0, str(c))\n pass\n\n\ndef runMaped(aWin):\n myMaped = mapEditor(aWin)\n myMaped.runEditor()\n\n\nif len(sys.argv) != 2:\n print(\"usage: \"+sys.argv[0]+\" \")\n sys.exit(127)\n\nlocale.setlocale(locale.LC_ALL, '')\ncurses.wrapper(runMaped)\n","repo_name":"steph72/dragonrock-mega65","sub_path":"tools/maped.py","file_name":"maped.py","file_ext":"py","file_size_in_byte":15827,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"10997824795","text":"from typing import List\nfrom topsdk.client import BaseRequest\nfrom topsdk.util import convert_struct_list,convert_basic_list,convert_struct,convert_basic\nfrom datetime import datetime\n\n\nclass TaobaoTmcMessagesConsumeRequest(BaseRequest):\n\n def __init__(\n self,\n group_name: str = None,\n quantity: int = None\n ):\n \"\"\"\n 用户分组名称,不传表示消费默认分组,如果应用没有设置用户分组,传入分组名称将会返回错误\n \"\"\"\n self._group_name = group_name\n \"\"\"\n 每次批量消费消息的条数,最小值:10;最大值:200\n \"\"\"\n self._quantity = quantity\n\n @property\n def group_name(self):\n return self._group_name\n\n @group_name.setter\n def group_name(self, group_name):\n if isinstance(group_name, str):\n self._group_name = group_name\n else:\n raise TypeError(\"group_name must be str\")\n\n 
@property\n def quantity(self):\n return self._quantity\n\n @quantity.setter\n def quantity(self, quantity):\n if isinstance(quantity, int):\n self._quantity = quantity\n else:\n raise TypeError(\"quantity must be int\")\n\n\n def get_api_name(self):\n return \"taobao.tmc.messages.consume\"\n\n def to_dict(self):\n request_dict = {}\n if self._group_name is not None:\n request_dict[\"group_name\"] = convert_basic(self._group_name)\n\n if self._quantity is not None:\n request_dict[\"quantity\"] = convert_basic(self._quantity)\n\n return request_dict\n\n def get_file_param_dict(self):\n file_param_dict = {}\n return file_param_dict\n\n","repo_name":"LIANGCYRUS/TopApiSite","sub_path":"apps/topsdk/ability132/request/taobao_tmc_messages_consume_request.py","file_name":"taobao_tmc_messages_consume_request.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19285265565","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport mne\nfrom mne.channels import read_vectorview_selection\nfrom mne.datasets import sample\nfrom mne.minimum_norm import apply_inverse, apply_inverse_epochs, make_inverse_operator\n\ndata_path = sample.data_path()\nmeg_path = data_path / \"MEG\" / \"sample\"\nraw_fname = meg_path / \"sample_audvis_raw.fif\"\ncov_fname = meg_path / \"sample_audvis-shrunk-cov.fif\"\nbem_dir = data_path / \"subjects\" / \"sample\" / \"bem\"\nbem_fname = bem_dir / \"sample-5120-5120-5120-bem-sol.fif\"\n\n###############################################################################\n# Read the MEG data from the audvis experiment. Make epochs and evokeds for the\n# left and right auditory conditions.\nraw = mne.io.read_raw_fif(raw_fname)\nraw = raw.pick(picks=[\"meg\", \"eog\", \"stim\"])\ninfo = raw.info\n\n# Create epochs for auditory events\nevents = mne.find_events(raw)\nevent_id = dict(right=1, left=2)\nepochs = mne.Epochs(\n raw,\n events,\n event_id,\n tmin=-0.1,\n tmax=0.3,\n baseline=(None, 0),\n reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6),\n)\n\n# Create evokeds for left and right auditory stimulation\nevoked_left = epochs[\"left\"].average()\nevoked_right = epochs[\"right\"].average()\n\n###############################################################################\n# Guided dipole modeling, meaning fitting dipoles to a manually selected subset\n# of sensors as a manually chosen time, can now be performed in MEGINs XFit on\n# the evokeds we computed above. However, it is possible to do it completely\n# in MNE-Python.\n\n# Setup conductor model\ncov = mne.read_cov(cov_fname) # bad channels were already excluded here\nbem = mne.read_bem_solution(bem_fname)\n\n# Fit two dipoles at t=80ms. The first dipole is fitted using only the sensors\n# on the left side of the helmet. 
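(As we understand the API, ``read_vectorview_selection`` maps a selection name\n# such as \"Left\" to the corresponding MEGIN channel names.) 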
The second dipole is fitted using only the\n# sensors on the right side of the helmet.\npicks_left = read_vectorview_selection(\"Left\", info=info)\nevoked_fit_left = evoked_left.copy().crop(0.08, 0.08)\nevoked_fit_left.pick(picks_left)\ncov_fit_left = cov.copy().pick_channels(picks_left, ordered=True)\n\npicks_right = read_vectorview_selection(\"Right\", info=info)\npicks_right = list(set(picks_right) - set(info[\"bads\"]))\nevoked_fit_right = evoked_right.copy().crop(0.08, 0.08)\nevoked_fit_right.pick(picks_right)\ncov_fit_right = cov.copy().pick_channels(picks_right, ordered=True)\n\n# Any SSS projections that are active on this data need to be re-normalized\n# after picking channels.\nevoked_fit_left.info.normalize_proj()\nevoked_fit_right.info.normalize_proj()\ncov_fit_left[\"projs\"] = evoked_fit_left.info[\"projs\"]\ncov_fit_right[\"projs\"] = evoked_fit_right.info[\"projs\"]\n\n# Fit the dipoles with the subset of sensors.\ndip_left, _ = mne.fit_dipole(evoked_fit_left, cov_fit_left, bem)\ndip_right, _ = mne.fit_dipole(evoked_fit_right, cov_fit_right, bem)\n\n###############################################################################\n# Now that we have the location and orientations of the dipoles, compute the\n# full timecourses using MNE, assigning activity to both dipoles at the same\n# time while preventing leakage between the two. We use a very low ``lambda``\n# value to ensure both dipoles are fully used.\n\nfwd, _ = mne.make_forward_dipole([dip_left, dip_right], bem, info)\n\n# Apply MNE inverse\ninv = make_inverse_operator(info, fwd, cov, fixed=True, depth=0)\nstc_left = apply_inverse(evoked_left, inv, method=\"MNE\", lambda2=1e-6)\nstc_right = apply_inverse(evoked_right, inv, method=\"MNE\", lambda2=1e-6)\n\n# Plot the timecourses of the resulting source estimate\nfig, axes = plt.subplots(nrows=2, sharex=True, sharey=True)\naxes[0].plot(stc_left.times, stc_left.data.T)\naxes[0].set_title(\"Left auditory stimulation\")\naxes[0].legend([\"Dipole 1\", \"Dipole 2\"])\naxes[1].plot(stc_right.times, stc_right.data.T)\naxes[1].set_title(\"Right auditory stimulation\")\naxes[1].set_xlabel(\"Time (s)\")\nfig.supylabel(\"Dipole amplitude\")\n\n###############################################################################\n# We can also fit the timecourses to single epochs. 
Here, we do it for each\n# experimental condition separately.\n\nstcs_left = apply_inverse_epochs(epochs[\"left\"], inv, lambda2=1e-6, method=\"MNE\")\nstcs_right = apply_inverse_epochs(epochs[\"right\"], inv, lambda2=1e-6, method=\"MNE\")\n\n###############################################################################\n# To summarize and visualize the single-epoch dipole amplitudes, we will create\n# a detailed plot of the mean amplitude of the dipoles during different\n# experimental conditions.\n\n# Summarize the single epoch timecourses by computing the mean amplitude from\n# 60-90ms.\namplitudes_left = []\namplitudes_right = []\nfor stc in stcs_left:\n amplitudes_left.append(stc.crop(0.06, 0.09).mean().data)\nfor stc in stcs_right:\n amplitudes_right.append(stc.crop(0.06, 0.09).mean().data)\namplitudes = np.vstack([amplitudes_left, amplitudes_right])\n\n# Visualize the epoch-by-epoch dipole ampltudes in a detailed figure.\nn = len(amplitudes)\nn_left = len(amplitudes_left)\nmean_left = np.mean(amplitudes_left, axis=0)\nmean_right = np.mean(amplitudes_right, axis=0)\n\nfig, ax = plt.subplots(figsize=(8, 4))\nax.scatter(np.arange(n), amplitudes[:, 0], label=\"Dipole 1\")\nax.scatter(np.arange(n), amplitudes[:, 1], label=\"Dipole 2\")\ntransition_point = n_left - 0.5\nax.plot([0, transition_point], [mean_left[0], mean_left[0]], color=\"C0\")\nax.plot([0, transition_point], [mean_left[1], mean_left[1]], color=\"C1\")\nax.plot([transition_point, n], [mean_right[0], mean_right[0]], color=\"C0\")\nax.plot([transition_point, n], [mean_right[1], mean_right[1]], color=\"C1\")\nax.axvline(transition_point, color=\"black\")\nax.set_xlabel(\"Epochs\")\nax.set_ylabel(\"Dipole amplitude\")\nax.legend()\nfig.suptitle(\"Single epoch dipole amplitudes\")\nfig.text(0.30, 0.9, \"Left auditory stimulation\", ha=\"center\")\nfig.text(0.70, 0.9, \"Right auditory stimulation\", ha=\"center\")\n","repo_name":"mne-tools/mne-python","sub_path":"examples/inverse/multi_dipole_model.py","file_name":"multi_dipole_model.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","stars":2405,"dataset":"github-code","pt":"71"} +{"seq_id":"41205774290","text":"from typing import Literal\n\nfrom pydantic import Field\nfrom pydantic.functional_validators import AfterValidator\nfrom typing_extensions import Annotated\n\n\ndef aaa_group_prefix(v: str) -> str:\n \"\"\"Prefix the AAA method with 'group' if it is known\"\"\"\n built_in_methods = [\"local\", \"none\", \"logging\"]\n return f\"group {v}\" if v not in built_in_methods and not v.startswith(\"group \") else v\n\n\nAAAAuthMethod = Annotated[str, AfterValidator(aaa_group_prefix)]\nVlan = Annotated[int, Field(ge=0, le=4094)]\nVni = Annotated[int, Field(ge=1, le=16777215)]\nTestStatus = Literal[\"unset\", \"success\", \"failure\", \"error\", \"skipped\"]\nInterface = Annotated[str, Field(pattern=r\"^(Ethernet|Fabric|Loopback|Management|Port-Channel|Tunnel|Vlan|Vxlan)[0-9]+(\\/[0-9]+)*(\\.[0-9]+)?$\")]\nAfi = Literal[\"ipv4\", \"ipv6\", \"vpn-ipv4\", \"vpn-ipv6\", \"evpn\", \"rt-membership\"]\nSafi = Literal[\"unicast\", \"multicast\", \"labeled-unicast\"]\n","repo_name":"arista-netdevops-community/anta","sub_path":"anta/custom_types.py","file_name":"custom_types.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"71"} +{"seq_id":"70230164390","text":"\"\"\"\nDesenvolva um programa que leia o comprimento de três retas e\ndiga ao usuário se 
elas podem ou não formar um triângulo.\n\"\"\"\n\n\nfrom time import sleep\nprint('Informe 3 segmentos de retas abaixo: ')\nr1 = float(input('Digite o 1° valor: '))\nr2 = float(input('Digite o 2° valor: '))\nr3 = float(input('Digite o 3° valor: '))\nprint('\\033[1;33mANALISANDO...\\033[m')\nsleep(1)\nif r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:\n print('\\033[4;35mAs três retas podem formar um Triângulo.\\033[m')\nelse:\n print('\\033[4;31mNão é possível formar um triângulo.')\n","repo_name":"m-suelen/exPython-CursoEmVideo","sub_path":"mundo1/ex035.py","file_name":"ex035.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38551704712","text":"import pandas as pd\nimport numpy as np\nfrom utils.prep_dataset import clear_city\n\n# для каждого заказа считаем цену товара только \ndef base_info_check(df, window='14d'):\n ind = []\n # collect information about every order (в один день несколько заказо от одного юезра)\n check_info_order = pd.DataFrame(df\\\n .groupby(['user_id', 'НомерЗаказаНаСайте', 'date'])\\\n .sum()[['Количество', 'Цена']])\\\n .reset_index()[['user_id', 'Цена', 'Количество', 'date']]\\\n \n check_info_order = check_info_order\\\n .rename(columns={'Цена': 'Cумма_в_чеке', 'Количество': 'Количество_товаров_в_чеке'})\n \n # считаем траты за день\n check_info_order = check_info_order.groupby(['user_id', 'date']).sum().reset_index()\n \n # в начале считаем окном \n # от значений полученных окном берем статистики \n check_info_order = check_info_order.groupby('user_id')\\\n .rolling(window = window, on = 'date')\\\n .mean().reset_index(1, drop=True)\\\n .groupby('user_id')\\\n .agg(['mean', lambda x: np.std(x, ddof=0)])\n \n multi_index = check_info_order.columns\n ind.append('user_id')\n for pair in multi_index:\n i_col = pair[0] + '_' + pair[1] \n ind.append(i_col)\n \n check_info = pd.DataFrame(data =np.array(check_info_order.reset_index()))\n check_info = check_info.set_axis(ind, axis=1) # final result/\n # check_info = check_info.fillna(0)\n \n check_info = check_info\\\n .rename(columns={'Cумма_в_чеке_mean': 'mean_amt_order', 'Количество_товаров_в_чеке_mean': 'mean_qty_item_in_order',\\\n 'Cумма_в_чеке_': 'std_amt_order', 'Количество_товаров_в_чеке_': \\\n 'std_qty_item_in_order'})\n check_info = check_info[['user_id','mean_amt_order', 'mean_qty_item_in_order', 'std_amt_order', \\\n 'std_qty_item_in_order']]\n\n return check_info\n\ndef detect_sex(df):\n female_name, male_name = [], []\n with open(\"../russian_name/female_names_rus.txt\", \"r\") as f:\n for line in f.readlines():\n female_name.append(line[:-1])\n \n with open(\"../russian_name/male_names_rus.txt\", \"r\") as f:\n for line in f.readlines():\n male_name.append(line[:-1])\n \n sex = pd.DataFrame({'name': female_name, 'sex': [1]*len(female_name)})\n sex = pd.concat([sex, pd.DataFrame({'name': male_name, 'sex': [-1]*len(male_name)})])\n sex = sex[['name', 'sex']].drop_duplicates() \n \n df = df[['user_id', 'Клиент']].drop_duplicates() \n df = df[['user_id', 'Клиент']].drop_duplicates() \n a = df[['user_id', 'Клиент']].groupby('user_id').count() > 1\n user_id = a[a.Клиент == True].reset_index().user_id.values\n df = df[~df.user_id.isin(user_id)]\n df = df.merge(sex, left_on='Клиент', right_on='name', how='left')\n df = df.drop(['name', 'Клиент'], axis = 1)\n df = df.drop_duplicates() \n df = df.dropna(thresh = 2)\n return df\n\ndef add_stat(df):\n \n info_city_stat = 
pd.read_csv('../stat_data/info_city.csv')[['address', 'dolgota', 'население', 'зп новое 2014']]\n info_region = pd.read_csv('../stat_data/region.csv')\n \n df = df[['user_id', 'city']].drop_duplicates()\n info_city_stat['dolgota'] = info_city_stat['dolgota'].apply(lambda x: clear_city(x, 2))\n info_city_stat = info_city_stat.rename(columns={'dolgota': 'city'})\n info_region = info_region.rename(columns={'Город': 'city'})\n\n info_city_stat = pd.merge(info_city_stat, info_region, on=['city'], how='left')\\\n [['address', 'население', 'city','зп новое 2014', 'Регион']]\n info_city_stat = info_city_stat.rename(columns={'Регион': 'region'})\n info_city_stat = info_city_stat.rename(columns={'dolgota': 'city'})\n\n client_region = pd.merge(df, info_city_stat, on=['city'], how='left')\\\n [['user_id', 'region', 'население', 'зп новое 2014']]\n \n a = client_region[['user_id', 'region']].groupby('user_id').count() > 1\n user_id = a[a.region == True].reset_index().user_id.values\n client_region = client_region[~client_region.user_id.isin(user_id)]\n \n client_region['население'] = client_region.apply(lambda x: np.nan if x['население'] == '#Н/Д'\\\n else x['население'],axis = 1)\n client_region = client_region.dropna(thresh = 2)\n client_region = client_region.drop_duplicates() \n return client_region\n\ndef share_group2(df):\n ind_share, to_drop_column = [], []\n \n df = df.pivot_table(index='user_id', columns='Группа2', \\\n values=['Количество'], aggfunc=['sum'])\n multi_index = df.columns\n ind_share.append('user_id')\n for pair in multi_index:\n i_col = pair[0] + '_' + pair[1] + '_' + pair[2]\n ind_share.append(i_col)\n\n category_info = pd.DataFrame(data = np.array(df.reset_index()))\n category_info = category_info.set_axis(ind_share, axis=1) # final result/\n category_info = category_info.fillna(0)\n \n category_info['sum'] = category_info.loc[:, category_info.columns != 'user_id'].sum(axis=1)\n \n columns_actual = [i_name for i_name in ind_share if i_name.find('sum_Количество') != -1]\n for i_name_col in columns_actual:\n start = i_name_col.find('о_') + 2\n new_name = 'share_' + i_name_col[start:]\n to_drop_column.append(i_name_col)\n category_info[new_name] = category_info[i_name_col] / category_info['sum']\n \n category_info = category_info.drop(columns=to_drop_column)\n return category_info\n\n# для формирования этих признаков не важна дата и поэтому не зависит от разбиения \ndef stat_features(df, users):\n sex = detect_sex(df)\n stat = add_stat(df)\n \n # # результут сегменатации\n segment = pd.read_csv('../segment_model/user_segmentation.csv', index_col=0)\n \n stat_user_features = users.merge(sex,\n on=['user_id'],\n how='left')\\\n .merge(stat,\n on=['user_id'],\n how='left')\\\n .merge(segment,\n on=['user_id'],\n how='left')\n # preprocessing\n stat_item_features = df[['item_id', 'Группа2', 'Тип']].drop_duplicates()\n \n stat_user_features['население'] = stat_user_features['население'].apply(lambda x: x.replace(u'\\xa0', '') if x is not np.nan\n else x)\n stat_user_features['население'] = stat_user_features['население'].apply(lambda x: int(x) if x is not np.nan\n else x)\n stat_user_features['зп новое 2014'] = stat_user_features['зп новое 2014'].apply(lambda x: x.replace(u'\\xa0', '') if x is not np.nan\n else x)\n stat_user_features['зп новое 2014'] = stat_user_features['зп новое 2014'].apply(lambda x: float(x) if x is not np.nan\n else x)\n \n stat_user_features['sex'] = stat_user_features['sex'].fillna(0)\n stat_user_features = pd.get_dummies(stat_user_features, columns=['sex'], 
drop_first=True)\n stat_user_features = pd.get_dummies(stat_user_features, columns=['region'], drop_first=True)\n stat_item_features = pd.get_dummies(stat_item_features, columns=['Группа2'], drop_first=True)\n stat_item_features = pd.get_dummies(stat_item_features, columns=['Тип'], drop_first=True)\n stat_user_features = pd.get_dummies(stat_user_features, columns=['segment'], drop_first=True)\n \n stat_user_features = stat_user_features\\\n .rename(columns={'зп новое 2014': 'Зарплата'})\n \n return stat_user_features, stat_item_features\n\n# количество прошедших дней между покупкой\ndef qty_last_dt(group):\n if len(group) == 1:\n return 0\n else:\n group = group.sort_values(by='date', ascending=False)\n day = (group.iloc[0,-1] - group.iloc[1,-1]).days\n return int(day)","repo_name":"maria-farfan/2lvl_recommendation_system","sub_path":"utils/.ipynb_checkpoints/create_features-checkpoint.py","file_name":"create_features-checkpoint.py","file_ext":"py","file_size_in_byte":8882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3557446106","text":"class Solution:\n def XXX(self, s: str) -> int:\n dic = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000}\n pre = 1001\n out = 0\n for ch in s:\n if dic[ch]<=pre:\n out+=dic[ch]\n else:\n out = out+dic[ch]-2*pre\n pre = dic[ch]\n return out\n\n","repo_name":"kkcookies99/UAST","sub_path":"Dataset/Leetcode/valid/13/250.py","file_name":"250.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"} +{"seq_id":"33790087110","text":"from .track import Track\nimport numpy as np\n\n\nclass FastTrack(Track):\n\n def __init__(self, track_dict: dict):\n '''\n As long as the track is only composed of constant\n curvature elements, we can accelerate computations\n with this class.\n '''\n super().__init__(track_dict)\n num_elems = len(self.track_elems)\n self.num_elems = num_elems\n self.curves = np.zeros((num_elems, 3), dtype='float')\n # precompute curvatures\n dist = 0.0\n for idx, elem in enumerate(self.track_elems):\n elem_len = elem.get_length()\n elem_start = dist\n elem_end = dist + elem_len\n crve = elem.get_curvature(0.5*elem_len)\n self.curves[idx, :] = [crve, elem_start, elem_end]\n dist += elem_len\n\n def get_curvature(self, dist: np.ndarray):\n '''\n Vectorised computation of curvature\n '''\n result = np.zeros_like(dist)\n curvatures = self.curves[:, 0]\n start_dists = self.curves[:, 1]\n end_dists = self.curves[:, 2]\n for elem_idx in range(self.num_elems):\n starts_after = np.array(dist >= start_dists[elem_idx], dtype='float')\n end_before = np.array(dist < end_dists[elem_idx], dtype='float')\n result += curvatures[elem_idx]*starts_after*end_before\n return result\n","repo_name":"Hoeppke/pyracetrack","sub_path":"pyracetrack/track_fast.py","file_name":"track_fast.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"20820440404","text":"import argparse\nfrom typing import Dict, Type\n\nimport tomli\n\nfrom envira.environment import EXEC_INFO, SYS_INFO, Environment, set_config_file\nfrom envira.git import GitLoader\nfrom envira.providers import BaseProvider, get_providers\nfrom envira.utils import is_path, is_url\n\n\nclass Configurator:\n env: Environment\n providers: Dict[str, Type[BaseProvider]]\n\n def __init__(self, env: Environment) -> None:\n self.env = env\n\n self._prepare_providers()\n\n 
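# Typical driving code, mirroring main() below (the path is hypothetical):\n    #\n    #     env = Environment(some_config_path)\n    #     if env.prepared:\n    #         Configurator(env).load(force=False)\n\n    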
def load(self, force: bool = False):\n with open(self.env.config_path, \"r\") as f: # type: ignore\n prepared_raw_data = self._unfold_macro(f.read())\n conf_obj = tomli.loads(prepared_raw_data)\n\n for section in self.providers:\n if section in conf_obj:\n provider = self.providers[section](conf_obj[section])\n\n if provider.apply(force=force, env=self.env):\n return\n\n def _prepare_providers(self) -> None:\n self.providers = {\n provider.section_key: provider\n for provider in sorted(get_providers(), key=lambda x: x.priority) # type: ignore\n }\n\n @staticmethod\n def _unfold_macro(raw_data: str):\n return (\n raw_data.replace(\"${distr_name}\", SYS_INFO.id_)\n .replace(\"${distr_ver}\", SYS_INFO.version)\n .replace(\"${user}\", EXEC_INFO.uname)\n .replace(\"${home}\", EXEC_INFO.uhome)\n )\n\n\ndef prepare_args() -> argparse.Namespace:\n argparser = argparse.ArgumentParser(prog=\"envira\")\n\n argparser.add_argument(\n \"path_or_url\",\n type=str,\n help=\"Path to folder or remote repository with configuration\",\n )\n\n argparser.add_argument(\n \"-c\", \"--config-name\", type=str, help=\"Set envira config file name\"\n )\n\n argparser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Allow envira fix and overwrite dirs and files\",\n )\n\n return argparser.parse_args()\n\n\ndef main() -> int:\n if EXEC_INFO.euid != 0:\n print(\n \"You need to have root privileges to run this script.\\n\"\n \"Please try again, this time using 'sudo'. Exiting.\"\n )\n return 1\n\n if SYS_INFO.os != \"linux\":\n print(f\"Platform {SYS_INFO.os} not currently supported!\")\n return 1\n\n args = prepare_args()\n\n if args.config_name:\n set_config_file(args.config_name)\n\n if is_url(args.path_or_url):\n git_loader = GitLoader(args.path_or_url)\n git_loader.clone()\n\n if not git_loader.is_cloned:\n print(\"Cloning failed!\")\n return 1\n\n path = git_loader.folder_path\n\n elif is_path(args.path_or_url):\n path = EXEC_INFO.exc_path / args.path_or_url\n\n else:\n print(\"Invalid URL or path!\")\n return 1\n\n environment = Environment(path)\n\n if environment.prepared:\n configurator = Configurator(environment)\n configurator.load(force=args.force)\n\n else:\n print(\"Failed to prepare an environment!\")\n return 1\n\n return 0\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(main())\n","repo_name":"wallseat/envira","sub_path":"src/envira/configurator.py","file_name":"configurator.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38098974836","text":"# p6_2.py\ndef discount(price, rate):\n final_price = price * rate\n return final_price\n\nold_price = float(input('请输入原价:'))\nrate = float(input('请输入折扣率:'))\nnew_price = discount(old_price, rate)\n\nprint('打折后价格是:', new_price)\nprint('试图在函数外部访问局部变量final_price的值:%.2f' % final_price)\n","repo_name":"WhiteSheep-y/Python","sub_path":"源代码/p6/p6_2.py","file_name":"p6_2.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"70962810150","text":"import sys\n\nfin = open('shuffle.in', 'r')\nfout = open('shuffle.out', 'w')\n\n#I got confused with initial and final shuffle values\nRow_num = int(fin.readline())\nCow_order = list(map(int, fin.readline().split()))\nCows = list(map(int, fin.readline().split()))\n\n#Reverse shuffle\nfor i in range(3):\n temp = [0]*Row_num\n for i in range(Row_num):\n temp[i] = 
Cows[Cow_order[i]-1]\n Cows = temp\n\n#Print\nfor item in Cows:\n fout.write(str(item))\n fout.write('\\n')\n","repo_name":"314programs/USACO-practice","sub_path":"The_Bovine_Shuffle.py","file_name":"The_Bovine_Shuffle.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26447850562","text":"import requests\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport datetime\nfrom shutil import copyfile\nimport os\n\n\ndef get_tokens():\n # Read from text file\n f = open(\"ecobee_api_key.txt\", \"r\")\n API_KEY = f.read()\n f = open(\"ecobee_access_token.txt\", \"r\")\n ACCESS_TOKEN = f.read()\n f = open(\"ecobee_refresh_token.txt\", \"r\")\n REFRESH_TOKEN = f.read()\n\n return API_KEY, ACCESS_TOKEN, REFRESH_TOKEN\n\n\ndef refresh_and_get_tokens():\n API_KEY, ACCESS_TOKEN, REFRESH_TOKEN = get_tokens()\n\n # refresh\n url = \"https://api.ecobee.com/token\"\n payload = {\n \"grant_type\": \"refresh_token\",\n \"code\": REFRESH_TOKEN,\n \"client_id\": API_KEY,\n }\n response = requests.post(url, data=payload)\n\n if response.status_code != 200:\n print(\"Something went wrong when trying to refresh access\")\n else:\n f = open(\"ecobee_access_token.txt\", \"w\")\n ACCESS_TOKEN = response.json()[\"access_token\"]\n f.write(ACCESS_TOKEN)\n f.close()\n f = open(\"ecobee_refresh_token.txt\", \"w\")\n f.write(response.json()[\"refresh_token\"])\n f.close()\n\n return API_KEY, ACCESS_TOKEN, REFRESH_TOKEN\n\n\ndef get_thermostat_data(ACCESS_TOKEN):\n # get temperature\n url = \"https://api.ecobee.com/1/thermostat\"\n header = {\"Content-Type\": \"text/json\", \"Authorization\": \"Bearer \" + ACCESS_TOKEN}\n payload = {\n \"json\": '{\"selection\":{\"selectionType\":\"registered\",\"selectionMatch\":\"\",\"includeRuntime\":\"true\",\"includeSensors\":\"true\",\"includeWeather\":\"true\"}}'\n }\n response = requests.get(url, params=payload, headers=header)\n\n return response\n\n\ndef get_temperature():\n now = datetime.datetime.now()\n now_formatted = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n API_KEY, ACCESS_TOKEN, REFRESH_TOKEN = refresh_and_get_tokens()\n response = get_thermostat_data(ACCESS_TOKEN)\n\n if response.status_code != 200:\n print(\"Something went wrong when trying to get thermostat data\")\n else:\n r_json = response.json()\n r_json_thermostatList = r_json[\"thermostatList\"][0]\n r_json_remoteSensors = r_json_thermostatList[\"remoteSensors\"]\n r_json_weather = r_json_thermostatList['weather']\n\n sensorName = \"Cellar\"\n sensorFound = False\n for sensor in r_json_remoteSensors:\n if sensor[\"name\"] == sensorName:\n sensorFound = True\n\n for capability in sensor[\"capability\"]:\n if capability[\"type\"] == \"temperature\":\n temp = capability[\"value\"]\n tempString = f\"{temp[0:temp.__len__() - 1]}.{temp.__len__()}\"\n print(f\"{now_formatted} - Sensor temp: {tempString} degF\")\n break\n break\n\n if not sensorFound:\n print(f\"{sensorName} sensor not found!\")\n\n # Get forecast data; [0] is the most accurate\n r_json_forecast = r_json_weather['forecasts'][0]\n temp_forecast = r_json_forecast['temperature'].__str__()\n tempString_forecast = f\"{temp_forecast[0:temp_forecast.__len__() - 1]}.{temp_forecast.__len__()}\"\n try:\n tempStringFloat = float(tempString)\n except:\n tempStringFloat = -1\n\n try:\n tempStringFloat_forecast = float(tempString_forecast)\n except:\n tempStringFloat_forecast = -1\n\n return tempString, tempStringFloat, now, tempString_forecast, 
tempStringFloat_forecast\n\n\ndef main():\n now = datetime.datetime.now()\n if os.path.exists('data.txt'):\n copyfile('data.txt',f'archive/data_backup_{now}.txt')\n os.remove('data.txt')\n\n # Execute once\n tempString, tempFloat, time, tempString_forecast, tempFloat_forecast = get_temperature()\n f = open('data.txt', 'a')\n f.write(f'{time},{tempString},{tempString_forecast}\\n')\n f.close()\n\n # Start scheduler\n scheduler = BlockingScheduler()\n\n @scheduler.scheduled_job('interval',seconds=900)\n def my_job():\n tempString, tempFloat, time, tempString_forecast, tempFloat_forecast = get_temperature()\n f = open('data.txt','a')\n f.write(f'{time},{tempString},{tempString_forecast}\\n')\n f.close()\n\n scheduler.start()\n # get_temperature()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alexgui/temp-monitor-ecobee","sub_path":"ecobeerequest.py","file_name":"ecobeerequest.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24779684723","text":"import numpy as np\nimport cut as cut\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\na = np.load('../plotCSV/sbdots_adv.npy')\naa = np.load('../plotCSV/sbdots_full8.npy')\nb = np.load('../plotCSV/surviving_adv.npy')\nbb = np.load('../plotCSV/surviving_full8.npy')\n\nc = np.column_stack((a,aa[:,1]))\n\ncc = np.column_stack((b,bb[:,1]))\n\nax1,ax2 = plt.subplot(2,1,1),plt.subplot(2,1,2)\n\nl1, = ax1.plot(a[:,0],a[:,1])\nl2, = ax1.plot(a[:,0],aa[:,1])\nax1.set_xlabel(r'$m_\\chi (GeV)$',fontsize=25)\nax1.set_ylabel(r'$\\epsilon$',fontsize=25)\nax1.set_yscale('log')\nax1.set_xscale('log')\n\nax2.set_yscale('log')\nax2.set_xscale('log')\nax2.plot(b[:,0],b[:,1])\nax2.plot(b[:,0],bb[:,1])\nax2.set_xlabel(r'$m_\\chi (GeV)$',fontsize=25)\nax2.set_ylabel(r'Surviving events',fontsize=25)\nax1.legend((l1,l2),('Advanced cut only','full cut set'),loc='8')\nax2.legend((l1,l2),('Advanced cut only','full cut set'),loc='upper right')\n\n\n\nplt.show()\n# print(c)\n# print(cc)\n# np.savetxt('plotCSV/adv.csv',c,delimiter=', ')\n# np.savetxt('plotCSV/full.csv',cc,delimiter=', ')","repo_name":"XUYONGHENG/CEPC-Millicharge","sub_path":"fcdatawash/plotpy/advvsfullplot.py","file_name":"advvsfullplot.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30481500089","text":"\nimport os\nimport sys\nimport unittest\nfrom mock import Mock\nsys.path.append('../../')\nimport blng.Voodoo\n\n\nclass TestVoodoo(unittest.TestCase):\n\n def setUp(self):\n self.maxDiff = 400000\n\n def _get_session(self):\n self.subject = blng.Voodoo.DataAccess('crux-example.xml')\n self.root = self.subject.get_root()\n return self.root\n\n def test_list_delete_element(self):\n # BUild\n root = self._get_session()\n\n one = root.twokeylist.create('a1', 'b1')\n two = root.twokeylist.create('a2', 'b2')\n three = root.twokeylist.create('a3', 'b3')\n\n self.assertTrue(('a1', 'b1') in root.twokeylist)\n self.assertTrue(('x1', 'b1') not in root.twokeylist)\n\n ELEPHANT = root.simplelist.create('elephant')\n CAMEL = root.simplelist.create('camel')\n ZOMBIE = root.simplelist.create('zombie')\n GHOUL = root.simplelist.create('ghoul')\n\n self.assertEqual(len(root.simplelist), 4)\n self.assertTrue('zombie' in root.simplelist)\n self.assertFalse('zombie' not in root.simplelist)\n self.assertEqual(['elephant', 'camel', 'zombie', 'ghoul'], root.simplelist.keys())\n self.assertEqual([['a1', 
'b1'], ['a2', 'b2'], ['a3', 'b3']], root.twokeylist.keys())\n\n for listelement in root.twokeylist:\n listelement.tertiary = listelement.primary + listelement.secondary\n\n self.assertEqual(root.simplelist['zombie']._path, \"/simplelist[simplekey='zombie']\")\n self.assertEqual(root.simplelist['ghoul'].simplekey, \"ghoul\")\n\n # Action\n del root.simplelist['zombie']\n\n self.assertTrue('zombie' not in root.simplelist)\n self.assertFalse('elephant' not in root.simplelist)\n self.assertFalse('camel' not in root.simplelist)\n self.assertFalse('ghoul' not in root.simplelist)\n self.assertFalse('zombie' in root.simplelist)\n self.assertTrue('elephant' in root.simplelist)\n self.assertTrue('camel' in root.simplelist)\n self.assertTrue('ghoul' in root.simplelist)\n\n # Test that this does not actually remove the item from the list\n # it should delete the reference to the list element only\n listelement = root.twokeylist['a2', 'b2']\n del listelement\n self.assertEqual(root.twokeylist['a2', 'b2'].tertiary, 'a2b2')\n\n del root.twokeylist['a2', 'b2']\n with self.assertRaises(blng.Voodoo.BadVoodoo) as context:\n x = root.twokeylist['a2', 'b2']\n self.assertEqual(str(context.exception), \"ListElement does not exist: /twokeylist[primary='a2'][secondary='b2']\")\n\n # Assert\n self.assertEqual(len(root.simplelist), 3)\n self.assertEqual(['elephant', 'camel', 'ghoul'], root.simplelist.keys())\n self.assertEqual([['a1', 'b1'], ['a3', 'b3']], root.twokeylist.keys())\n\n expected_xml = \"\"\"\n \n a1\n b1\n a1b1\n \n \n a3\n b3\n a3b3\n \n \n elephant\n \n \n camel\n \n \n ghoul\n \n\n\"\"\"\n\n self.assertEqual(self.subject.dumps(), expected_xml)\n\n def test_list_iteration(self):\n root = self._get_session()\n\n one = root.twokeylist.create('a1', 'b1')\n two = root.twokeylist.create('a2', 'b2')\n\n for listelement in root.twokeylist:\n listelement.tertiary = listelement.primary + listelement.secondary\n\n for listelement in root.simplelist:\n self.fail('This list was empty so we should not have iterated around it')\n\n # This has two list elements\n i = 0\n for listelement in root.twokeylist:\n i = i + 1\n self.assertEqual(i, 2)\n\n one = root.simplelist.create('1111')\n for listelement in root.simplelist:\n listelement.nonleafkey = 'first-set'\n listelement.nonleafkey = listelement.simplekey\n\n expected_xml = \"\"\"\n \n a1\n b1\n a1b1\n \n \n a2\n b2\n a2b2\n \n \n 1111\n 1111\n \n\n\"\"\"\n\n self.assertEqual(self.subject.dumps(), expected_xml)\n\n def test_accessing_list_elements(self):\n root = self._get_session()\n\n x = root.twokeylist.create('a', 'b')\n y = root.twokeylist['a', 'b']\n y.tertiary = '3'\n x = root.twokeylist.create('a', 'b')\n x = root.twokeylist.create('A', 'B')\n root.twokeylist.create('A', 'B').tertiary = 'sdf'\n\n self.assertEqual(y.tertiary, '3')\n expected_xml = \"\"\"\n \n a\n b\n 3\n \n \n A\n B\n sdf\n \n\n\"\"\"\n self.assertEqual(self.subject.dumps(), expected_xml)\n self.assertEqual(repr(y), \"VoodooListElement: /twokeylist[primary='a'][secondary='b']\")\n\n with self.assertRaises(blng.Voodoo.BadVoodoo) as context:\n a = root.twokeylist['not-existing-key', 'b']\n self.assertEqual(str(context.exception), \"ListElement does not exist: /twokeylist[primary='not-existing-key'][secondary='b']\")\n\n with self.assertRaises(blng.Voodoo.BadVoodoo) as context:\n a = root.twokeylist['a', 'non-existing-second-key']\n self.assertEqual(str(context.exception), \"ListElement does not exist: /twokeylist[primary='a'][secondary='non-existing-second-key']\")\n\n def 
test_deserialise_and_serilaise_example_with_cache_checks(self):\n serilaised_xml = \"\"\"\n 9999\n \n a\n \n \n firstkey\n \n abc123\n \n a\n \n A\n \n \n \n b\n \n\"\"\"\n\n root = self._get_session()\n (keystore_cache, schema_cache) = self.subject._cache\n\n root.simpleleaf = 'value_before_loading_serialised_data'\n self.assertEqual(root.simpleleaf, 'value_before_loading_serialised_data')\n self.assertEqual(list(keystore_cache.items.keys()), ['/voodoo/simpleleaf'])\n\n self.subject.loads(serilaised_xml)\n self.assertEqual(list(keystore_cache.items.keys()), [])\n self.assertEqual(root.morecomplex.leaf2, 'a')\n\n self.assertEqual(root.simpleleaf, '9999')\n self.assertEqual(root.hyphen_leaf, 'abc123')\n self.assertEqual(list(keystore_cache.items.keys()), ['/voodoo/morecomplex',\n '/voodoo/morecomplex/leaf2', '/voodoo/simpleleaf', '/voodoo/hyphen_leaf'])\n\n root.simpleleaf = \"value_after_deserialised_and_modified\"\n\n re_serilaised_xml = \"\"\"value_after_deserialised_and_modified\n \n a\n \n \n firstkey\n \n abc123\n \n a\n \n A\n \n \n \n b\n \n\n\"\"\"\n\n self.assertEqual(self.subject.dumps(), re_serilaised_xml)\n\n def test_deserialise_and_serilaise(self):\n serilaised_xml = \"\"\"\n 9999\n \n a\n \n \n firstkey\n \n abc123\n \n a\n \n A\n \n \n \n b\n \n\"\"\"\n\n root = self._get_session()\n self.subject.loads(serilaised_xml)\n root.simpleleaf = \"value_after_deserialised_and_modified\"\n# + is what we have extra in the test\n# - is what was recevied extra in the running out\n re_serilaised_xml = \"\"\"value_after_deserialised_and_modified\n \n a\n \n \n firstkey\n \n abc123\n \n a\n \n A\n \n \n \n b\n \n\n\"\"\"\n # raise ValueError(self.subject.dumps())\n self.assertEqual(self.subject.dumps(), re_serilaised_xml)\n\n def test_parents(self):\n root = self._get_session()\n root.psychedelia.psychedelic_rock.noise_pop.shoe_gaze.bands._parent._parent.bands.create('Jesus and the Mary Chain')\n root.psychedelia.psychedelic_rock.noise_pop.dream_pop.bands.create('Night Flowers')\n root.psychedelia.psychedelic_rock.noise_pop.dream_pop.bands.create('Mazzy Star')\n root.psychedelia.psychedelic_rock.noise_pop.dream_pop.bands['Mazzy Star']._parent['Night Flowers'].favourite = 'True'\n\n expected_xml = \"\"\"\n \n \n \n \n Jesus and the Mary Chain\n \n \n \n Night Flowers\n True\n \n \n Mazzy Star\n \n \n \n \n \n\n\"\"\"\n\n self.assertEqual(self.subject.dumps(), expected_xml)\n self.assertEqual(root.psychedelia.psychedelic_rock.noise_pop.shoe_gaze._path, '/psychedelia/psychedelic_rock/noise_pop/shoe_gaze')\n self.assertEqual(root.psychedelia.psychedelic_rock.noise_pop.shoe_gaze._parent._path, '/psychedelia/psychedelic_rock/noise_pop')\n\n def test_list_within_list(self):\n root = self._get_session()\n a = root.simplelist.create('a')\n\n for c in range(2):\n root = self._get_session()\n\n a = root.simplelist.create('a')\n a.nonleafkey = 'b'\n b = root.simplelist.create('b')\n b.nonleafkey = 'bb'\n A = root.outsidelist.create('AA')\n B = root.outsidelist.create('BB')\n B = root.outsidelist.create('BB')\n B = root.outsidelist.create('BB')\n B.insidelist.create('bbbbbb')\n A = root.outsidelist.create('AA')\n a = A.insidelist.create('aaaaa')\n english = A.otherinsidelist.create('one', 'two', 'three')\n english.otherlist4 = 'four'\n french = A.otherinsidelist.create('un', 'deux', 'trois')\n french.otherlist4 = 'quatre'\n french.language = 'french'\n italian = B.otherinsidelist.create('uno', 'due', 'tres')\n italian.otherlist4 = 'quattro'\n italian.language = 'italian'\n spanish = 
B.otherinsidelist.create('uno', 'dos', 'tres')\n spanish.otherlist4 = 'cuatro'\n spanish.language = 'spanish'\n spanish = A.otherinsidelist.create('uno', 'dos', 'tres')\n spanish.otherlist4 = 'cuatro'\n spanish.language = 'spanish'\n german = B.otherinsidelist.create('eins', 'zwei', 'drei')\n with self.assertRaises(blng.Voodoo.BadVoodoo) as context:\n swedish = B.otherinsidelist.create('et', 'två', 'tre', 'fyra')\n self.assertEqual(str(context.exception), \"Wrong Number of keys require 3 got 4. keys defined: ['otherlist1', 'otherlist2', 'otherlist3']\")\n\n with self.assertRaises(blng.Voodoo.BadVoodoo) as context:\n danish = A.otherinsidelist.create('et', 'to')\n danish.language = 'danish'\n self.assertEqual(str(context.exception), \"Wrong Number of keys require 3 got 2. keys defined: ['otherlist1', 'otherlist2', 'otherlist3']\")\n\n dutch_part1 = A.otherinsidelist.create('een', 'twee', 'drie')\n dutch_part1.otherlist4 = 'vier'\n dutch_part1.language = 'dutch'\n dutch_part2 = B.otherinsidelist.create('een', 'twee', 'drie')\n dutch_part2.otherlist5 = 'vijf'\n dutch_part2.language = 'dutch'\n\n expected_xml = \"\"\"\n \n a\n b\n \n \n b\n bb\n \n \n AA\n \n aaaaa\n \n \n one\n two\n three\n four\n \n \n un\n deux\n trois\n quatre\n french\n \n \n uno\n dos\n tres\n cuatro\n spanish\n \n \n een\n twee\n drie\n vier\n dutch\n \n \n \n BB\n \n bbbbbb\n \n \n uno\n due\n tres\n quattro\n italian\n \n \n uno\n dos\n tres\n cuatro\n spanish\n \n \n eins\n zwei\n drei\n \n \n een\n twee\n drie\n vijf\n dutch\n \n \n\n\"\"\"\n\n self.assertEqual(self.subject.dumps(), expected_xml)\n\n def test_list_with_dump(self):\n # note quite test driven but want to go to bed!\n\n # list create()\n # list create() without enough keys\n # list create() with too many keys\n # list create() then trying to change the key (not allowed)\n # list Create() and then modifying non keys (allows)\n # creating multiple list entries (different keys) shoudl be allowed\n #\n\n # Act\n root = self._get_session()\n listelement = root.simplelist.create('Shamanaid')\n listelement.nonleafkey = 'sdf'\n # Check the same list element can have the create method called a second name\n listelement = root.simplelist.create('Shamanaid')\n\n with self.assertRaises(blng.Voodoo.BadVoodoo) as context:\n listelement.simplekey = 'change the value'\n self.assertEqual(str(context.exception), \"Changing a list key is not supported. 
/simplelist[simplekey='Shamanaid']/simplekey\")\n\n received_xml = self.subject.dumps()\n\n # Assert\n expected_xml = \"\"\"\n \n Shamanaid\n sdf\n \n\n\"\"\"\n self.assertEqual(expected_xml, received_xml)\n\n listelement2 = root.simplelist.create('Prophet')\n listelement2.nonleafkey = 'master'\n received_xml = self.subject.dumps()\n\n # Assert\n expected_xml = \"\"\"\n \n Shamanaid\n sdf\n \n \n Prophet\n master\n \n\n\"\"\"\n self.assertEqual(expected_xml, received_xml)\n\n def test_basic_xmldumps(self):\n root = self._get_session()\n\n # Act\n root.morecomplex.leaf2 = \"sing-and-dance-or-youll\"\n leaf2_value = root.morecomplex.leaf2\n root.hyphen_leaf = 'underscore_in_voodoo-should-be-hyphens-in-xmldoc'\n hyphen_leaf_value = root.hyphen_leaf\n\n received_xml = self.subject.dumps()\n\n # Assert\n self.assertEqual(\"sing-and-dance-or-youll\", leaf2_value)\n self.assertEqual(\"underscore_in_voodoo-should-be-hyphens-in-xmldoc\", hyphen_leaf_value)\n expected_xml = \"\"\"\n \n sing-and-dance-or-youll\n \n underscore_in_voodoo-should-be-hyphens-in-xmldoc\n\n\"\"\"\n self.assertEqual(expected_xml, received_xml)\n\n def test_basic_list(self):\n root = self._get_session()\n\n listelement = root.simplelist.create('Shamanaid')\n self.assertEqual(repr(listelement), \"VoodooListElement: /simplelist[simplekey='Shamanaid']\")\n\n with self.assertRaises(blng.Voodoo.BadVoodoo) as context:\n a = root.simplelist['not-existing-key']\n self.assertEqual(str(context.exception), \"ListElement does not exist: /simplelist[simplekey='not-existing-key']\")\n\n expected_hits = ['nonleafkey', 'simplekey']\n self.assertEqual(dir(listelement), expected_hits)\n self.assertEqual(dir(root.simplelist), [])\n self.assertEqual(root.simplelist['Shamanaid'].simplekey, 'Shamanaid')\n self.assertEqual(repr(root.simplelist['Shamanaid']), \"VoodooListElement: /simplelist[simplekey='Shamanaid']\")\n\n def test_basic_dir(self):\n root = self._get_session()\n\n expected_hits = ['inner', 'leaf2', 'leaf3', 'leaf4', 'nonconfig']\n self.assertEqual(dir(root.morecomplex), expected_hits)\n\n def test_basic_repr(self):\n root = self._get_session()\n\n node = root.morecomplex\n self.assertEqual(repr(node), \"VoodooContainer: /morecomplex\")\n self.assertEqual(repr(node.inner), \"VoodooPresenceContainer: /morecomplex/inner\")\n\n node = root.morecomplex.leaf2\n node = \"x123\"\n self.assertEqual(repr(node), \"'x123'\")\n\n def test_basic_session_leaf(self):\n root = self._get_session()\n\n value = root.simpleleaf\n self.assertEqual(value, None)\n\n root.simpleleaf = 'ABC123'\n value = root.simpleleaf\n self.assertEqual(value, 'ABC123')\n\n def test_basic_session_setup(self):\n self._get_session()\n\n self.assertEqual(repr(self.root), \"VoodooRoot\")\n\n def test_root_only_returns_root(self):\n root = self._get_session()\n\n with self.assertRaises(blng.Voodoo.BadVoodoo) as context:\n x = root.platinum\n self.assertEqual(str(context.exception), \"Unable to find '/platinum' in the schema\")\n","repo_name":"allena29/brewerslabng","sub_path":"test/unit/test_Voodoo.py","file_name":"test_Voodoo.py","file_ext":"py","file_size_in_byte":20544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"27645347007","text":"# coding:utf-8\nimport torch\nimport torch.nn as nn\nfrom torch.utils import data\nfrom torchvision import datasets, transforms, models\nimport numpy as np\nimport argparse\nfrom tqdm import tqdm\nimport sys, time\nfrom tensorboardX import SummaryWriter\n\nfrom utils.util import 
load_checkpoint, save_checkpoint, ensure_dir\nfrom model import example_model\n\n# set flags / seeds\ntorch.backends.cudnn.benchmark = True\nnp.random.seed(1)\ntorch.manual_seed(1)\ntorch.cuda.manual_seed(1)\n\n\nclass Logger(object):\n def __init__(self, filename='train.log', stream=sys.stdout):\n self.terminal = stream\n self.log = open(filename, 'a')\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n pass\n\n\nsys.stdout = Logger('Model.log', sys.stdout)\n\n# Start with main code\nif __name__ == '__main__':\n '''Step 0: argument configuration'''\n parser = argparse.ArgumentParser(description=\"Image classification example\")\n parser.add_argument('--lr', type=float, default=1e-3,\n help='initial learning rate for the optimizer (default: 1e-3)')\n parser.add_argument('--resume', action='store_true',\n help='resume from the previously saved weights (default: False)')\n parser.add_argument('--path_to_checkpoint', type=str, default='',\n help='path to the previously saved checkpoint (default: \"\")')\n parser.add_argument('--epochs', type=int, default=10,\n help='number of training epochs (default: 10)')\n parser.add_argument('--batch_size', type=int, default=8,\n help='batch size (default: 8)')\n parser.add_argument('--num_workers', type=int, default=0,\n help='number of data loading worker processes (default: 0)')\n parser.add_argument('--Dataset', type=str, default='custom', choices=['custom', 'cifar10', 'cifar100', 'mnist'],\n help='dataset to use for training and validation')\n parser.add_argument('--input_size', type=int, default=28,\n help='input image size, e.g. 28 for 28*28')\n parser.add_argument('--num_classes', type=int, default=3,\n help='number of target classes')\n\n opt = parser.parse_args()\n\n '''Step 1: set up the datasets and their transforms'''\n train_transforms = transforms.Compose([\n transforms.RandomCrop(opt.input_size, padding=4),\n # transforms.RandomHorizontalFlip(), # not applicable to MNIST\n transforms.ToTensor(),\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # not applicable to MNIST\n ])\n\n val_transforms = transforms.Compose([\n transforms.RandomCrop(opt.input_size, padding=4),\n transforms.ToTensor(),\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # not applicable to MNIST\n ])\n\n if opt.Dataset == 'cifar10':\n train_dataset = datasets.CIFAR10(root='cifar10', train=True, transform=train_transforms, download=True)\n validate_dataset = datasets.CIFAR10(root='cifar10', train=False, transform=val_transforms, download=True)\n\n elif opt.Dataset == 'cifar100':\n train_dataset = datasets.CIFAR100(root='cifar100', train=True, transform=train_transforms, download=True)\n validate_dataset = datasets.CIFAR100(root='cifar100', train=False, transform=val_transforms, download=True)\n\n elif opt.Dataset == 'mnist':\n train_dataset = datasets.MNIST(root='mnist', train=True, transform=train_transforms, download=True)\n validate_dataset = datasets.MNIST(root='mnist', train=False, transform=val_transforms, download=True)\n\n elif opt.Dataset == 'custom':\n train_dataset = datasets.ImageFolder(root=\"CustomDataSet\\\\train\", transform=train_transforms)\n validate_dataset = datasets.ImageFolder(root=\"CustomDataSet\\\\val\", transform=val_transforms)\n\n train_data_loader = data.DataLoader(train_dataset,\n batch_size=opt.batch_size,\n shuffle=True,\n drop_last=True,\n num_workers=opt.num_workers)\n val_data_loader = data.DataLoader(validate_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=opt.num_workers)\n\n '''Step 2: build the network'''\n net = example_model.ExampleModel(class_num=opt.num_classes)\n\n '''Step 3: set up the loss function'''\n criterion_CE = nn.CrossEntropyLoss()\n\n # GPU acceleration\n use_cuda = torch.cuda.is_available()\n if use_cuda:\n net = net.cuda()\n\n '''Step 4: set up the optimizer'''\n optim = torch.optim.Adam(net.parameters(), lr=opt.lr)\n\n
'''Step 5: load pretrained weights'''\n start_n_iter = 0\n start_epoch = 0\n if opt.resume:\n ckpt = load_checkpoint(opt.path_to_checkpoint) # custom method for loading last checkpoint\n net.load_state_dict(ckpt['net'])\n start_epoch = ckpt['epoch']\n start_n_iter = ckpt['n_iter']\n optim.load_state_dict(ckpt['optim'])\n print(\"last checkpoint restored\")\n\n # track progress with tensorboardX\n writer = SummaryWriter()\n\n '''Step 6: start training'''\n n_iter = start_n_iter\n for epoch in range(start_epoch, opt.epochs):\n # switch to train mode\n net.train()\n\n # iterate with tqdm\n pbar = tqdm(enumerate(train_data_loader),\n total=len(train_data_loader))\n start_time = time.time()\n\n # iterate over the batches of the dataset\n for i, data in pbar:\n '''Step 6.1: load the data'''\n img, label = data\n if use_cuda:\n img = img.cuda()\n label = label.cuda()\n\n # track preparation time and computation time with tqdm\n prepare_time = time.time() - start_time\n\n '''Step 6.2: forward pass'''\n out = net(img)\n '''Step 6.3: compute the loss'''\n loss = criterion_CE(out, label)\n '''Step 6.4: zero the optimizer gradients'''\n optim.zero_grad()\n '''Step 6.5: backward pass'''\n loss.backward()\n '''Step 6.6: update the parameters'''\n optim.step()\n\n # update tensorboardX (log the loss value at the current global step)\n writer.add_scalar('train_loss', loss.item(), n_iter)\n n_iter += 1\n\n # computation time & compute efficiency\n process_time = time.time() - start_time - prepare_time\n compute_efficiency = process_time / (process_time + prepare_time)\n pbar.set_description(\n f'Compute efficiency: {compute_efficiency:.2f}, '\n f'loss: {loss.item():.2f}, epoch: {epoch}/{opt.epochs}')\n start_time = time.time()\n\n # validation\n if epoch % 1 == 0:\n # switch to eval mode\n net.eval()\n\n correct = 0\n total = 0\n\n pbar = tqdm(enumerate(val_data_loader),\n total=len(val_data_loader))\n with torch.no_grad():\n for i, data in pbar:\n # data preparation\n img, label = data\n if use_cuda:\n img = img.cuda()\n label = label.cuda()\n\n out = net(img)\n _, predicted = torch.max(out.data, 1)\n total += label.size(0)\n correct += (predicted == label).sum().item()\n\n print(f'Accuracy on val set: {100 * correct / total:.2f}')\n\n # save checkpoint history\n cpkt = {\n 'net': net.state_dict(),\n 'epoch': epoch,\n 'n_iter': n_iter,\n 'optim': optim.state_dict()\n }\n save_checkpoint(cpkt, 'model_checkpoint.ckpt')\n","repo_name":"Windxy/Easy-Image-Classify","sub_path":"train_val.py","file_name":"train_val.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"9379806031","text":"# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1428168617.075995\n_enable_loop = True\n_template_filename = '/Users/Dennis/Developer/Python/chf/homepage/templates/events.html'\n_template_uri = 'events.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['content']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n def content():\n return render_content(context._locals(__M_locals))\n events = context.get('events', UNDEFINED)\n __M_writer = context.writer()\n
__M_writer('\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):\n context['self'].content(**pageargs)\n \n\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n STATIC_URL = context.get('STATIC_URL', UNDEFINED)\n def content():\n return render_content(context)\n events = context.get('events', UNDEFINED)\n __M_writer = context.writer()\n __M_writer('\\n\\n\\n
\\n')\n for Event in events:\n __M_writer('
\\n
\\n
\\n \"Image\\n
\\n

')\n __M_writer(str( Event.event_name ))\n __M_writer('

\\n

')\n __M_writer(str( Event.start_date.strftime('%x') ))\n __M_writer(' to ')\n __M_writer(str( Event.end_date.strftime('%x') ))\n __M_writer('

\\n\\n

Venue name: ')\n __M_writer(str( Event.venue.name ))\n __M_writer('

\\n

Venue address: ')\n __M_writer(str( Event.venue.address.address_1 ))\n __M_writer(', ')\n __M_writer(str( Event.venue.address.city ))\n __M_writer(' ')\n __M_writer(str( Event.venue.address.state ))\n __M_writer('

\\n

View Areas\\n

\\n
\\n
\\n
\\n')\n __M_writer('
\\n\\n\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"line_map\": {\"64\": 14, \"65\": 14, \"66\": 14, \"67\": 16, \"68\": 16, \"69\": 17, \"70\": 17, \"71\": 17, \"72\": 17, \"73\": 17, \"74\": 17, \"75\": 18, \"76\": 18, \"77\": 24, \"83\": 77, \"27\": 0, \"36\": 1, \"46\": 3, \"54\": 3, \"55\": 7, \"56\": 8, \"57\": 11, \"58\": 11, \"59\": 11, \"60\": 11, \"61\": 13, \"62\": 13, \"63\": 14}, \"source_encoding\": \"ascii\", \"filename\": \"/Users/Dennis/Developer/Python/chf/homepage/templates/events.html\", \"uri\": \"events.html\"}\n__M_END_METADATA\n\"\"\"\n","repo_name":"sknott2/colonial-heritage-foundation","sub_path":"homepage/cached_templates/templates/events.html.py","file_name":"events.html.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12973097175","text":"# https://www.interviewbit.com/problems/stairs/\ndef fib(n):\n el1, el2 = 1, 2\n if n < 2:\n return 1\n\n for i in xrange(n - 2):\n el1, el2 = el2, el1 + el2\n\n return el2","repo_name":"salvador-dali/algorithms_general","sub_path":"interview_bits/level_6/06_dp/02_simple_array/02_stairs.py","file_name":"02_stairs.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33558919361","text":"\"\"\"\nbaseline.py\n\nThis is the vanilla implementation of transformer network.\n\n23.09.2019 - @yashbonde\n\"\"\"\n\nimport tensorflow as tf\nfrom types import SimpleNamespace\n\nfrom .ops_util import get_opt\nfrom .tf_layers import encoder_block, decoder_block, embed_sequence, positions_for, ff, noam_scheme, label_smoothing\n\nfrom .common_layer_fns import shift_right_2d\n\n\ndef prepare_decoder_function(target, pad=None):\n decoder_input = shift_right_2d(target, pad)\n return decoder_input\n\n\ndef decoder_fn(config, dec_out, enc_out, encoder_pad_mask, decoder_pad_mask):\n with tf.variable_scope('decoder', reuse = tf.AUTO_REUSE):\n for layer_idx in range(config.num_layers):\n dec_out = decoder_block(q = dec_out, k = enc_out, v = enc_out, enc_mask = encoder_pad_mask,\n dec_mask = decoder_pad_mask, scope = 'layer_{}'.format(layer_idx), config = config)\n return dec_out\n\n\ndef encoder_fn(config, enc_out, encoder_pad_mask):\n with tf.variable_scope('encoder', reuse = tf.AUTO_REUSE):\n for layer_idx in range(config.num_layers):\n enc_out = encoder_block(q = enc_out, ext_mask = encoder_pad_mask, scope = 'layer_{}'.format(layer_idx),\n config = config)\n return enc_out\n\n\ndef transformer(config, encoder_placeholder, target_placeholder=None, training=False):\n \"\"\"\n Function for making and training baseline model. This style of code is heavily\n inspired from OpenAI's code.\n\n encoder_placeholder: placeholder to encoder stacks, tensor at the bottom of encoder\n [batch_size, input_length]\n target_placeholder: placeholder with target values, tensor at the top of decoder\n [batch_size, target_length]\n \"\"\"\n encoder_inp = encoder_placeholder\n # decoder_inp = prepare_decoder_function(target_placeholder) # TODO: Fix this\n decoder_inp = target_placeholder[:, :-1]\n\n with tf.variable_scope('embed'):\n '''\n NOTE: There are some differences from the method that was used by authors in their paper.\n They used the sinusoidal method for making positional embedding. This was basically a\n simple hack by creating multiple switch on/somewhere in the middle/off system. 
OpenAI\n instead used a much simpler method where they created an embedding matrix for positions\n along with one for context.\n\n To do that we use the `positions_for` method, which gives us a range according to the input\n length of the tensor.\n\n There are many different things that we can actually use this kind of embedding for:\n - Addition of extra features such as POS and/or NER tags\n\n I found during my experiments that simply adding more and more tensors often improves\n the results. My hypothesis is that since we have a fixed embedding dimension for\n different features such as context and position, adding more simply means that we keep\n morphing its structure. This might cause difficulty initially; however, once the\n embedding matrices are trained this will be compensated for.\n '''\n\n # we start with embedding the input sequence\n (enc_con_emb, dec_con_emb), con_emb_matrix = embed_sequence(encoder_inp, decoder_inp, in_dim=config.vocab_size,\n out_dim=config.embedding_dim, scope='context')\n\n (enc_pos_emb, dec_pos_emb), pos_emb_matrix = embed_sequence(\n positions_for(encoder_inp, past_length=0),\n positions_for(decoder_inp, past_length=0),\n in_dim=max(config.cntx_len, config.max_decode_length),\n out_dim=config.embedding_dim,\n scope='position')\n\n # appropriate value normalisation and getting masks\n enc_con_emb *= config.embedding_dim ** 0.5\n dec_con_emb *= config.embedding_dim ** 0.5\n encoder_pad_mask = tf.math.equal(encoder_inp, config.pad_id, name = 'encoder_pad_masking')\n decoder_pad_mask = tf.math.equal(decoder_inp, config.pad_id, name = 'decoder_pad_masking')\n\n # add the two embeddings\n enc_out = tf.layers.dropout(enc_con_emb + enc_pos_emb, 0.3, training = training)\n dec_out = tf.layers.dropout(dec_con_emb + dec_pos_emb, 0.3, training = training)\n\n # print('main_model > enc_out: {}'.format(enc_out))\n # print('main_model > dec_out: {}'.format(dec_out))\n \n # now we make the model, this is a simple matter of calling the layers --> calling encoder and decoder functions\n enc_out = encoder_fn(config = config, enc_out = enc_out, encoder_pad_mask = encoder_pad_mask)\n dec_out = decoder_fn(config = config, dec_out = dec_out, enc_out = enc_out,\n encoder_pad_mask = encoder_pad_mask, decoder_pad_mask = decoder_pad_mask)\n\n if config.use_inverse_embedding:\n # use the same embedding for input and output\n pred_logits = tf.matmul(dec_out, con_emb_matrix, transpose_b = True)\n fproj_w, fproj_b = con_emb_matrix, None\n else:\n # use different embedding\n pred_logits, fproj_w, fproj_b = ff(dec_out, 'final_projection', config.vocab_size, return_param = True)\n\n pred_seq = tf.argmax(pred_logits, axis = 2) # [bs, seqlen]\n\n if training:\n # calculate loss --> use the one just below for training without label smoothing\n # print('\\n\\n\\ndecoder_inp: {}\\ntarget: {}\\npred_logits: {}\\n\\n'.format(decoder_inp, target_placeholder[:, 1:], pred_logits))\n loss = tf.reduce_sum(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels = target_placeholder[:, 1:],\n logits = pred_logits,\n )\n )\n\n # train ops with gradient clipping\n global_step = tf.train.get_or_create_global_step()\n lr = noam_scheme(config.lr, global_step, 4000)\n opt = get_opt(config.opt)(lr)\n capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in opt.compute_gradients(loss)]\n train_step = opt.apply_gradients(capped_gvs, global_step)\n # train_step = get_opt(config.opt)(lr).minimize(loss, global_step) # disabled: this would silently overwrite the clipped-gradient step above\n\n # simple optimizer\n # train_step = get_opt(config.opt)(config.lr).minimize(loss)\n\n # summary\n
tf.summary.scalar('loss', loss)\n tf.summary.scalar('lr', lr)\n\n ret = SimpleNamespace(\n context_embedding = con_emb_matrix,\n position_embedding = pos_emb_matrix,\n pred_seq = pred_seq,\n pred_logits = pred_logits,\n loss = loss,\n train_step = train_step,\n encoder_embedding = enc_out,\n encoder_pad_mask = encoder_pad_mask,\n decoder_pad_mask = decoder_pad_mask,\n fproj_w = fproj_w,\n fproj_b = fproj_b\n )\n\n return ret\n\n else:\n return pred_logits, pred_seq\n","repo_name":"yashbonde/transformer_network_tensorflow","sub_path":"transformer/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":6853,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"71"} +{"seq_id":"19201386081","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\nimport glob\nimport math\nimport os.path\n\ndef main():\n if len(sys.argv) != 4:\n sys.exit('Usage: %s ' %\n sys.argv[0])\n\n input_directory, output_filepath, alpha = sys.argv[1:]\n\n alpha = float(alpha)\n if not 0.0 <= alpha <= 1.0:\n sys.exit('Invalid alpha: %r. Alpha must be between 0 and 1 (inclusive)'\n % alpha)\n\n status('Calculating total number of tests (this may take awhile)...')\n test_count = get_test_count(input_directory)\n status('Total number of tests: %d' % test_count)\n\n status('Finding significant observation IDs for each category '\n '(alpha=%r)...' % alpha)\n with open(output_filepath, 'w') as output_fh:\n header = ['Category', 'Count significant',\n 'Significant observation IDs']\n output_fh.write('\\t'.join(header))\n output_fh.write('\\n')\n\n for filepath in iter_results_filepaths(input_directory):\n category, significant_ids = find_significant_correlations(\n filepath, alpha, test_count)\n\n if len(significant_ids) > 0:\n cells = [category, '%d' % len(significant_ids)]\n cells.append(\n ', '.join('%s (%r, %r)' % e for e in significant_ids))\n\n output_fh.write('\\t'.join(cells))\n output_fh.write('\\n')\n status('Results are in %s' % output_filepath)\n\n\ndef status(msg):\n \"\"\"Write message immediately to stdout.\"\"\"\n sys.stdout.write(msg)\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef get_test_count(input_directory):\n test_count = 0\n for filepath in iter_results_filepaths(input_directory):\n with open(filepath, 'U') as fh:\n # skip the header line\n next(fh)\n for line in fh:\n test_count += 1\n return test_count\n\n\ndef iter_results_filepaths(input_directory):\n return glob.glob(os.path.join(input_directory, 'corr_*.txt'))\n\n\ndef find_significant_correlations(results_filepath, alpha, test_count):\n category = os.path.splitext(results_filepath)[0].split('corr_')[1]\n\n with open(results_filepath, 'U') as fh:\n header = next(fh).split('\\t')\n id_idx = header.index('Feature ID')\n test_stat_idx = header.index('Test stat.')\n p_value_idx = header.index('pval')\n\n significant_ids = []\n for line in fh:\n cells = line.split('\\t')\n id_ = cells[id_idx]\n test_stat = float(cells[test_stat_idx])\n\n # bonferroni-correct p-value for total number of comparisons, cap\n # at 1.0. if NaN the output will still be NaN\n p_value = min(float(cells[p_value_idx]) * test_count, 1.0)\n\n # NaN will never be <= alpha\n if p_value <= alpha:\n significant_ids.append((id_, test_stat, p_value))\n else:\n # file is sorted by increasing p-values so we can stop\n # searching. 
NaNs always appear at the bottom of the file so we\n # can stop searching in that case too\n break\n return category, significant_ids\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jairideout/one-offs","sub_path":"omc-munger/omc-munger.py","file_name":"omc-munger.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5328754803","text":"import os\nfrom tensorflow import keras\nfrom tensorflow.keras.preprocessing import image\nimport tensorflow.keras\n\n\nmainPath = os.path.abspath(os.path.realpath(__file__))\nmainPath = mainPath[:len(mainPath) - 7]\n\nmodel_path = os.path.join(mainPath,'yolo.h5')\n\n\nfrom imageai import Detection\n\nyolo = Detection.ObjectDetection()\nyolo.setModelTypeAsYOLOv3()\nyolo.setModelPath(model_path)\nyolo.loadModel()\n\nimport cv2\ncam = cv2.VideoCapture(0) #0=front-cam, 1=back-cam\ncam.set(cv2.CAP_PROP_FRAME_WIDTH, 1300)\ncam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1500)\n\n\n\nwhile True:\n ## read frames\n ret, img = cam.read()\n ## predict yolo\n img, preds = yolo.detectCustomObjectsFromImage(input_image=img, \n custom_objects=None, input_type=\"array\",\n output_type=\"array\",\n minimum_percentage_probability=70,\n display_percentage_probability=False,\n display_object_name=True)\n ## display predictions\n cv2.imshow(\"\", img)\n ## press q or Esc to quit (read the key once per frame so the press is not missed)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\") or key == 27:\n break\n## close camera\ncam.release()\ncv2.destroyAllWindows()\n\n","repo_name":"abiglary1372/Web-Cam-Object-Detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72960168548","text":"import requests\nfrom auth import http_auth\nimport loggy\nfrom pathlib import Path\nfrom inspect import stack\nfrom requests.auth import HTTPBasicAuth\nfrom http.client import HTTPConnection\nfrom pprintpp import pprint as pp\nfrom dataclasses import dataclass, field\nimport json\nimport os\n\nscript_name = Path(__file__).stem\nlogger = loggy.logging.getLogger(script_name)\n\nheaders = {\n 'Content-Type': 'application/json'\n}\n\nurl=os.getenv(\"FS_API_URL\",'')\n\n@dataclass\nclass GetResults():\n status_code: int\n results: list=field(default_factory=list)\n\n\n\ndef _get(path: str=None, params: dict=None, auth=http_auth, headers: dict=headers)->requests.Response:\n uri=f'{url}{path}'\n logger.debug(f\"caller: {stack()[1].function}, Getting {uri}\")\n return requests.get(uri, auth=auth, params=params, headers=headers)\n\ndef get_freshservice(path: str=None, params: dict={}, headers: dict=headers, no_page: bool=False):\n page_number = 1\n if not no_page:\n params['per_page']=100\n params['page']=1\n pag_list = []\n status_code = None\n autho=http_auth\n while True:\n done=None\n pagination = _get(path=path, params=params, auth=autho, headers=headers)\n logger.debug(f\"Trying {path}, Page: {page_number}\")\n try:\n content = pagination.json()\n except Exception as e:\n logger.error(f\"Failed to decode JSON. 
{pagination.content}\")\n content = {}\n done=True\n if content:\n for item in content:\n if not content[item]:\n done=True\n break\n status_code = pagination.status_code\n if done:\n break\n pag_list.append(content)\n if not no_page:\n params['page']+=1\n page_number +=1\n \n if no_page:\n break\n return GetResults(status_code, pag_list)\n \n \n \n\n\ndef post_freshservice(path: str=None, headers: dict=headers, data: dict=None)->requests.Response:\n uri=f'{url}{path}'\n autho=http_auth\n HTTPConnection.debuglevel = 1\n logger.debug(f'caller: {stack()[1].function}, Postting {uri} :: {data}')\n return requests.post(uri, auth=autho, headers=headers, json=data)\n\ndef put_freshservice(path: str=None, headers: dict=headers, data: dict=None)->requests.Response:\n uri=f'{url}{path}'\n logger.debug(f'caller: {stack()[1].function}, Putting {uri} :: {data}')\n autho=http_auth\n HTTPConnection.debuglevel = 1\n return requests.put(uri, auth=autho, headers=headers, json=data)","repo_name":"jat92/helpdesk_backup","sub_path":"http_func.py","file_name":"http_func.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3288551200","text":"import concurrent.futures\nimport math\nimport timeit\n\nPRIMES = [\n 112272535095293,\n 112582705942171,\n 112272535095293,\n 115280095190773,\n 115797848077099,\n 1099726899285419]\n\ndef is_prime(n):\n if n % 2 == 0:\n return False\n\n sqrt_n = int(math.floor(math.sqrt(n)))\n \n for i in range(3, sqrt_n + 1, 2):\n if n % i == 0:\n return False\n return True\n\ndef parallel():\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):\n pass\n\n\ndef serial():\n for prime in PRIMES:\n is_prime(prime)\n\nif __name__ == '__main__':\n # Note the two ways to get the functions into the namespace...\n print(timeit.repeat(stmt=\"parallel()\", number=1, repeat=3, globals=globals()))\n print(timeit.repeat(\"serial()\", number=1, repeat=3, setup=\"from __main__ import serial\"))","repo_name":"JMGEHMAN/python201","sub_path":"time_primes.py","file_name":"time_primes.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8980335471","text":"from nltk import ngrams\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom sklearn.cluster import KMeans\r\nfrom nltk import pos_tag\r\nimport numpy as np\r\nfrom multiprocessing import Pool\r\nimport time\r\nstart_time = time.time()\r\n# getting all reviews\r\nf_reviews = open('all_reviews/all_reviews.txt','r',encoding='utf-8')\r\n# all review in one string\r\nall_review = f_reviews.read()\r\nf_reviews.close()\r\n\r\n# f_noun = open('Directionary/nouns/91K nouns.txt','r',encoding=\"windows-1252\")\r\n# all_nouns = f_noun.read()\r\n# f_noun.close()\r\n#\r\n# all_review = all_nouns + all_review\r\n# getting all reviews\r\nf_reviews = open('all_reviews/all_reviews.txt','r',encoding='utf-8')\r\n# spit the review\r\nreviews = f_reviews.read().splitlines()\r\nf_reviews.close()\r\n\r\n# container of word and frequency\r\nwords_dic = {}\r\n#words_dic_bi = {}\r\n# load the stopword from the library of nltk\r\nstop_words = set(stopwords.words('english'))\r\n# string to list\r\nword_tokens = word_tokenize(all_review)\r\n#test = pos_tag(word_tokens)\r\n# filter out the review\r\nfiltered_review = [w for w in word_tokens if not w in stop_words]\r\n# print 
the size of review\r\n#print(len(review))\r\n# print the size of after removing stopword\r\n#print(len(filtered_reivew))\r\n# # call the ngram function with n = 2 which represent bigrams\r\n# def gram_generator(data, n, container):\r\n# ngram = ngrams(data, n)\r\n# # loop the result from ngram function\r\n# for grams in ngram:\r\n# # result in string\r\n# result = \"\"\r\n# # for more than one gram, we need to loop the grams for each word\r\n# for gram in grams:\r\n# # we add into result which it become a complete string with two words\r\n# result += gram\r\n# if n > 1:\r\n# result += \" \"\r\n# # check if it is in the container\r\n# if n > 1:\r\n# result = result[:-1]\r\n# if result in container:\r\n# # yes we add frequency\r\n# container[result] += 1\r\n# else:\r\n# # no we set it as new index which value of frequency is 1\r\n# container[result] = 1\r\n# make the value of all key in dict to 0\r\ndef mofi_to_index(dict):\r\n count = 0\r\n for key in dict:\r\n dict[key] = count\r\n count = count + 1\r\n\r\ndef mofi_to_zero(dict):\r\n for key in dict:\r\n dict[key] = 0\r\ndef create_frequency_dict(data,container):\r\n for i in range(len(data)):\r\n if data[i] in container:\r\n container[data[i]] += 1\r\n else:\r\n container[data[i]] = 1\r\n\r\n#gram_generator(filtered_review, 2, words_dic_bi)\r\ncreate_frequency_dict(filtered_review, words_dic)\r\n#print(len(words_dic))\r\n\"\"\"A small check to see if we can replace word_tokens to unigram\"\"\"\r\n# test_container = {}\r\n# test(filtered_review,test_container)\r\n# result = True\r\n# for key in words_dic:\r\n# if key not in test_container:\r\n# result = False\r\n# else:\r\n# if words_dic[key] != test_container[key]:\r\n# result = False\r\n# print(result)\r\nmofi_to_index(words_dic)\r\n# print all words\r\n# print(words_dic)\r\n# print(len(words_dic))\r\n# print bigrams\r\n#print(words_dic_bi)\r\n# print the words for unqigram\r\n#print(words_dic)\r\n\r\n\r\n\"\"\"Above the stuff are for all review, below are for each single review\"\"\"\r\ndef normalization(dic):\r\n for key in dic:\r\n if dic[key] > 0:\r\n dic[key] = dic[key] / len(dic)\r\n\r\n# get a copy from word_dic\r\n# count = 0\r\n# print(len(word_dic_temp))\r\ndef find_frequency_dict(data,container):\r\n for i in range(len(data)):\r\n if data[i] in container:\r\n container[data[i]] += 1\r\n\r\n# def review_to_train_data(reviews, container_gram):\r\n# for review in reviews:\r\n# # string to list\r\n# review_word_tokens = word_tokenize(review)\r\n# # filter out the review\r\n# filtered_review_each = [w for w in review_word_tokens if not w in stop_words]\r\n# # set to zero for all keys\r\n# mofi_to_zero(word_dic_temp)\r\n# # call the function to generate the gram vector\r\n# find_frequency_dict(filtered_review_each,word_dic_temp)\r\n# # normalization\r\n# normalization(word_dic_temp)\r\n# #print(len(word_dic_temp))\r\n# # if count == 0:\r\n# # test = word_dic_temp.copy()\r\n# # count += 1\r\n# # and save in an array\r\n# container_gram.append(word_dic_temp.copy())\r\ndef review_to_train_data_pall(review):\r\n word_dic_temp = words_dic.copy()\r\n # string to list\r\n review_word_tokens = word_tokenize(review)\r\n # filter out the review\r\n filtered_review_each = [w for w in review_word_tokens if not w in stop_words]\r\n # set to zero for all keys\r\n mofi_to_zero(word_dic_temp)\r\n # call the function to generate the gram vector\r\n find_frequency_dict(filtered_review_each,word_dic_temp)\r\n # normalization\r\n normalization(word_dic_temp)\r\n #print(len(word_dic_temp))\r\n # if 
count == 0:\r\n # test = word_dic_temp.copy()\r\n # count += 1\r\n # and save in an array\r\n return word_dic_temp\r\n\r\ndef pall_review_to_train_data_pall(reviews):\r\n with Pool(35) as p:\r\n result = p.map(review_to_train_data_pall, reviews)\r\n p.terminate()\r\n p.join()\r\n return result\r\ncontainer_gram = pall_review_to_train_data_pall(reviews)\r\n#review_to_train_data(reviews,container_gram)\r\n#print(type(container_gram[0]))\r\n# print(len(container_gram))\r\ndef get_frequncy_array(dict):\r\n array = []\r\n for key in dict:\r\n array.append(dict[key])\r\n return array\r\n\r\ndef pall_get_frequncy_array(container_gram):\r\n with Pool(35) as p:\r\n result = p.map(get_frequncy_array, container_gram)\r\n p.terminate()\r\n p.join()\r\n return result\r\nfre_array = pall_get_frequncy_array(container_gram)\r\n# #print(fre_array)\r\nkmeans = KMeans(n_clusters=2, random_state=0)\r\nkmeans.fit(np.array(fre_array))\r\nprint(time.time() - start_time)\r\n#print(kmeans.cluster_centers_)\r\n#print(kmeans.labels_)\r\n# def find_SSE(clusters,labels_,fre_array):\r\n# sse = {}\r\n# average_cluster = []\r\n# for i in range(len(clusters)):\r\n# sum = 0\r\n# sse[i] = 0\r\n# for value in clusters[i]:\r\n# sum += value\r\n# average = sum/len(clusters[i])\r\n# average_cluster.append(average)\r\n# for i in range(len(fre_array)):\r\n# for datapoint in fre_array[i]:\r\n# sse[labels_[i]] += (average_cluster[labels_[i]] - datapoint) ** 2\r\n# sum_sse = 0\r\n# for x in sse:\r\n# sum_sse += sse[x]\r\n# return sse, sum_sse/len(sse)\r\n# sse,avg_sse = find_SSE(kmeans.cluster_centers_,kmeans.labels_,fre_array)\r\n# print(sse)\r\n# print(avg_sse)\r\n#print(find_SSE(kmeans.cluster_centers_))\r\n","repo_name":"arvin36987/Rest_R","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"908248031","text":"import imageio\nimport glob\n\nframes_per_second = 1\n\n# Name of GIF to be created\ngif_name = 'my_gif'\n\n# Path to folder with pictures from which GIF should be created\ndir_path = '/Users/marcinpaluch/Desktop/Wαrm_up/f/'\n\npaths_to_images = sorted(glob.glob(dir_path + '*.png'))\npath_to_gif = dir_path + gif_name + '.gif'\n\n# Iterate over pictures and add them to the GIF\nwith imageio.get_writer(path_to_gif, mode='I', fps=frames_per_second) as writer:\n for path_to_image in paths_to_images:\n image = imageio.imread(path_to_image)\n writer.append_data(image)","repo_name":"SensorsINI/CartPoleSimulation","sub_path":"others/Presentation Helpers/gif_from_pictures.py","file_name":"gif_from_pictures.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"71"} +{"seq_id":"38522477769","text":"# -*- coding: windows-1252 -*-\r\nimport os\r\n\r\na = True\r\nwhile a:\r\n try:\r\n num = int(input(\"Insira um valor (-1) para sair: \"))\r\n if num < -1:\r\n print('O valor {} é negativo. Insira um valor positivo.'.format(num))\r\n for i in range(num + 1):\r\n div = 0\r\n for x in range(1, i + 1):\r\n resto = i % x\r\n if resto == 0:\r\n div += 1\r\n if div == 2:\r\n print('O número {} é primo.'.format(i))\r\n else:\r\n print('O número {} não é primo.'.format(i))\r\n if num == -1:\r\n a = False\r\n print(\"Encerrando aplicação. 
Obrigado.\")\r\n except ValueError:\r\n os.system(\"cls\")\r\n print('O valor {} não é válido.'.format(num))\r\n os.system(\"pause\")\r\n","repo_name":"LivMLop/numeroPrimo","sub_path":"NumerosPrimos.py","file_name":"NumerosPrimos.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8618958182","text":"def merge_sort(list):\n\n if len(list) <= 1:\n return list\n\n middle = len(list) // 2\n left_side = list[:middle]\n right_side = list[middle:]\n\n left = merge_sort(left_side)\n right = merge_sort(right_side)\n\n return merge(left,right)\n\ndef merge(left,right):\n l = []\n i = 0\n j = 0\n\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n l.append(left[i])\n i += 1\n else:\n l.append(right[j])\n j += 1\n\n while i < len(left):\n l.append(left[i])\n i += 1\n\n while j < len(right):\n l.append(right[j])\n j += 1\n \n return l\n\na = merge_sort([3,4,5,7,8,2,3])\nprint(a)","repo_name":"tengfone/AlgoRevision","sub_path":"rmergesort.py","file_name":"rmergesort.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1250832982","text":"import frappe\nfrom frappe.model.document import Document\n\nclass AppointmentQueue(Document):\n pass\n\ndef create_queues_for_today():\n if frappe.flags.in_test:\n clinics = frappe.get_all(\"Clinic\", filters={\"is_published\": True, \"name\": \"Test Clinic\"}, pluck=\"name\")\n else:\n clinics = frappe.get_all(\"Clinic\", filters={\"is_published\": True}, pluck= \"name\")\n \n for clinic in clinics:\n shifts = frappe.get_all(\"Schedule Shift\", filters={\"clinic\": clinic}, pluck=\"name\")\n for shift in shifts:\n frappe.get_doc(\n {\n \"doctype\": \"Appointment Queue\",\n \"clinic\": clinic,\n \"date\": frappe.utils.today(),\n \"shift\": shift,\n }\n ).insert(ignore_if_duplicate=True)\n ","repo_name":"jinalvekariya/Appointment-App","sub_path":"appointments_app/appointments_app/doctype/appointment_queue/appointment_queue.py","file_name":"appointment_queue.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"17914667438","text":"# coding=utf-8\nfrom __future__ import absolute_import, division, print_function\n\n__author__ = \"Gina Häußge \"\n__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'\n__copyright__ = \"Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License\"\n\nfrom flask import request, jsonify, make_response, Response\nfrom werkzeug.exceptions import BadRequest\nimport re\n\nfrom octoprint.settings import settings, valid_boolean_trues\nfrom octoprint.server import printer, printerProfileManager, NO_CONTENT\nfrom octoprint.server.api import api\nfrom octoprint.server.util.flask import restricted_access, get_json_command_from_request\n\nfrom octoprint.printer import UnknownScript\n\n#~~ Printer\n\n\n@api.route(\"/printer\", methods=[\"GET\"])\ndef printerState():\n\tif not printer.is_operational():\n\t\treturn make_response(\"Printer is not operational\", 409)\n\n\t# process excludes\n\texcludes = []\n\tif \"exclude\" in request.values:\n\t\texcludeStr = request.values[\"exclude\"]\n\t\tif len(excludeStr.strip()) > 0:\n\t\t\texcludes = filter(lambda x: x in [\"temperature\", \"sd\", \"state\"], map(lambda x: x.strip(), excludeStr.split(\",\")))\n\n\tresult = {}\n\n\tprocessor 
= lambda x: x\n\tif not printerProfileManager.get_current_or_default()[\"heatedBed\"]:\n\t\tprocessor = _delete_bed\n\n\t# add temperature information\n\tif not \"temperature\" in excludes:\n\t\tresult.update({\"temperature\": _get_temperature_data(processor)})\n\n\t# add sd information\n\tif not \"sd\" in excludes and settings().getBoolean([\"feature\", \"sdSupport\"]):\n\t\tresult.update({\"sd\": {\"ready\": printer.is_sd_ready()}})\n\n\t# add state information\n\tif not \"state\" in excludes:\n\t\tstate = printer.get_current_data()[\"state\"]\n\t\tresult.update({\"state\": state})\n\n\treturn jsonify(result)\n\n\n#~~ Tool\n\n\n@api.route(\"/printer/tool\", methods=[\"POST\"])\n@restricted_access\ndef printerToolCommand():\n\tif not printer.is_operational():\n\t\treturn make_response(\"Printer is not operational\", 409)\n\n\tvalid_commands = {\n\t\t\"select\": [\"tool\"],\n\t\t\"target\": [\"targets\"],\n\t\t\"offset\": [\"offsets\"],\n\t\t\"extrude\": [\"amount\"],\n\t\t\"flowrate\": [\"factor\"]\n\t}\n\tcommand, data, response = get_json_command_from_request(request, valid_commands)\n\tif response is not None:\n\t\treturn response\n\n\tvalidation_regex = re.compile(\"tool\\d+\")\n\n\t##~~ tool selection\n\tif command == \"select\":\n\t\ttool = data[\"tool\"]\n\t\tif re.match(validation_regex, tool) is None:\n\t\t\treturn make_response(\"Invalid tool: %s\" % tool, 400)\n\t\tif not tool.startswith(\"tool\"):\n\t\t\treturn make_response(\"Invalid tool for selection: %s\" % tool, 400)\n\n\t\tprinter.change_tool(tool)\n\n\t##~~ temperature\n\telif command == \"target\":\n\t\ttargets = data[\"targets\"]\n\n\t\t# make sure the targets are valid and the values are numbers\n\t\tvalidated_values = {}\n\t\tfor tool, value in targets.items():\n\t\t\tif re.match(validation_regex, tool) is None:\n\t\t\t\treturn make_response(\"Invalid target for setting temperature: %s\" % tool, 400)\n\t\t\tif not isinstance(value, (int, long, float)):\n\t\t\t\treturn make_response(\"Not a number for %s: %r\" % (tool, value), 400)\n\t\t\tvalidated_values[tool] = value\n\n\t\t# perform the actual temperature commands\n\t\tfor tool in validated_values.keys():\n\t\t\tprinter.set_temperature(tool, validated_values[tool])\n\n\t##~~ temperature offset\n\telif command == \"offset\":\n\t\toffsets = data[\"offsets\"]\n\n\t\t# make sure the targets are valid, the values are numbers and in the range [-50, 50]\n\t\tvalidated_values = {}\n\t\tfor tool, value in offsets.items():\n\t\t\tif re.match(validation_regex, tool) is None:\n\t\t\t\treturn make_response(\"Invalid target for setting temperature: %s\" % tool, 400)\n\t\t\tif not isinstance(value, (int, long, float)):\n\t\t\t\treturn make_response(\"Not a number for %s: %r\" % (tool, value), 400)\n\t\t\tif not -50 <= value <= 50:\n\t\t\t\treturn make_response(\"Offset %s not in range [-50, 50]: %f\" % (tool, value), 400)\n\t\t\tvalidated_values[tool] = value\n\n\t\t# set the offsets\n\t\tprinter.set_temperature_offset(validated_values)\n\n\t##~~ extrusion\n\telif command == \"extrude\":\n\t\tif printer.is_printing():\n\t\t\t# do not extrude when a print job is running\n\t\t\treturn make_response(\"Printer is currently printing\", 409)\n\n\t\tamount = data[\"amount\"]\n\t\tif not isinstance(amount, (int, long, float)):\n\t\t\treturn make_response(\"Not a number for extrusion amount: %r\" % amount, 400)\n\t\tprinter.extrude(amount)\n\n\telif command == \"flowrate\":\n\t\tfactor = data[\"factor\"]\n\t\tif not isinstance(factor, (int, long, float)):\n\t\t\treturn 
make_response(\"Not a number for flow rate: %r\" % factor, 400)\n\t\ttry:\n\t\t\tprinter.flow_rate(factor)\n\t\texcept ValueError as e:\n\t\t\treturn make_response(\"Invalid value for flow rate: %s\" % str(e), 400)\n\n\treturn NO_CONTENT\n\n\n@api.route(\"/printer/tool\", methods=[\"GET\"])\ndef printerToolState():\n\tif not printer.is_operational():\n\t\treturn make_response(\"Printer is not operational\", 409)\n\n\treturn jsonify(_get_temperature_data(_delete_bed))\n\n\n##~~ Heated bed\n\n\n@api.route(\"/printer/bed\", methods=[\"POST\"])\n@restricted_access\ndef printerBedCommand():\n\tif not printer.is_operational():\n\t\treturn make_response(\"Printer is not operational\", 409)\n\n\tif not printerProfileManager.get_current_or_default()[\"heatedBed\"]:\n\t\treturn make_response(\"Printer does not have a heated bed\", 409)\n\n\tvalid_commands = {\n\t\t\"target\": [\"target\"],\n\t\t\"offset\": [\"offset\"]\n\t}\n\tcommand, data, response = get_json_command_from_request(request, valid_commands)\n\tif response is not None:\n\t\treturn response\n\n\t##~~ temperature\n\tif command == \"target\":\n\t\ttarget = data[\"target\"]\n\n\t\t# make sure the target is a number\n\t\tif not isinstance(target, (int, long, float)):\n\t\t\treturn make_response(\"Not a number: %r\" % target, 400)\n\n\t\t# perform the actual temperature command\n\t\tprinter.set_temperature(\"bed\", target)\n\n\t##~~ temperature offset\n\telif command == \"offset\":\n\t\toffset = data[\"offset\"]\n\n\t\t# make sure the offset is valid\n\t\tif not isinstance(offset, (int, long, float)):\n\t\t\treturn make_response(\"Not a number: %r\" % offset, 400)\n\t\tif not -50 <= offset <= 50:\n\t\t\treturn make_response(\"Offset not in range [-50, 50]: %f\" % offset, 400)\n\n\t\t# set the offsets\n\t\tprinter.set_temperature_offset({\"bed\": offset})\n\n\treturn NO_CONTENT\n\n\n@api.route(\"/printer/bed\", methods=[\"GET\"])\ndef printerBedState():\n\tif not printer.is_operational():\n\t\treturn make_response(\"Printer is not operational\", 409)\n\n\tif not printerProfileManager.get_current_or_default()[\"heatedBed\"]:\n\t\treturn make_response(\"Printer does not have a heated bed\", 409)\n\n\tdata = _get_temperature_data(_delete_tools)\n\tif isinstance(data, Response):\n\t\treturn data\n\telse:\n\t\treturn jsonify(data)\n\n\n##~~ Print head\n\n\n@api.route(\"/printer/printhead\", methods=[\"POST\"])\n@restricted_access\ndef printerPrintheadCommand():\n\tvalid_commands = {\n\t\t\"jog\": [],\n\t\t\"home\": [\"axes\"],\n\t\t\"feedrate\": [\"factor\"]\n\t}\n\tcommand, data, response = get_json_command_from_request(request, valid_commands)\n\tif response is not None:\n\t\treturn response\n\n\tif not printer.is_operational() or (printer.is_printing() and command != \"feedrate\"):\n\t\t# do not jog when a print job is running or we don't have a connection\n\t\treturn make_response(\"Printer is not operational or currently printing\", 409)\n\n\tvalid_axes = [\"x\", \"y\", \"z\"]\n\t##~~ jog command\n\tif command == \"jog\":\n\t\t# validate all jog instructions, make sure that the values are numbers\n\t\tvalidated_values = {}\n\t\tfor axis in valid_axes:\n\t\t\tif axis in data:\n\t\t\t\tvalue = data[axis]\n\t\t\t\tif not isinstance(value, (int, long, float)):\n\t\t\t\t\treturn make_response(\"Not a number for axis %s: %r\" % (axis, value), 400)\n\t\t\t\tvalidated_values[axis] = value\n\n\t\tabsolute = \"absolute\" in data and data[\"absolute\"] in valid_boolean_trues\n\t\tspeed = data.get(\"speed\", None)\n\n\t\t# execute the jog 
commands\n\t\tprinter.jog(validated_values, relative=not absolute, speed=speed)\n\n\t##~~ home command\n\telif command == \"home\":\n\t\tvalidated_values = []\n\t\taxes = data[\"axes\"]\n\t\tfor axis in axes:\n\t\t\tif not axis in valid_axes:\n\t\t\t\treturn make_response(\"Invalid axis: %s\" % axis, 400)\n\t\t\tvalidated_values.append(axis)\n\n\t\t# execute the home command\n\t\tprinter.home(validated_values)\n\n\telif command == \"feedrate\":\n\t\tfactor = data[\"factor\"]\n\t\tif not isinstance(factor, (int, long, float)):\n\t\t\treturn make_response(\"Not a number for feed rate: %r\" % factor, 400)\n\t\ttry:\n\t\t\tprinter.feed_rate(factor)\n\t\texcept ValueError as e:\n\t\t\treturn make_response(\"Invalid value for feed rate: %s\" % str(e), 400)\n\n\treturn NO_CONTENT\n\n\n##~~ SD Card\n\n\n@api.route(\"/printer/sd\", methods=[\"POST\"])\n@restricted_access\ndef printerSdCommand():\n\tif not settings().getBoolean([\"feature\", \"sdSupport\"]):\n\t\treturn make_response(\"SD support is disabled\", 404)\n\n\tif not printer.is_operational() or printer.is_printing() or printer.is_paused():\n\t\treturn make_response(\"Printer is not operational or currently busy\", 409)\n\n\tvalid_commands = {\n\t\t\"init\": [],\n\t\t\"refresh\": [],\n\t\t\"release\": []\n\t}\n\tcommand, data, response = get_json_command_from_request(request, valid_commands)\n\tif response is not None:\n\t\treturn response\n\n\tif command == \"init\":\n\t\tprinter.init_sd_card()\n\telif command == \"refresh\":\n\t\tprinter.refresh_sd_files()\n\telif command == \"release\":\n\t\tprinter.release_sd_card()\n\n\treturn NO_CONTENT\n\n\n@api.route(\"/printer/sd\", methods=[\"GET\"])\ndef printerSdState():\n\tif not settings().getBoolean([\"feature\", \"sdSupport\"]):\n\t\treturn make_response(\"SD support is disabled\", 404)\n\n\treturn jsonify(ready=printer.is_sd_ready())\n\n\n##~~ Commands\n\n\n@api.route(\"/printer/command\", methods=[\"POST\"])\n@restricted_access\ndef printerCommand():\n\tif not printer.is_operational():\n\t\treturn make_response(\"Printer is not operational\", 409)\n\n\tif not \"application/json\" in request.headers[\"Content-Type\"]:\n\t\treturn make_response(\"Expected content type JSON\", 400)\n\n\ttry:\n\t\tdata = request.json\n\texcept BadRequest:\n\t\treturn make_response(\"Malformed JSON body in request\", 400)\n\n\tif \"command\" in data and \"commands\" in data:\n\t\treturn make_response(\"'command' and 'commands' are mutually exclusive\", 400)\n\telif (\"command\" in data or \"commands\" in data) and \"script\" in data:\n\t\treturn make_response(\"'command'/'commands' and 'script' are mutually exclusive\", 400)\n\telif not (\"command\" in data or \"commands\" in data or \"script\" in data):\n\t\treturn make_response(\"Need one of 'command', 'commands' or 'script'\", 400)\n\n\tparameters = dict()\n\tif \"parameters\" in data:\n\t\tparameters = data[\"parameters\"]\n\n\tif \"command\" in data or \"commands\" in data:\n\t\tif \"command\" in data:\n\t\t\tcommands = [data[\"command\"]]\n\t\telse:\n\t\t\tif not isinstance(data[\"commands\"], (list, tuple)):\n\t\t\t\treturn make_response(\"'commands' needs to be a list\", 400)\n\t\t\tcommands = data[\"commands\"]\n\n\t\tcommandsToSend = []\n\t\tfor command in commands:\n\t\t\tcommandToSend = command\n\t\t\tif len(parameters) > 0:\n\t\t\t\tcommandToSend = command % parameters\n\t\t\tcommandsToSend.append(commandToSend)\n\n\t\tprinter.commands(commandsToSend)\n\n\telif \"script\" in data:\n\t\tscript_name = data[\"script\"]\n\t\tcontext = 
dict(parameters=parameters)\n\t\tif \"context\" in data:\n\t\t\tcontext[\"context\"] = data[\"context\"]\n\n\t\ttry:\n\t\t\tprinter.script(script_name, context=context)\n\t\texcept UnknownScript:\n\t\t\treturn make_response(\"Unknown script: {script_name}\".format(**locals()), 404)\n\n\treturn NO_CONTENT\n\n@api.route(\"/printer/command/custom\", methods=[\"GET\"])\ndef getCustomControls():\n\t# TODO: document me\n\tcustomControls = settings().get([\"controls\"])\n\treturn jsonify(controls=customControls)\n\n\ndef _get_temperature_data(preprocessor):\n\tif not printer.is_operational():\n\t\treturn make_response(\"Printer is not operational\", 409)\n\n\ttempData = printer.get_current_temperatures()\n\n\tif \"history\" in request.values.keys() and request.values[\"history\"] in valid_boolean_trues:\n\t\ttempHistory = printer.get_temperature_history()\n\n\t\tlimit = 300\n\t\tif \"limit\" in request.values.keys() and unicode(request.values[\"limit\"]).isnumeric():\n\t\t\tlimit = int(request.values[\"limit\"])\n\n\t\thistory = list(tempHistory)\n\t\tlimit = min(limit, len(history))\n\n\t\ttempData.update({\n\t\t\t\"history\": map(lambda x: preprocessor(x), history[-limit:])\n\t\t})\n\n\treturn preprocessor(tempData)\n\n\ndef _delete_tools(x):\n\treturn _delete_from_data(x, lambda k: k.startswith(\"tool\"))\n\n\ndef _delete_bed(x):\n\treturn _delete_from_data(x, lambda k: k == \"bed\")\n\n\ndef _delete_from_data(x, key_matcher):\n\tdata = dict(x)\n\tfor k in data.keys():\n\t\tif key_matcher(k):\n\t\t\tdel data[k]\n\treturn data\n","repo_name":"Robo3D/roboOctoprint","sub_path":"src/octoprint/server/api/printer.py","file_name":"printer.py","file_ext":"py","file_size_in_byte":12172,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"6008826969","text":"#!/usr/bin/env python\n\"\"\"setup.py\"\"\"\nimport os\nimport io\nfrom setuptools import setup\nimport rosette\n\nNAME = \"rosette_api\"\nDESCRIPTION = \"Rosette API Python client SDK\"\nAUTHOR = \"Basis Technology Corp.\"\nAUTHOR_EMAIL = \"support@rosette.com\"\nHOMEPAGE = \"https://github.com/rosette-api/python\"\nVERSION = rosette.__version__\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*filenames, **kwargs):\n \"\"\"read function\"\"\"\n encoding = kwargs.get('encoding', 'utf-8')\n sep = kwargs.get('sep', '\\n')\n buf = []\n for filename in filenames:\n with io.open(filename, encoding=encoding) as the_file:\n buf.append(the_file.read())\n return sep.join(buf)\n\n\nLONG_DESCRIPTION = read('README.md')\n\nsetup(\n name=NAME,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n description=DESCRIPTION,\n license='Apache License',\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n packages=['rosette'],\n install_requires=['requests'],\n platforms='any',\n url=HOMEPAGE,\n version=VERSION,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n 
]\n)\n","repo_name":"rosette-api/python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"71"} +{"seq_id":"21886751121","text":"\"\"\"\nCreated on Wed Nov 09 10:43:35 2018\n\n@author: lingquan\n\n Special thanks to Ian to prepared the data and modeling\n The formulation is from Shabbir Ahmed, Semiconductor Tool Planning Via Multistage Stochastic programming\n A wafer fab consisting of M tool types that process N types of wafers. Each product goes through K processing steps, each of which can be performed on one or more tool types.\n I: tool type\n J: wafer type\n K: processing step\n x_it: number of tool type i purchased in period t\n u_jt: the shortage of wafer type j in period t\n v_jkt: the allocation of processing step k of wafer type j to tool type i in period t\n w_jt: the production of wafer type j in period t\n a_i,j,k: time requied (in hrs) by processing step k on wafer type j on tool type i\n alpha_it: cost of tool type i in period t\n beta_jt: penalty cost of unit shortage in wafer type j in period t\n c_i: the capacity of tool type i\n d_jt: demand of wafer type j in period t\n X_it: accumulated number of tool type i purchased till period t\n\"\"\"\n\nimport numpy\nfrom msppy.solver import SDDP\nfrom msppy.msp import MSLP\nfrom msppy.evaluation import EvaluationTrue\nimport gurobipy\n\n# INPUT\nT = 4\nI = 10\nJ = 10\nK = 2\n\nalpha_0 = [686, 784, 540, 641, 1073, 1388, 1727, 1469, 586, 515]\nbeta_0 = [174, 115, 92, 116, 93, 164, 190, 174, 190, 200]\nc = [7,17,11,16,18,7,7,9,8,14]\nd_0 = [607,943,732,1279,434,378,1964,430, 410, 525]\n\na = numpy.random.randint(5,10,size = [I, J, K])\n\nd_perturb = [0.0422902245,\n 0.0549456137,\n 0.0868569685,\n 0.0950609064,\n 0.0538731273,\n 0.0917075818,\n 0.0673065114,\n 0.0594680277,\n 0.0544299191,\n 0.0782010312]\n\nbeta_perturb = [0.0129739644,\n 0.063853852,\n 0.0925580104,\n 0.0766634092,\n 0.0953244752,\n 0.0563760149,\n 0.075759652,\n 0.0583249427,\n 0.0324810132,\n 0.0694020021]\n\nalpha_perturb = [0.0638533975,\n 0.068050401,\n 0.0747693903,\n 0.0514849591,\n 0.0323470258,\n 0.0480910211,\n 0.0304004586,\n 0.0976094813,\n 0.0694752024,\n 0.0703992735,\n 0.0775236862]\n\n# d_jt #\ndef d_generator(random_state):\n return [round(numpy.dot(random_state.normal(1, d_perturb[j]), d_0[j])) for j in range(J)]\n# alpha_it #\ndef alpha_generator(random_state):\n return [round(numpy.dot(random_state.normal(1, alpha_perturb[i]), alpha_0[i])) for i in range(I)]\n# beta_jt #\ndef beta_generator(random_state):\n return [round(numpy.dot(random_state.normal(1, beta_perturb[j]), beta_0[j])) for j in range(J)]\nsemiconductor = MSLP(T=T, bound=0)\nfor t in range(T):\n m = semiconductor[t]\n # X_it #\n X_now, X_past = m.addStateVars(I, name='accumulated purchase')\n if t == 0:\n m.addConstrs(X_now[j] == 0 for j in range(J))\n else:\n # u_jt #\n u = m.addVars(J, name='shortage', uncertainty=beta_generator)\n # v_ijkt #\n v = m.addVars(I,J,K, name='allocation')\n # w_jt #\n w = m.addVars(J, name='production')\n # x_it #\n x = m.addVars(I, name='purchase', uncertainty=alpha_generator)\n ## accumulated number of purchased tools updated ##\n m.addConstrs(X_now[i] == X_past[i] + x[i] for i in range(I))\n # time allocation constraint\n m.addConstrs(gurobipy.quicksum( gurobipy.quicksum( a[i][j][k] * v[(i,j,k)] for k in range(K) ) for j in range(J)) <= c[i] * X_now[i] for i in range(I))\n # production allocation constraint\n 
m.addConstrs(gurobipy.quicksum( v[(i,j,k)] for i in range(I) ) >= w[j] for j in range(J) for k in range(K) )\n # demand constraint\n m.addConstrs( (w[j] + u[j] >= 0 for j in range(J)), uncertainty=d_generator )\nsemiconductor.discretize(n_samples=20)\nSDDP(semiconductor).solve(max_iterations=10)\nresult = EvaluationTrue(semiconductor)\nresult.run(n_simulations=1000)\n","repo_name":"lingquant/msppy","sub_path":"quick_start/semiconductor.py","file_name":"semiconductor.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"71"} +{"seq_id":"33781578820","text":"import numpy as np\nimport pickle\nimport cc3d\nfrom scipy.linalg import norm\nfrom sklearn.preprocessing import normalize\nimport os\nfrom collections import defaultdict\nfrom skimage.measure import regionprops\n\ndef main():\n print(\"Loading filtered cell-type map...\")\n with open(\"filtered_celltype_maps.pkl\", \"rb\") as f:\n filtered_celltype_maps = pickle.load(f)\n\n\n print(\"Finding surfaces...\")\n padded_ctmaps = np.pad(filtered_celltype_maps, 1, 'constant', constant_values=-1)\n surface_ctmap = np.array(padded_ctmaps, copy=True)\n ct_coords = np.where(padded_ctmaps > -1)\n for x, y, z in zip(*ct_coords):\n ct = padded_ctmaps[x, y, z]\n if padded_ctmaps[x+1, y, z] == ct and padded_ctmaps[x-1, y, z] == ct and \\\n padded_ctmaps[x, y+1, z] == ct and padded_ctmaps[x, y-1, z] == ct and \\\n padded_ctmaps[x, y, z+1] == ct and padded_ctmaps[x, y, z-1] == ct:\n surface_ctmap[x, y, z] = -1\n\n print(\"Finding connected objects...\")\n connectivity = 6 # only 26, 18, and 6 are allowed\n labels_out = cc3d.connected_components(filtered_celltype_maps + 1, connectivity=connectivity)\n labels_out_surface = np.pad(labels_out, 1, 'constant') * (surface_ctmap != -1)\n\n print(\"Extracting labels...\")\n obj_ct_dic = {}\n xyz_dict = defaultdict(lambda: [])\n for x in range(labels_out_surface.shape[0]):\n for y in range(labels_out_surface.shape[1]):\n for z in range(labels_out_surface.shape[2]):\n obj_id = labels_out_surface[x, y, z]\n if obj_id > 0:\n xyz_dict[obj_id].append([x, y, z])\n\n print(\"Calculating surface norms...\")\n try:\n os.mkdir(\"objects\")\n except FileExistsError:\n pass\n\n n_total_objects = np.max(labels_out_surface)\n for i in np.unique(labels_out_surface)[1:]:\n xyz = np.array(xyz_dict[i], dtype=float)\n if len(xyz) < 4:\n continue\n cell_type = padded_ctmaps[tuple(xyz[0].astype(int))]\n obj_ct_dic[i] = cell_type\n norm_vecs = np.zeros_like(xyz)\n for j in range(len(xyz)):\n xyz_p = np.delete(xyz, j, 0)\n xyz_diffs = xyz[j] - xyz_p\n forces = 1.0 / np.sum(xyz_diffs**2, axis=1)\n unit_vecs = xyz_diffs / norm(xyz_diffs, ord=2, axis=1).reshape([-1, 1])\n norm_vecs[j] = np.sum(unit_vecs * forces.reshape([-1, 1]), axis=0)\n with open(\"objects/object_%d_with_norm.xyz\"%i, \"w\") as f:\n for (x, y, z), (nx, ny, nz) in zip(xyz, norm_vecs):\n f.write(\"%f\\t%f\\t%f\\t%f\\t%f\\t%f\\n\"%(x, y, z, nx, ny, nz))\n if i % 100 == 0:\n print(\"Processed (%d/%d) objects...\"%(i,n_total_objects))\n\n print(\"Saving cell type dictionary...\")\n with open(\"obj_ct_dic.pkl\", \"wb\") as f:\n pickle.dump(obj_ct_dic, f)\n\nmain()\n","repo_name":"pjb7687/ssam_spacejam2","sub_path":"3d_reconstruction/1.generate_3d_points_and_norms.py","file_name":"1.generate_3d_points_and_norms.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"25991806792","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that contains functions and classes related with Maya menus\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport os\nimport json\nimport logging\nfrom collections import OrderedDict\n\nimport maya.cmds\nimport maya.mel\n\nfrom tpDcc.abstract import menu as abstract_menu\n\nLOGGER = logging.getLogger('tpDcc-dccs-maya')\n\n\nclass MayaMenu(abstract_menu.AbstractMenu, object):\n def __init__(self, name='MayaMenu'):\n super(MayaMenu, self).__init__()\n\n self.name = name\n\n def create_menu(self, file_path=None, parent_menu=None):\n \"\"\"\n Creates a new DCC menu app\n If file path is not given the menu is created without items\n :param name: str, name for the menu\n :param file_path: str, path where JSON menu file is located\n :param parent_menu: str, Name of the menu to append this menu to\n :return: variant, nativeMenu || None\n \"\"\"\n\n if check_menu_exists(self.name):\n return\n\n menu_created = False\n\n if parent_menu:\n m = find_menu(parent_menu)\n if m:\n self._native_pointer = maya.cmds.menuItem(subMenu=True, parent=m, tearOff=True, label=self.name)\n menu_created = True\n\n s_menu = None\n if not menu_created:\n s_menu = maya.cmds.menu(parent=main_menu(), tearOff=True, label=self.name)\n\n if not file_path or not s_menu:\n return\n\n if not os.path.isfile(file_path):\n LOGGER.warning('Menu was not created because menu file is not valid or does not exists!')\n return\n\n with open(file_path, 'r') as f:\n menu_data = json.load(f, object_pairs_hook=OrderedDict)\n\n if menu_data:\n menu_categories = list(menu_data.keys())\n for category in menu_categories:\n self.create_category(category_name=category, category_items=menu_data[category], parent_menu=s_menu)\n\n @staticmethod\n def create_category(category_name, category_items, parent_menu):\n \"\"\"\n Creates a new category on the passed menu. 
If not menu specified this menu is used, if exists\n :param parent_menu: str, menu to add category to\n :param category_name: str, name of the category\n :param category_items: list, list of items to add to the category\n :return: variant, nativeMenu || None\n \"\"\"\n\n submenu = maya.cmds.menuItem(subMenu=True, tearOff=True, parent=parent_menu, label=category_name)\n for item in category_items:\n maya.cmds.menuItem(parent=submenu, label=item['label'], command=item['command'])\n\n\ndef main_menu():\n \"\"\"\n Returns Maya main menu\n \"\"\"\n\n return maya.mel.eval('$tmp=$gMainWindow')\n\n\ndef get_menus():\n \"\"\"\n Return a list with all Maya menus\n :return: list\n \"\"\"\n\n return maya.cmds.lsUI(menus=True)\n\n\ndef remove_menu(menu_name):\n \"\"\"\n Removes, if exists, a menu of Max\n :param menu_name: str, menu name\n \"\"\"\n\n for m in get_menus():\n lbl = maya.cmds.menu(m, query=True, label=True)\n if lbl == menu_name:\n maya.cmds.deleteUI(m, menu=True)\n\n\ndef check_menu_exists(menu_name):\n \"\"\"\n Returns True if a menu with the given name already exists\n :param menu_name: str, menu name\n :return: bol\n \"\"\"\n\n for m in get_menus():\n lbl = maya.cmds.menu(m, query=True, label=True)\n if lbl == menu_name:\n return True\n\n return False\n\n\ndef find_menu(menu_name):\n \"\"\"\n Returns Menu instance by the given name\n :param menu_name: str, menu of the name to search for\n :return: nativeMenu\n \"\"\"\n\n for m in get_menus():\n lbl = maya.cmds.menu(m, query=True, label=True)\n if lbl == menu_name:\n return m\n\n return None\n","repo_name":"OmniZ3D/tpDcc-dccs-maya","sub_path":"tpDcc/dccs/maya/core/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19821853406","text":"from typing import Any, Dict, Type, TypeVar, Tuple, Optional, BinaryIO, TextIO, TYPE_CHECKING\n\nfrom typing import List\n\n\nimport attr\n\nfrom ..types import UNSET, Unset\n\n\n\n\n\n\n\nT = TypeVar(\"T\", bound=\"TokenResult\")\n\n\n@attr.s(auto_attribs=True)\nclass TokenResult:\n \"\"\" \n Attributes:\n auth_token (str): Token used as auth token in X-Coffee-Auth header\n refresh_token (str): Token used as refresh token in X-Coffee-Refresh-Auth header\n \"\"\"\n\n auth_token: str\n refresh_token: str\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n\n def to_dict(self) -> Dict[str, Any]:\n auth_token = self.auth_token\n refresh_token = self.refresh_token\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({\n \"authToken\": auth_token,\n \"refreshToken\": refresh_token,\n })\n\n return field_dict\n\n\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n auth_token = d.pop(\"authToken\")\n\n refresh_token = d.pop(\"refreshToken\")\n\n token_result = cls(\n auth_token=auth_token,\n refresh_token=refresh_token,\n )\n\n token_result.additional_properties = d\n return token_result\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in 
self.additional_properties\n","repo_name":"Alejandro-Mirez/DataProxy","sub_path":"coffee-freaks-api-client/coffee_freaks_api_client/models/token_result.py","file_name":"token_result.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3518766690","text":"import requests\n\nfrom lxml import etree\n\n'''\nUse lxml to parse the HTML\nAdded job-keyword search and a configurable crawl depth\n'''\ndef getText(url):\n    try:\n        kv = {'user-agent':'Mozilla/5.0'}\n        r = requests.get(url,headers=kv,timeout=30)\n        r.raise_for_status()# raise an exception if the status is not 200\n        r.encoding = r.apparent_encoding\n        return r\n    except:\n        return ''\nc = 0\ns = input('输入想查询的职位:')\nstar_url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,{},2,{}.html'\nfor i in range(1, int(input('爬取深度:'))+1):\n    url = star_url.format(s, i)\n    r = getText(url)\n\n    print(type(r.content))\n    # use etree.HTML to parse the string into an HTML document\n    html = etree.HTML(r.content)\n    print(type(html))\n    rst = html.xpath(\"//div/p/span/a\")\n    for i in rst:\n        c = c + 1\n        print(c, i.xpath(\"@href\")[0], i.text.replace(' ', ''))","repo_name":"javerthu/demo","sub_path":"new/Xpath example.py","file_name":"Xpath example.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"17948217298","text":"import os\nimport sys\nimport json\nimport random\nimport pandas as pd\nimport numpy as np\nfrom torch.utils import data\nimport torch\n\nclass ShillBiddingDataset(data.Dataset):\n    def __init__(self, file_dir):\n\n        # file_dir represents the path to the csv file\n        super(ShillBiddingDataset, self).__init__()\n\n        with open(file_dir, 'r') as f:\n# Record_ID,Auction_ID,Bidder_ID,Bidder_Tendency,Bidding_Ratio,Successive_Outbidding,Last_Bidding,Auction_Bids,Starting_Price_Average,Early_Bidding,Winning_Ratio,Auction_Duration,Class\n            # 1,732,_***i,0.2,0.4,0,0.0000277778,0,0.993592814,0.0000277778,0.666666667,5,0\n            # 2,732,g***r,0.024390244,0.2,0,0.0131226852,0,0.993592814,0.0131226852,0.944444444,5,0\n            # 3,732,t***p,0.142857143,0.2,0,0.0030416667,0,0.993592814,0.0030416667,1,5,0\n            # 4,732,7***n,0.1,0.2,0,0.0974768519,0,0.993592814,0.0974768519,1,5,0\n            self.data = pd.read_csv(f)\n            self.class_label = self.data['Class']\n            self.bidder_tendency = self.data['Bidder_Tendency']\n            self.bidding_ratio = self.data['Bidding_Ratio']\n            self.successive_outbidding = self.data['Successive_Outbidding']\n            self.last_bidding = self.data['Last_Bidding']\n            self.auction_bids = self.data['Auction_Bids']\n            self.starting_price_average = self.data['Starting_Price_Average']\n            self.early_bidding = self.data['Early_Bidding']\n            self.winning_ratio = self.data['Winning_Ratio']\n            self.auction_duration = self.data['Auction_Duration']\n    \n    def __len__(self):\n        return len(self.data)\n    \n    def __getitem__(self, index):\n        features = torch.tensor([\n            self.bidder_tendency[index],\n            self.bidding_ratio[index],\n            self.successive_outbidding[index],\n            self.last_bidding[index],\n            self.auction_bids[index],\n            self.starting_price_average[index],\n            self.early_bidding[index],\n            self.winning_ratio[index],\n            self.auction_duration[index]\n        ], dtype=torch.float32)\n\n        label = torch.tensor(self.class_label[index], dtype=torch.long)\n\n        return features, label","repo_name":"Jzw-2001/MachineLearningCourseWork1","sub_path":"bidding/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"16930700591","text":"import logging\nimport configparser\n\n\n# function to pop out unwanted tags from JSON\ndef daily_cases(response_data):\n country_data = response_data['Countries']\n logging.info(f'API fetched {len(country_data)} countries data')\n for i in country_data:\n del i['Slug']\n del i['Premium']\n del i['ID']\n return country_data\n\n\n# function to return last updated date from API for comparison purpose\ndef date_compare(res_data):\n api_temp_date = res_data['Global']['Date']\n api_date = api_temp_date[0: api_temp_date.index(\"T\")]\n return api_date\n\n\n# function to get values from ini file\ndef data_config(section):\n config = configparser.ConfigParser()\n section_allowed = ['SQL_local', 'API', 'SQL_AWS']\n if query_check(section, section_allowed) is True:\n config.read(r\"D:\\code\\corona_cases\\connection.ini\")\n result = dict(config.items(section))\n return result\n else:\n return False\n\n\n# function to check if any input given matches with not_allowed list\ndef query_check(qry_input, not_allowed):\n qry_chk = any(item in qry_input for item in not_allowed)\n return qry_chk\n","repo_name":"manojt2501/corona_cases","sub_path":"data_master.py","file_name":"data_master.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"748728337","text":"## 아래와 같이 N개의 정수로 구성된 수열\n## M개의 쿼리 정보가 주어짐\n### 각 쿼리는 L과 R로 구성\n### [L, R] 구간에 해당하는 데이터들의 합을 구하시오.\n### [10, 20, 30, 40, 50]\n\nrand_list = [10, 20, 30, 40, 50]\n# query_dic_list = [(1, 3), (2, 4), (1, 4), (3, 5), (3, 4)]\nleft = 3\nright = 4\nsum_val = 0\nfor rand_val in range(left-1, right):\n sum_val += rand_list[rand_val]\nprint(sum_val)\n\n\n'''\nfor value in query_dic_list:\n sum_val = 0\n for rand_val in range(value[0] - 1, value[1]):\n sum_val += rand_list[rand_val]\n\n print(sum_val)\n'''\n\n\n\n## 풀이\nn = 5\ndata = [10, 20, 30, 40, 50]\n\nsummary = 0\nprefix_sum = [0]\nfor i in data:\n summary += i\n prefix_sum.append(summary)\n\nleft = 3\nright = 4\nprint(prefix_sum[right] - prefix_sum[left-1])\n","repo_name":"mingginew88/study-python","sub_path":"algorithm/study2.py","file_name":"study2.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"32184805485","text":"import os ,random,Wallets,Payments,Admin,Giveaway,Jackpot,threading,subprocess,SharedCode,datetime\nMainLocation=\"C:/Users/oscar/Desktop/DiscordGamblingBot/DiscordGamblingBot/\"\nos.chdir(MainLocation+\"discord\")\nimport discord,discord.ext\nos.chdir(\"../\")\nclient = discord.Client()\nWalletLocation = MainLocation+\"UserInfo/Wallets\"\nDepositAddresses = MainLocation+\"UserInfo/DepositAddresses\"\nAdminIDs = open(MainLocation+\"UserInfo/AdminIDs.bin\",\"r\").read()\n\nParticipants=[]\nDeposited=[]\nColour=[]\n\nimport time,asyncio\n\nasync def AddParticipant(message,client):\n DiscordId=message.author.id\n Deposit=round(float(message.content.split(\" \")[2]),3)\n CurGRLC=float(open(WalletLocation+\"/\"+DiscordId+\".bin\",\"r\").read())\n LColour=message.content.split(\" \")[3].lower()\n if LColour in \"redgreenblack\":\n if CurGRLC>=Deposit:\n #SharedCode.AdjustWallet(DiscordId,-Deposit)\n Participants.append(DiscordId)\n Deposited.append(Deposit)\n Colour.append(LColour)\n await client.send_message(message.channel,\"<@\"+DiscordId+\"> You deposited \"+str(Deposit)+\"GRLC into the roulete game!\")\n else:\n await 
client.send_message(message.channel,\"<@\"+DiscordId+\"> You dont have enough GRLC!\")\n else:\n await client.send_message(message.channel,\"<@\"+DiscordId+\"> You chose an invalid colour!\\nTry `?roulete join `\")\n\nasync def FinishGame(channel,client):\n global Participants,Deposited,Colour\n if datetime.datetime.now().minute%10==0 and len(Participants)>=2:\n WinningColourNum=random.randint(0,37)\n WinningColour=\"\"\n Multiplyer=2\n if WinningColourNum==0:\n WinningColour=\"green\"\n Multiplyer=4\n elif WinningColourNum%2==0:\n WinningColour=\"red\"\n elif WinningColourNum%2==1:\n WinningColour=\"black\"\n await client.send_message(channel,\"The Winning Colour Is: \"+WinningColour.upper()+\"!!!!!\")\n for i in range(0,len(Participants)):\n if Colour[i]==WinningColour:\n #SharedCode.AdjustWallet(Participants[i],Deposited[i]*Multiplyer)\n await client.send_message(channel,\"<@\"+Participants[i]+\"> You won \"+str(Deposited[i]*Multiplyer)+\"GRLC!\")\n else:\n await client.send_message(channel,\"<@\"+Participants[i]+\"> You lost! Better luck next time...\")\n Participants=[]\n Deposited=[]\n Colour=[]\n\nasync def IncreaseFunds(message,client):\n DiscordId=message.author.id\n Out=abs(round(float(message.content.split(\" \")[2]),3))\n CurGRLC = float( open(WalletLocation+\"/\"+DiscordId+\".bin\",\"r\").read())\n if DiscordId in Participants and CurGRLC>=Out:\n print(\"Yay\")\n for pos in range(0,len(Participants)):\n if Participants[pos]==DiscordId:\n Deposited[pos]+=Out\n await client.send_message(message.channel,\"<@\"+DiscordId+\"> You now have deposited: \"+str(Deposited[pos]))\n else:\n print(\"Nay\")\n await client.send_message(message.channel,\"<@\"+DiscordId+\"> You are yet to enter the jackpot or dont have enough GRLC!\\nType `?roulette join ` to participate.\")\n\nasync def Scheduler(client):\n while True:\n await asyncio.sleep(10)\n await FinishGame(client.get_channel(\"422829572212523009\"),client)\n","repo_name":"Jaminima/GRLC-ambles","sub_path":"DiscordGamblingBot/Roulette.py","file_name":"Roulette.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72223841510","text":"\nimport random\nimport math\n\n\nDEFAULT_SIZE = 4\n\nclass level_layout_generator:\n def __init__(self, size=DEFAULT_SIZE, min_room_size = (5,5)):\n # dungeon is kept in a constant 16x9 aspect ratio, size controls the fidelity\n self.x_max = 16*size\n self.y_max = 9*size\n\n # minimum room is a 5x5 box (leaves a 3x3 floor space open,\n # always allows for a spawn or exit to be placed)\n self.x_room_min = min_room_size[0]\n self.y_room_min = min_room_size[1]\n\n # maximum room that can fit in the allocated grid space\n self.xRoomMax = int(self.x_max/3)\n self.yRoomMax = int(self.y_max/3)\n\n # maximum and minimum number of treasure that can be spawned in a room\n self.min_treasure_spawn = 0\n self.max_treasure_spawn = 3\n\n # max and min of enemies that can be spawned in a room\n self.min_enemy_spawn = 0\n self.max_enemy_spawn = 3\n\n # TILE KEYS - Used to avoid writing tiles types as numbers\n self.EMPTY_TILE = 0\n self.CORNER_WALL_TILE = 1\n self.H_WALL_TILE = 2\n self.V_WALL_TILE = 3\n self.FLOOR_TILE = 4\n self.HALLWAY = 5\n self.DOOR = 6\n self.EXIT = 7\n self.SPAWN = 8\n\n self.ENEMY = 20\n self.TREASURE = 100\n\n self.PLAYER = 69\n\n def generate_layout(self, debug=False):\n \"\"\"\n Generates a random dungeon level layout.\n Set debug to TRUE if you want the system to print\n the map at various stages 
of development\n \"\"\"\n map_data, room_bounds = self.create_room_layout(debug)\n populated_map = self.add_features(map_data, room_bounds, debug)\n\n return populated_map\n\n def create_room_layout(self, debug=False):\n \"\"\"\n Creates a matrix of tile IDs representing a 3x3 collection\n of rooms and the hallways connecting them.\n \"\"\"\n # algorithm starts by dividng given space into a 3x3 grid,\n # with non-uniform heights and widths for the rows and columns\n # one room will be placed in each grid space (for a total of 9 rooms)\n\n X = [ #calculate x values for the edges of the 3x3 room grid\n 0,\n self.x_max/3 + random.randint(-self.x_max/16, self.x_max/16),\n self.x_max*2/3 + random.randint(-self.x_max/16, self.x_max/16),\n self.x_max\n ]\n Y = [ #calculate y values for the edges of the 3x3 room grid\n 0,\n self.y_max/3 + random.randint(-self.y_max/9, self.y_max/9),\n self.y_max*2/3 + random.randint(-self.y_max/9, self.y_max/9),\n self.y_max\n ]\n\n # initialize room matrix. each [x][y] index stores a tuple containing the\n # coordinates of the 4 corners of room [x][y] in the grid space index\n room_bounds = [[None for i in range(3)] for j in range(3)]\n\n for i in range(3):\n for j in range(3):\n # looping through each grid space in the room array\n\n # generate a random point within the grid space to place the top left corner of the room\n # point is biased to be placed closer to the top-left of the grid (to reduce)\n p0 = (int(self.bias_small_rand(X[j], X[j+1]-self.x_room_min-1)),\n int(self.bias_small_rand(Y[i], Y[i+1]-self.y_room_min-1)))\n\n # generate a random room length and width that will keep the room within the gridspace\n # dimensions of room are biased to be smaller to keep the rooms well spaceout\n d = (int(self.bias_small_rand(self.x_room_min, min(self.xRoomMax, X[j+1] - p0[0]))),\n int(self.bias_small_rand(self.y_room_min, min(self.yRoomMax, Y[i+1] - p0[1]))))\n room_bounds[j][i] = ( # create room tuple containing coordinates for the 4 corners ordered as:\n (p0[0], p0[1]), # 0 - - - 1\n (p0[0] + d[0], p0[1]), # | |\n (p0[0], p0[1] + d[1]), # | |\n (p0[0] + d[0], p0[1] + d[1]) # 2 - - - 3\n )\n\n tile_data = [[0 for i in range(self.y_max)] for j in range(self.x_max)]\n # initializes a (x_max, y_max) array to store the tile type at a specified coordinate\n # place floor, wall, and corner tiles in the grid\n for i in range(3):\n for j in range(3):\n # looping through each grid space in the room array\n for a in range(room_bounds[j][i][0][0], room_bounds[j][i][1][0]+1):\n for b in range(room_bounds[j][i][0][1], room_bounds[j][i][2][1]+1):\n if (a == room_bounds[j][i][0][0] or a == room_bounds[j][i][1][0]):\n # point (a, b) is on the left or right wall of the room\n tile_data[a][b] = self.V_WALL_TILE\n elif (b == room_bounds[j][i][0][1] or b == room_bounds[j][i][2][1]):\n # point (a, b) is on the top or bottom wall of the room\n tile_data[a][b] = self.H_WALL_TILE\n else:\n # point (a, b) is contained within the room\n tile_data[a][b] = self.FLOOR_TILE\n\n for p in range(0, 4):\n # sets the coordinates of the room's 4 corners to corner tiles\n tile_data[room_bounds[j][i][p][0]] [room_bounds[j][i][p][1]] = self.CORNER_WALL_TILE\n\n if debug:\n self.debug(tile_data, \"Room Placement\")\n\n # now hallways connecting the rooms must be generated\n for i in range(0, 3):\n for j in range(0, 2):\n # looping through each vertical edge in the grid space\n # looking at space between room1 (j,i) and room2 (j+1, i)\n\n r1 = room_bounds[j][i]\n r2 = room_bounds[j+1][i]\n\n # generate 
random co-ordinates for the doors\n d1 = (r1[1][0], random.randint(r1[1][1] + 1, r1[3][1] - 1))\n d2 = (r2[0][0], random.randint(r2[0][1] + 1, r2[2][1] - 1))\n\n # now generate a hallway connecting these two doors\n hallway = self.generate_hallway(d1[0]+1, d1[1], d2[0]-1, d2[1])\n\n # we now add the doors to the tile_data\n tile_data[d1[0]][d1[1]] = self.DOOR\n tile_data[d2[0]][d2[1]] = self.DOOR\n\n for p in hallway: # loop through hallway list adding hallway tiles...\n tile_data[p[0]][p[1]] = self.HALLWAY\n\n # and a quick wall system around them\n if tile_data[p[0]-1][p[1]-1] == self.EMPTY_TILE: tile_data[p[0]-1][p[1]-1] = self.CORNER_WALL_TILE\n if tile_data[p[0]-1][p[1]] == self.EMPTY_TILE: tile_data[p[0]-1][p[1]] = self.CORNER_WALL_TILE\n if tile_data[p[0]-1][p[1]+1] == self.EMPTY_TILE: tile_data[p[0]-1][p[1]+1] = self.CORNER_WALL_TILE\n\n if tile_data[p[0]][p[1]-1] == self.EMPTY_TILE: tile_data[p[0]][p[1]-1] = self.CORNER_WALL_TILE\n if tile_data[p[0]][p[1]] == self.EMPTY_TILE: tile_data[p[0]][p[1]] = self.CORNER_WALL_TILE\n if tile_data[p[0]][p[1]+1] == self.EMPTY_TILE: tile_data[p[0]][p[1]+1] = self.CORNER_WALL_TILE\n\n if tile_data[p[0]+1][p[1]-1] == self.EMPTY_TILE: tile_data[p[0]+1][p[1]-1] = self.CORNER_WALL_TILE\n if tile_data[p[0]+1][p[1]] == self.EMPTY_TILE: tile_data[p[0]+1][p[1]] = self.CORNER_WALL_TILE\n if tile_data[p[0]+1][p[1]+1] == self.EMPTY_TILE: tile_data[p[0]+1][p[1]+1] = self.CORNER_WALL_TILE\n \n\n if (debug):\n #debugs a sub-section of tile_data that contains the generated hallway\n bounds = (\n d1[0]-1,\n d2[0]+2,\n min(d1[1], d2[1])-1,\n max(d1[1], d2[1])+2)\n\n slice = [tile_data[i][bounds[2]:bounds[3]] for i in range(bounds[0],bounds[1])]\n self.debug(slice, (\"Hallway b/w: (\" + str(j) + \",\" + str(i) + \") & (\"+ str(j+1) + \",\" + str(i) + \")\"))\n\n # repeat previous process for horizontal edges\n for i in range(0, 2):\n for j in range(0, 3):\n # looping through each horizontal edge in the grid space\n # looking at space between room (j,i) and (j, i+1)\n\n r1 = room_bounds[j][i]\n r2 = room_bounds[j][i+1]\n\n # generate random co-ordinates for the doors\n d1 = (random.randint(r1[2][0] + 1, r1[3][0] - 1), r1[2][1])\n d2 = (random.randint(r2[0][0] + 1, r2[1][0] - 1), r2[0][1])\n\n # now we generate a hallway\n hallway = self.generate_hallway(d1[0], d1[1]+1, d2[0], d2[1]-1)\n\n # we now add doors to the tile_data\n tile_data[d1[0]][d1[1]] = self.DOOR\n tile_data[d2[0]][d2[1]] = self.DOOR\n\n for p in hallway: # loop through hallway list adding points\n tile_data[p[0]][p[1]] = self.HALLWAY\n\n # and a quick wall system around them\n if tile_data[p[0]-1][p[1]-1] == self.EMPTY_TILE: tile_data[p[0]-1][p[1]-1] = self.CORNER_WALL_TILE\n if tile_data[p[0]-1][p[1]] == self.EMPTY_TILE: tile_data[p[0]-1][p[1]] = self.CORNER_WALL_TILE\n if tile_data[p[0]-1][p[1]+1] == self.EMPTY_TILE: tile_data[p[0]-1][p[1]+1] = self.CORNER_WALL_TILE\n\n if tile_data[p[0]][p[1]-1] == self.EMPTY_TILE: tile_data[p[0]][p[1]-1] = self.CORNER_WALL_TILE\n if tile_data[p[0]][p[1]] == self.EMPTY_TILE: tile_data[p[0]][p[1]] = self.CORNER_WALL_TILE\n if tile_data[p[0]][p[1]+1] == self.EMPTY_TILE: tile_data[p[0]][p[1]+1] = self.CORNER_WALL_TILE\n\n if tile_data[p[0]+1][p[1]-1] == self.EMPTY_TILE: tile_data[p[0]+1][p[1]-1] = self.CORNER_WALL_TILE\n if tile_data[p[0]+1][p[1]] == self.EMPTY_TILE: tile_data[p[0]+1][p[1]] = self.CORNER_WALL_TILE\n if tile_data[p[0]+1][p[1]+1] == self.EMPTY_TILE: tile_data[p[0]+1][p[1]+1] = self.CORNER_WALL_TILE\n\n if (debug):\n #debugs a sub-section of tile_data 
that contains the generated hallway\n bounds = (\n min(d1[0], d2[0])-1,\n max(d1[0], d2[0])+2,\n d1[1]-1,\n d2[1]+2)\n\n slice = [tile_data[i][bounds[2]:bounds[3]] for i in range(bounds[0],bounds[1])]\n self.debug(slice, (\"Hallway b/w: (\" + str(j) + \",\" + str(i) + \") & (\"+ str(j+1) + \",\" + str(i) + \")\"))\n\n if debug:\n self.debug(tile_data, \"Hallway Placement\")\n\n return (tile_data, room_bounds)\n\n def generate_hallway(self, start_x, start_y, end_x, end_y):\n \"\"\"\n Generates a hallway connecting the two given points.\n The hallway is represented as a list of coordinates\n the hallway passes through\n \"\"\"\n # create a 2-dim element storing the current coordinate\n # (initialize it as the first coordinate of the hallway)\n # add that point to the hallway list\n pointer = [start_x, start_y]\n hallway = [tuple(pointer)]\n # find the distance needed to get from the start point to the end point\n d = [end_x - start_x, end_y - start_y]\n\n #\n # -1:left, 1:right -1:up, 1:down\n dir = [int(math.copysign(1 , d[0])), int(math.copysign(1 , d[1]))]\n # verify if the hallway is straight or not (copysign(0) does not return 0)\n if d[0] == 0:\n dir[0] = 0\n if d[1] == 0:\n dir[1] = 0\n\n # iterate the pointer towards the final position, adding each point\n # it passes through to the hallway list\n counter = 0\n while(pointer[0] != end_x or pointer[1] != end_y):\n r = random.randint(0, 1) # pick a random direction to move\n pointer[r] = pointer[r] + dir[r] # update pointer poitiion and distance to end point\n d[r] = d[r] - dir[r]\n hallway.append(tuple(pointer))\n\n if d[r] == 0:\n # if the pointer is now parallel to the door, our direction will no longer work.\n # draw a straight line to the last point\n nr = (r+1)%2\n while d[nr] != 0:\n pointer[nr] = pointer[nr] + dir[nr]\n d[nr] = d[nr] - dir[nr]\n hallway.append(tuple(pointer))\n\n counter += 1 #if the hallway is taking too long to generate, break loop (should never trigger)\n if counter > 10000:\n print(TimeoutError, \": could not generate hallway\")\n break\n\n return hallway\n\n def add_features(self, tile_data, room_bounds, debug=False):\n \"\"\"\n Adds features to the given tile_map, including enemy spawns,\n trasure spawns, and the level exit and start points.\n \"\"\"\n\n for i in range(3):\n for j in range(3):\n # looping through each grid space in the room array\n\n if i != 1 and j != 1: # skips the spawn room\n # we don't want to place treasures here!\n\n # generate a random number of treasure spawns in the room (biased to have less loot)\n t_max = int(self.bias_small_rand(self.min_treasure_spawn, self.max_treasure_spawn))\n if t_max > 0:\n for t in range(t_max): # for each spawn, generate a random point for the treasure\n item_coor = (random.randint(room_bounds[j][i][0][0]+1, room_bounds[j][i][1][0]-1),\n random.randint(room_bounds[j][i][0][1]+1, room_bounds[j][i][2][1]-1))\n # add that treasure point to the tile data\n tile_data[item_coor[0]][item_coor[1]] = self.TREASURE\n\n # generate a random number of enemies in the room\n # (by flipping the min and the max in the bias_small_rand function,\n # code biases room to have more enemies)\n e_max = int(self.bias_small_rand(self.max_enemy_spawn, self.min_enemy_spawn))\n if e_max > 0:\n for e in range(e_max): # for each of these enemies, place them randomly in the room\n # *NOTE* these could overlap with treasure tiles, but this will just result in less loot for the player\n enemy_coor = (random.randint(room_bounds[j][i][0][0]+1, room_bounds[j][i][1][0]-1),\n 
random.randint(room_bounds[j][i][0][1]+1, room_bounds[j][i][2][1]-1))\n tile_data[enemy_coor[0]][enemy_coor[1]] = self.ENEMY\n\n # finally add player spawn and level exit\n # player spawn is randomly placed in the center room\n spawn_coor = (random.randint(room_bounds[1][1][0][0]+2, room_bounds[1][1][1][0]-2),\n random.randint(room_bounds[1][1][0][1]+2, room_bounds[1][1][2][1]-2))\n tile_data[spawn_coor[0]][spawn_coor[1]] = self.SPAWN\n\n # the exit is randomly placed in any room\n # *NOTE* that both are placed at least one space from the wall to avoid blocking any doors\n exit_room = room_bounds[random.randint(0,2)][random.randint(0,2)]\n exit_coor = (random.randint(exit_room[0][0]+2, exit_room[1][0]-2),\n random.randint(exit_room[0][1]+2, exit_room[2][1]-2))\n tile_data[exit_coor[0]][exit_coor[1]] = self.EXIT\n\n if debug:\n self.debug(tile_data, \"Final Product\")\n\n return tile_data\n\n\n\n # Helper Functions\n\n def bias_small_rand(self, min, max):\n \"\"\"\n Generates a random value between the given minimum and maximum.\n Distribution is biased to return values closer to the minimum\n \"\"\"\n r = (random.random() * random.random() + random.random())/2\n return min + (max - min)*r\n\n def debug(self, arr, title=None):\n \"\"\"\n Creates an ASCII preview of the given tile matrix\n Possible characters to print with:\n ☺☻♥♦♣♠♫☼►◄↕‼¶§▬↨↑↓→∟↔▲▼\n 123456789:;<=>?@\n ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`\n abcdefghijklmnopqrstuvwxyz{|}~⌂\n \"\"\"\n\n if (title != None):\n print(\"Debugging Step:\", title)\n\n X = len(arr)\n Y = len(arr[0])\n print(X, \"x\", Y)\n\n print(\"~ \"*(X+2))\n for i in range(Y):\n print(\"[\", end=\" \")\n for j in range(X):\n tile = arr[j][i]\n # check for map tiles\n if tile == self.EMPTY_TILE:\n print(\" \", end=\" \")\n elif tile == self.CORNER_WALL_TILE:\n print(\"x\", end=\" \")\n elif tile == self.H_WALL_TILE:\n print(\"—\", end=\" \")\n elif tile == self.V_WALL_TILE:\n print(\"|\", end=\" \")\n elif tile == self.FLOOR_TILE:\n print(\".\", end=\" \")\n elif tile == self.HALLWAY:\n print(\"#\", end=\" \")\n elif tile == self.DOOR:\n print(\"D\", end=\" \")\n elif tile == self.SPAWN:\n print(\"S\", end=\" \")\n elif tile == self.EXIT:\n print(\"⌂\", end=\" \")\n elif tile == self.PLAYER:\n print(\"\\x0c\", end=\" \")\n elif tile == self.ENEMY:\n print(\"E\", end=\" \")\n elif tile == self.TREASURE:\n print(\"T\", end=\" \")\n else: # displays a tile without a specified symbol as (‼)\n print(\"\\x13\", end=\" \")\n\n print(\"]\")\n print(\"~ \"*(X+2))\n\n# uncomment for testing\n\"\"\"\nl = level_layout_generator(6)\nl.generate_layout(True)\n\"\"\"","repo_name":"DanDanCool/Ne111","sub_path":"src/level_generation.py","file_name":"level_generation.py","file_ext":"py","file_size_in_byte":18281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1531101571","text":"import pandas as pd\n#import fasttext\nimport csv\nfrom utilz import preprocess_text\n\n\ndef convert_labels(val):\n val = val.strip(' ')\n return '__label__' + val\n\n\n\ndef prepare_dataset(path, new_path):\n train = pd.read_csv(path)\n# print(train.head())\n set_type = path.split('/')[-1].split('.csv')[0]\n train['label'] = train['class'].map(convert_labels)\n# print(train.head())\n print(f'label distribution for {set_type} set')\n print(train.label.value_counts())\n print(f'number of samples in {set_type} set')\n print(len(train))\n train['text'] = train['unprocessed_text'].replace('\\n',' ', regex=True).replace('\\t',' ', regex=True)\n 
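    # fastText's supervised format is one labeled example per line, so embedded newlines/tabs were collapsed to spaces above\n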
train['text'] = train['text'].map(preprocess_text)\n train_df_fasttext = train[['label', 'text']]\n print(train_df_fasttext.head())\n print(f'writing out df to {new_path}')\n train_df_fasttext.to_csv(f'{new_path}', index=False, sep=' ', header=False, quoting=csv.QUOTE_NONE, quotechar=\"\", escapechar=\" \")\n return train_df_fasttext\n\n\n\ntrain_df = pd.read_csv('../data-orig/train.csv')\nprint(train_df.head())\nprint(train_df.columns)\nprint(train_df['class'].map(convert_labels))\ntrain_df_fasttext = prepare_dataset(path='../data-orig/train.csv', new_path='../data-fasttext/task4.train')\nval_df_fasttext = prepare_dataset(path='../data-orig/validation.csv', new_path='../data-fasttext/task4.val')\n#print(help(fasttext))\n# model = fasttext.train_supervised(input=\"../data-fasttext/task4.train\")\n# model.save_model(\"task4baseline.bin\")\n# tweet_example = val_df_fasttext.text[0]\n# print(model.predict(tweet_example))\n\n\n# why is sourcing data important\n# what is the baseline model\n# embeddings as input\n","repo_name":"izzykayu/RxSpace","sub_path":"preproc/prepare_for_fasttext.py","file_name":"prepare_for_fasttext.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"5036669033","text":"import numpy as np\nimport cv2\n\n\ndef nothing(x):\n pass\n\n\ncv2.namedWindow(\"Trackbar_Window\")\n\n# trackbar for gaussian blur\ncv2.createTrackbar(\"Gaussian1\", \"Trackbar_Window\", 1, 99, nothing)\ncv2.createTrackbar(\"Gaussian2\", \"Trackbar_Window\", 1, 99, nothing)\n\n# trackbar for canny\ncv2.createTrackbar(\"Canny1\", \"Trackbar_Window\", 0, 255, nothing)\ncv2.createTrackbar(\"Canny2\", \"Trackbar_Window\", 0, 255, nothing)\n\n# trackbar for dilate\ncv2.createTrackbar(\"Dilate1\", \"Trackbar_Window\", 1, 99, nothing)\ncv2.createTrackbar(\"Dilate2\", \"Trackbar_Window\", 1, 99, nothing)\n\n\nwhile True:\n img = cv2.imread(\"Pictures/piece05.png\")\n img_original = img.copy()\n\n # getting trackbar values for gaussian blur, canny and dilation\n gauss1 = cv2.getTrackbarPos(\"Gaussian1\", \"Trackbar_Window\")\n gauss2 = cv2.getTrackbarPos(\"Gaussian2\", \"Trackbar_Window\")\n thresh1 = cv2.getTrackbarPos(\"Canny1\", \"Trackbar_Window\")\n thresh2 = cv2.getTrackbarPos(\"Canny2\", \"Trackbar_Window\")\n dilate1 = cv2.getTrackbarPos(\"Dilate1\", \"Trackbar_Window\")\n dilate2 = cv2.getTrackbarPos(\"Dilate2\", \"Trackbar_Window\")\n\n # adjusting gaussian blur value\n if gauss1 % 2 == 0:\n gauss1 += 1\n if gauss2 % 2 == 0:\n gauss2 += 1\n\n # performing gray scale, gaussian blur, canny and dilation operations\n gray = cv2.cvtColor(img_original, cv2.COLOR_BGR2GRAY)\n gauss_blurred = cv2.GaussianBlur(gray, (gauss1, gauss2), 0)\n canny = cv2.Canny(gauss_blurred, thresh1, thresh2, 3)\n dilated = cv2.dilate(canny, (dilate1, dilate2), iterations=2)\n\n # finding contours\n contours, hierarchy = cv2.findContours(\n dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE\n )\n\n # modifying the image shapes to stack together\n gray = np.stack((gray,) * 3, axis=-1)\n gauss_blurred = np.stack((gauss_blurred,) * 3, axis=-1)\n canny = np.stack((canny,) * 3, axis=-1)\n dilated = np.stack((dilated,) * 3, axis=-1)\n\n images = [gray, gauss_blurred, canny, dilated]\n win_names = [\"gray\", \"blurred\", \"edged\", \"dilated\"]\n\n # stacking window horizontally\n img_stack = np.hstack(images)\n img_stack = cv2.resize(img_stack, (900, 300))\n\n cv2.imshow(\"Trackbar_Window\", img_stack)\n\n print(\"Objects in the 
image:\", len(contours))\n\n k = cv2.waitKey(1)\n if k == ord(\"q\"):\n break\n\ncv2.destroyAllWindows()\n\n\"\"\"\n Objects in the image: 145\n Objects in the image: 145\n Objects in the image: 145\n Objects in the image: 70\n Objects in the image: 21252\n Objects in the image: 101\n Objects in the image: 121\n\"\"\"\n","repo_name":"sladersh/OpenCV-Fundamentals-HV","sub_path":"StudentShellScripts/Exercise_5.py","file_name":"Exercise_5.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"86284836336","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nfrom abc import abstractmethod\nfrom astropy import units\nfrom astropy.io import fits\nimport numpy as np\nimport re\n\nfrom sofia_redux.scan.scan.scan import Scan\nfrom sofia_redux.scan.coordinate_systems.offset_2d import Offset2D\nfrom sofia_redux.scan.coordinate_systems.coordinate_2d import Coordinate2D\nfrom sofia_redux.scan.utilities.utils import to_header_float\n\n__all__ = ['SofiaScan']\n\n\nclass SofiaScan(Scan):\n\n DEFAULT_FITS_DATE = \"1970-01-01T00:00:00.0\"\n\n def __init__(self, channels, reduction=None):\n \"\"\"\n Initialize a SOFIA scan.\n\n The SOFIA scan is an abstract class general to all SOFIA instruments.\n\n Parameters\n ----------\n channels : sofia_redux.scan.custom.sofia.channels.camera.SofiaCamera\n The instrument channels for the scan.\n reduction : sofia_redux.scan.reduction.reduction.Reduction, optional\n The reduction to which this scan belongs.\n \"\"\"\n self.hdul = None\n self.header_extension = 0\n self.header = None\n self.history = None\n super().__init__(channels, reduction=reduction)\n\n def copy(self):\n \"\"\"\n Return a copy of the scan.\n\n Returns\n -------\n SofiaScan\n \"\"\"\n return super().copy()\n\n @property\n def referenced_attributes(self):\n \"\"\"\n Return the names of attributes that are referenced during a copy.\n\n Returns\n -------\n attribute_names : set (str)\n \"\"\"\n attributes = super().referenced_attributes\n attributes.add('hdul')\n attributes.add('header')\n return attributes\n\n @property\n def info(self):\n \"\"\"\n Return the information object for the scan.\n\n The information object contains the reduction configuration and various\n parameters pertaining the this scan.\n\n Returns\n -------\n SofiaInfo\n \"\"\"\n return super().info\n\n @property\n def astrometry(self):\n \"\"\"\n Return the scan astrometry information.\n\n Returns\n -------\n info : SofiaAstrometryInfo\n \"\"\"\n return super().astrometry\n\n @staticmethod\n def get_lowest_quality(scans):\n \"\"\"\n Return the lowest quality processing stats from a set of scans.\n\n Parameters\n ----------\n scans : list (SofiaScan)\n A list of scans.\n\n Returns\n -------\n QualityFlags.QualityFlagTypes\n The lowest quality flag type.\n \"\"\"\n lowest_quality = np.inf\n lowest_scan = None\n for scan in scans:\n if isinstance(scan, SofiaScan):\n if scan.info.processing.quality_level.value < lowest_quality:\n lowest_quality = scan.info.processing.quality_level.value\n lowest_scan = scan\n\n if lowest_scan is not None:\n return lowest_scan.info.processing.flagspace.convert_flag(\n lowest_quality)\n else:\n return None\n\n @staticmethod\n def get_total_exposure_time(scans):\n \"\"\"\n Return the total exposure time in a set of scans.\n\n Parameters\n ----------\n scans : list (SofiaScan)\n A list of scans.\n\n Returns\n -------\n exposure_time : units.Quantity\n The total exposure time from all 
scans in seconds.\n \"\"\"\n exposure_time = 0.0 * units.Unit('s')\n if scans is None:\n return exposure_time\n for scan in scans:\n if isinstance(scan, SofiaScan):\n exposure_time += scan.info.instrument.exposure_time\n return exposure_time\n\n @staticmethod\n def has_tracking_error(scans):\n \"\"\"\n Report whether any scan in a set contains a telescope tracking error.\n\n Parameters\n ----------\n scans : list (SofiaScan)\n A list of scans.\n\n Returns\n -------\n tracking_error : bool\n `True` if any scan contains a telescope tracking error. `False`\n otherwise.\n \"\"\"\n if scans is None:\n return False\n for scan in scans:\n if isinstance(scan, SofiaScan):\n if scan.info.telescope.has_tracking_error:\n return True\n else:\n return False\n\n @staticmethod\n def get_earliest_scan(scans):\n \"\"\"\n Return the earliest scan in a list of scans determined by MJD.\n\n Parameters\n ----------\n scans : list (Scan)\n\n Returns\n -------\n Scan\n \"\"\"\n return SofiaScan.time_order_scans(scans)[0]\n\n @staticmethod\n def get_latest_scan(scans):\n \"\"\"\n Return the latest scan in a list of scans determined by MJD.\n\n Parameters\n ----------\n scans : list (Scan)\n\n Returns\n -------\n Scan\n \"\"\"\n return SofiaScan.time_order_scans(scans)[-1]\n\n def read(self, filename, read_fully=True):\n \"\"\"\n Read a filename to populate the scan.\n\n The read should validate the channels before instantiating integrations\n for reading.\n\n Parameters\n ----------\n filename : str\n The name of the file to read.\n read_fully : bool, optional\n If `True`, perform a full read (default)\n\n Returns\n -------\n None\n \"\"\"\n if isinstance(filename, fits.HDUList):\n self.hdul = filename\n else:\n self.hdul = fits.open(filename)\n self.read_hdul(self.hdul, read_fully=read_fully)\n self.close_fits()\n\n def close_fits(self):\n \"\"\"\n Close the scan FITS file.\n\n Returns\n -------\n None\n \"\"\"\n if self.hdul is None:\n return\n self.hdul.close()\n self.hdul = None\n\n def read_hdul(self, hdul, read_fully=True):\n \"\"\"\n Read an open FITS HDUL.\n\n Parameters\n ----------\n hdul : fits.HDUList\n The FITS HDU list to read.\n read_fully : bool, optional\n If `True` (default), fully read the file.\n\n Returns\n -------\n None\n \"\"\"\n self.info.parse_header(hdul[0].header.copy())\n self.channels.read_data(hdul)\n self.channels.validate_scan(self)\n self.integrations = []\n self.add_integrations_from_hdul(self.hdul)\n if self.size == 0:\n return\n self.info.sampling_interval = self[0].info.sampling_interval.copy()\n self.info.integration_time = self[0].info.integration_time.copy()\n\n def is_aor_valid(self):\n \"\"\"\n Checks whether the observation AOR ID is valid.\n\n Returns\n -------\n valid : bool\n \"\"\"\n return self.info.observation.is_aor_valid()\n\n def is_coordinate_valid(self, coordinate):\n \"\"\"\n Checks whether coordinates are valid.\n\n Parameters\n ----------\n coordinate : Coordinate2D\n\n Returns\n -------\n valid : bool\n \"\"\"\n return self.astrometry.coordinate_valid(coordinate)\n\n def is_requested_valid(self, header):\n \"\"\"\n Check if the requested coordinates are valid.\n\n Parameters\n ----------\n header : fits.Header\n\n Returns\n -------\n valid : bool\n \"\"\"\n return self.astrometry.is_requested_valid(header)\n\n def guess_reference_coordinates(self, header=None):\n \"\"\"\n Guess the reference coordinates of the scan from the header.\n\n Parameters\n ----------\n header : astropy.io.fits.Header, optional\n The header from which to guess the coordinates. 
If not supplied,\n is read from the stored configuration.\n\n Returns\n -------\n coordinates : EquatorialCoordinates\n \"\"\"\n return self.astrometry.guess_reference_coordinates(\n telescope=self.info.telescope, header=header)\n\n def edit_scan_header(self, header):\n \"\"\"\n Edit scan FITS header information.\n\n Parameters\n ----------\n header : astropy.io.fits.header.Header\n The header to edit.\n\n Returns\n -------\n None\n \"\"\"\n line = \" ----------------------------------------------------\"\n super().edit_scan_header(header)\n header['COMMENT'] = line\n header['COMMENT'] = \" Section for preserved SOFIA header data\"\n header['COMMENT'] = line\n\n if self.astrometry.file_date is not None:\n header['DATE'] = (self.astrometry.file_date,\n 'Scan file creation date.')\n if self.info.origin.checksum is not None:\n header['DATASUM'] = (self.info.origin.checksum,\n 'Data file checksum.')\n if self.info.origin.checksum_version is not None:\n header['CHECKVER'] = (self.info.origin.checksum_version,\n 'Checksum method version.')\n\n self.info.edit_header(header)\n\n header['COMMENT'] = line\n header['COMMENT'] = \" Section for scan-specific processing history\"\n header['COMMENT'] = line\n\n self.info.add_history(header, scans=None)\n\n def validate(self):\n \"\"\"\n Validate the scan after a read.\n\n Returns\n -------\n None\n \"\"\"\n if not self.configuration.get_bool('lab'):\n self.astrometry.validate_astrometry(self)\n super().validate()\n\n def get_telescope_vpa(self):\n \"\"\"\n Return the telescope VPA.\n\n The value represents the midpoint of the first and last frames of the\n first and last integrations respectively.\n\n Returns\n -------\n angle : astropy.units.Quantity\n \"\"\"\n return self.frame_midpoint_value('telescope_vpa')\n\n def get_instrument_vpa(self):\n \"\"\"\n Return the instrument VPA.\n\n The value represents the midpoint of the first and last frames of the\n first and last integrations respectively.\n\n Returns\n -------\n angle : astropy.units.Quantity\n \"\"\"\n return self.frame_midpoint_value('instrument_vpa')\n\n def get_id(self):\n \"\"\"\n Return the scan ID.\n\n Returns\n -------\n str\n \"\"\"\n obs_id = self.info.observation.obs_id\n if obs_id is None:\n return f'{self.info.astrometry.date}.UNKNOWN'\n elif obs_id.lower().startswith('unknown'):\n return f'{self.info.astrometry.date}.{obs_id[7:]}'\n else:\n return obs_id\n\n def get_pointing_data(self):\n \"\"\"\n Return pointing data information.\n\n Returns\n -------\n data : dict\n \"\"\"\n data = super().get_pointing_data()\n relative = self.get_native_pointing(self.pointing)\n si_offset = self.get_si_pixel_offset(relative)\n data['dSIBSX'] = si_offset.x\n data['dSIBSY'] = si_offset.y\n return data\n\n def get_flight_number(self):\n \"\"\"\n Return the flight number for the scan.\n\n Returns\n -------\n flight : int\n Returns the flight number or -1 if not found.\n \"\"\"\n mission_id = self.info.mission.mission_id\n if mission_id is None:\n return -1\n flight = re.search(r'_F(\\d+)', mission_id)\n if flight is None:\n return -1\n return int(flight.groups()[0])\n\n def get_scan_number(self):\n \"\"\"\n Return the scan number.\n\n Returns\n -------\n scan_number : int\n The scan number if found and -1 otherwise.\n \"\"\"\n if not isinstance(self.info.observation.obs_id, str):\n return -1\n scan_number = re.search(r'-(\\d+)', self.info.observation.obs_id)\n if scan_number is None:\n return -1\n return int(scan_number.groups()[-1])\n\n def get_table_entry(self, name):\n \"\"\"\n Return a parameter 
value for the given name.\n\n Parameters\n ----------\n name : str\n The name of the parameter to retrieve.\n\n Returns\n -------\n value\n \"\"\"\n if name == 'obstype':\n return self.info.observation.obs_type\n elif name == 'flight':\n return self.get_flight_number()\n elif name == 'scanno':\n return self.get_scan_number()\n elif name == 'date':\n return self.astrometry.date\n\n for group_name, group in self.info.available_info.items():\n if group is None: # pragma: no cover\n continue\n prefix = group.log_prefix\n if name.startswith(group.log_prefix):\n return group.get_table_entry(name[len(prefix):])\n\n return super().get_table_entry(name)\n\n def get_nominal_pointing_offset(self, native_pointing):\n \"\"\"\n Get the nominal point offset for a native pointing coordinate.\n\n The nominal pointing offset ignores the reference coordinate of the\n supplied `native_coordinate` and adds the offset values to the pointing\n offset stored in the configuration.\n\n Parameters\n ----------\n native_pointing : Offset2D\n The native pointing offset. The reference position is ignored.\n\n Returns\n -------\n nominal_pointing_offset: Coordinate2D\n \"\"\"\n offset = Coordinate2D(native_pointing)\n if self.configuration.is_configured('pointing'):\n pointing_offset = Coordinate2D(\n self.configuration.get_float_list('pointing'), unit='arcsec')\n offset.add(pointing_offset)\n return offset\n\n def get_si_arcseconds_offset(self, native_pointing):\n \"\"\"\n Get the offsets of the science instrument in arcseconds.\n\n Parameters\n ----------\n native_pointing : Offset2D\n The native pointing offset. The reference position is ignored.\n\n Returns\n -------\n Coordinate2D\n \"\"\"\n arc_offset = self.get_nominal_pointing_offset(native_pointing)\n arc_offset.change_unit('arcsec')\n return arc_offset\n\n def get_si_pixel_offset(self, native_pointing):\n \"\"\"\n Get the pixel offset of the science instrument.\n\n Parameters\n ----------\n native_pointing : Offset2D\n The native pointing offset. 
The reference position is ignored.\n\n Returns\n -------\n Coordinate2D\n \"\"\"\n si_offset = self.get_nominal_pointing_offset(native_pointing)\n angle = self.get_telescope_vpa() - self.get_instrument_vpa()\n\n Coordinate2D.rotate_offsets(si_offset, angle)\n Coordinate2D.rotate_offsets(si_offset, -self.channels.rotation)\n pixel_size = self.channels.get_si_pixel_size()\n\n pixel_coordinates = si_offset.coordinates / pixel_size.coordinates\n pixel_coordinates = pixel_coordinates.decompose().value\n reference = np.zeros(2, dtype=float)\n return Offset2D(reference, coordinates=pixel_coordinates)\n\n def get_pointing_string_from_increment(self, native_pointing):\n \"\"\"\n Return a string representing the scan pointing.\n\n Parameters\n ----------\n native_pointing : Offset2D\n\n Returns\n -------\n str\n \"\"\"\n si_offset = self.get_si_pixel_offset(native_pointing)\n result = super().get_pointing_string_from_increment(native_pointing)\n result += (f\"\\n\\n SIBS offset --> \"\n f\"{si_offset.x:.4f}, {si_offset.y:.4f} pixels\")\n return result\n\n def edit_pointing_header_info(self, header):\n \"\"\"\n Edit pointing information in a header.\n\n Parameters\n ----------\n header : astropy.io.fits.header.Header\n The FITS header to edit.\n\n Returns\n -------\n None\n \"\"\"\n super().edit_pointing_header_info(header)\n native_pointing = self.get_native_pointing_increment(self.pointing)\n si_offset = self.get_si_pixel_offset(native_pointing)\n dx, dy = si_offset.x, si_offset.y\n if isinstance(dx, units.Quantity): # pragma: no cover\n dx, dy = dx.decompose().value, dy.decompose().value\n\n offset = self.get_si_arcseconds_offset(native_pointing)\n xel, el = offset.x, offset.y\n header['SIBS_DX'] = dx, '(pixels) SIBS pointing increment in X.'\n header['SIBS_DY'] = dy, '(pixels) SIBS pointing increment in Y.'\n header['SIBS_DXE'] = (\n to_header_float(xel, 'arcsec'),\n \"(arcsec) SIBS cross-elevation offset\")\n header['SIBS_DE'] = (\n to_header_float(el, 'arcsec'),\n \"(arcsec) SIBS elevation offset\")\n\n @abstractmethod\n def add_integrations_from_hdul(self, hdul): # pragma: no cover\n \"\"\"\n Add integrations to this scan from a HDU list.\n\n Parameters\n ----------\n hdul : fits.HDUList\n\n Returns\n -------\n None\n \"\"\"\n pass\n","repo_name":"SOFIA-USRA/sofia_redux","sub_path":"sofia_redux/scan/custom/sofia/scan/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":17228,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"} +{"seq_id":"8385514692","text":"import os\nimport io\nimport requests\nimport cv2\nimport time\nimport asyncio\nimport aiohttp\nfrom PIL import Image\nfrom LicensePlateExtractor import LicensePlateExtractor, LICENSE_DATA_PATH, FRAME_DATA_PATH\nimport multiprocessing\nfrom args import parse_args\n\nLICENSE_DATA_PATH = \"data/license_plates\"\nFRAME_DATA_PATH = \"data/frames\"\n\n\ndef mkdirp(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n\nmkdirp(LICENSE_DATA_PATH)\nmkdirp(FRAME_DATA_PATH)\n\n\nasync def process_image_worker(session, images, host):\n while True:\n print('before images.get()')\n image = await images.get()\n print('process queue item')\n # Convert the image to bytes\n image_bytes = io.BytesIO()\n image.save(image_bytes, format=\"JPEG\")\n url = f\"http://{host}\"\n print(f\"requesting {url}\")\n\n headers = {'Content-Type': 'image/jpeg'}\n\n async with session.post(url, data=image_bytes.getvalue(), headers=headers) as response:\n print('response')\n # print(f\"response status: 
{response.status_code}\")\n\n\nasync def main():\n args = parse_args()\n host_index = 0\n\n vid = cv2.VideoCapture(args.capture_id)\n\n plate_extractor = LicensePlateExtractor(\n args) if args.hosts and len(args.hosts) > 0 else None\n\n if args.debug_frame is not None and len(args.debug_frame):\n img = Image.open(args.debug_frame)\n plate_extractor.extract_and_save(img)\n return\n\n images = asyncio.Queue(maxsize=len(args.hosts)) if args.hosts and len(\n args.hosts) > 0 else None\n\n async with aiohttp.ClientSession() as session:\n workers = [asyncio.create_task(process_image_worker(session, images, args.hosts[index])) for index in range(\n len(args.hosts))] if args.hosts and len(args.hosts) else None\n try:\n while True:\n ret, frame = vid.read()\n if frame is None or frame.size == 0:\n continue\n # Convert the BGR frame to RGB\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # Create a PIL image from the NumPy array\n img = Image.fromarray(frame_rgb)\n\n if args.record_frames:\n current_time_milliseconds = int(time.time() * 1000)\n fame_path = f\"{FRAME_DATA_PATH}/frame_{current_time_milliseconds}.jpg\"\n img.save(fame_path, FORMAT=\"JPEG\")\n print(f\"frame: {fame_path}\")\n if args.process_plates:\n if args.hosts is not None and len(args.hosts) > 0:\n await images.put(img)\n print(\"add queue item\")\n else:\n plate_extractor.extract_and_save(img)\n\n except KeyboardInterrupt:\n vid.release()\n print(\"\\nProgram terminated by user.\")\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","repo_name":"vicapow/license-plates","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"37451155679","text":"import numpy as np\nfrom onmt.hyper.nets import MobiusGRU\nfrom onmt.hyper.nets import MobiusGRUCell\nfrom onmt.hyper.nets import MobiusLinear\nfrom onmt.hyper.nets import GlobalAttention_hype\nfrom onmt.hyper.nets import LogSoftmax_hype\nimport torch\nimport geoopt\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\n\n\ndef batch_gen(batch_size=32, seq_len=10, max_no=100):\n while True:\n x = np.zeros((batch_size, seq_len, max_no), dtype=np.float32)\n y = np.zeros((batch_size, seq_len, max_no), dtype=np.float32)\n\n X = np.random.randint(5, max_no, size=(batch_size, seq_len - 1))\n start = np.zeros((batch_size, 1), dtype=X.dtype)\n X = np.hstack((start, X))\n Y = np.sort(X, axis=1)\n\n for ind, batch in enumerate(X):\n for j, elem in enumerate(batch):\n x[ind, j, elem] = 1\n\n for ind, batch in enumerate(Y):\n for j, elem in enumerate(batch):\n y[ind, j, elem] = 1\n yield x, y\n\n\nBATCH_SIZE = 64\nSTEP_SIZE = 10\nINPUT_SIZE = 75\nCELL_SIZE = 100\n\n\nclass rank_hype(torch.nn.Module):\n\n def __init__(self, input_size, cell_hize, step_size, bidirectional, c):\n super(rank_hype).__init__()\n self.ball = geoopt.PoincareBall(c=c)\n self.encoder = MobiusGRU(input_size=input_size,\n hidden_size=cell_hize,\n c=c,\n bidirectional=bidirectional)\n self.decoder = MobiusGRUCell(\n input_size=input_size,\n hidden_size=CELL_SIZE,\n c=c\n )\n self.attention = GlobalAttention_hype(\n dim=input_size,\n c=c\n )\n self.linear = MobiusLinear(CELL_SIZE, input_size)\n self.gen_func = LogSoftmax_hype\n self.setp_size = step_size\n def forward(self, src, tgt, src_lengths):\n tgt = tgt[:-1]\n lengths_list = src_lengths.view(-1).tolist()\n src = 
pack(src, lengths_list)\n memory_bank, h_last = self.encoder(src, src_lengths)\n decoder_output = []\n for idx, emb_t in enumerate(tgt.split(1)):\n if idx == 0:\n rnn_output = self.decoder(emb_t, h_last)\n else:\n rnn_output = self.decoder(emb_t, rnn_output)\n decoder_output.append(self.gen_func(self.linear(rnn_output)))\n\n\n\n","repo_name":"SkyFishMoon/HyAN","sub_path":"rank_test_hype.py","file_name":"rank_test_hype.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"73815008229","text":"\"\"\"Parser generator for ABNF grammars.\"\"\"\n\nimport sys\n\nif sys.version_info >= (3, 8):\n from importlib.metadata import PackageNotFoundError, metadata # pragma: no cover\nelse:\n from importlib_metadata import metadata, PackageNotFoundError # pragma: no cover\n\nfrom abnf.parser import GrammarError, LiteralNode, Node, NodeVisitor, ParseError, Rule\n\n__all__ = [\n \"Rule\",\n \"Node\",\n \"LiteralNode\",\n \"NodeVisitor\",\n \"ParseError\",\n \"GrammarError\",\n \"__version__\",\n]\n\ntry:\n __version__ = metadata(__name__)[\"version\"]\nexcept PackageNotFoundError: # pragma: no cover\n # package is not installed\n __version__ = \"\"\n","repo_name":"declaresub/abnf","sub_path":"src/abnf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"71"} +{"seq_id":"31715258006","text":"# https://www.geeksforgeeks.org/longest-increasing-subsequence-dp-3/\r\n# dynamic programming\r\n# takes 2 sequences of numbers seperated by whitespace and prints the length and longest increasing subsequence\r\n\r\ninp = [int(x) for x in input().split()]\r\n\r\n# create an array to hold lengths of possible sub-sequences and indices of next sub-sequence\r\narr = [(1, i) for i in range(len(inp))]\r\nlongest_length = 0\r\nlongest_index = 0\r\n\r\nfor i in reversed(range(len(inp))):\r\n length = 1\r\n index = i\r\n\r\n for j in range(i+1, len(inp)):\r\n if inp[i] < inp[j]: # we have an increasing sub-sequence\r\n if length < arr[j][0] + 1: # we have a longer sub-sequence\r\n length = arr[j][0] + 1\r\n index = j\r\n\r\n if length > longest_length:\r\n longest_length = length\r\n longest_index = i\r\n\r\n arr[i] = (length, index)\r\n\r\nprint('Number of elements in longest increase subsequence:', longest_length)\r\n\r\nprint('Longest increasing subsequence is:', end=' ')\r\nwhile longest_index != arr[longest_index][1]:\r\n print(inp[longest_index], end=' ')\r\n longest_index = arr[longest_index][1]\r\n\r\nprint(inp[longest_index])\r\n","repo_name":"edwardsong05/competitive-programming","sub_path":"LongestIncreasingSubsequence.py","file_name":"LongestIncreasingSubsequence.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38745852766","text":"import torch\nfrom tqdm import tqdm\nimport numpy as np\nimport torch.nn as nn\nfrom torchsummary import summary\nfrom cnn import ColorNet\nfrom dataset import *\nfrom util import *\n\n\n# Use GPU to train\ndevice = torch.device('cuda:0')\n# Uncomment to use CPU instead\n# device = torch.device('cpu')\n\nmax_pixel_val = torch.tensor(127) # AB channels have expected max value of 127\n\n# Hyperparameters\nlearning_rate = 5e-4\nweight_decay = 0\nnum_epoch = 180\n\nname = 'colorization_net'\nmodel = ColorNet().to(device)\n# visualizing the model\nprint('Your 
network:')\nsummary(model, (1,128,128))\n\n## nn.CrossEntropyLoss() was giving an error, using MSE for now\ncriterion = nn.MSELoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n\ndef train(model, train_loader, val_loader, num_epoch):\n trn_loss_hist = []\n val_loss_hist = []\n print('Beginning training')\n for i in range(num_epoch):\n model.train()\n running_loss = []\n for img_batch, true_ab in tqdm(train_loader):\n img_batch = img_batch.to(device)\n true_ab = true_ab.to(device)\n optimizer.zero_grad()\n output = model(img_batch)\n loss = criterion(output, true_ab)\n running_loss.append(loss.item())\n loss.backward()\n optimizer.step()\n print(\"\\n Epoch {} loss:{}\".format(i+1, np.mean(running_loss)))\n trn_loss_hist.append(np.mean(running_loss))\n val_loss = test(model, val_loader)\n print(\"\\n Val Loss:{}\".format(val_loss))\n val_loss_hist.append(val_loss)\n print('Done training')\n return trn_loss_hist, val_loss_hist\n\n\ndef test(model, loader):\n model.eval()\n running_loss = []\n with torch.no_grad():\n for img_batch, true_ab in tqdm(loader):\n img_batch = img_batch.to(device)\n true_ab = true_ab.to(device)\n output = model(img_batch)\n loss = criterion(output, true_ab)\n running_loss.append(loss.item())\n return np.mean(running_loss)\n\n\n# Expects a batch size of 1 for the test data\ndef evaluate_model(model, loader):\n model.eval()\n psnr_vals = []\n with torch.no_grad():\n for pred_img, true_img in tqdm(loader):\n pred_img = pred_img.to(device)\n true_img = true_img.to(device)\n output = model(pred_img)\n psnr = calc_psnr(output, true_img)\n psnr_vals.append(psnr)\n avg_psnr = np.mean(psnr_vals)\n print(\"\\n Avg PSNR: {}\".format(avg_psnr))\n return avg_psnr\n\n\ndef calc_psnr(pred_img, real_img):\n # Calculate MSE\n sq_diff = torch.square(real_img - pred_img)\n mse = (1 / (real_img.shape[0] * real_img.shape[1] * real_img.shape[2])) * torch.sum(sq_diff)\n return 20 * torch.log10(max_pixel_val) - 10 * torch.log10(mse)\n\ntrain(model, train_loader, val_loader, num_epoch)\ntorch.save(model.state_dict(), \"my_model_state.pth\")\nget_result(test_loader, model, device, folder='output_test')","repo_name":"macekj/eecs442colorize","sub_path":"train_nn.py","file_name":"train_nn.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8809160192","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSupri-D Product \nDate: 6/11/2021\n\n@author: Abdullah Alakeely, \n@ the code here is complementary for the, \n@thesis: Full-Field Analysis: A Machnine Learning Approach\n\nPlease cite the thesis when using the code.\n\n\n# papers used this code\n\nAlakeely, Abdullah , and Roland N. Horne. \"Simulating the \nBehavior of Reservoirs with Convolutional and Recurrent \nNeural Networks.\" SPE Res Eval & Eng 23 (2020):\n 0992–1005. doi: https://doi.org/10.2118/201193-PA\n\n\nAlakeely, Abdullah A., and Roland N. Horne. \"Application of Deep Learning \nMethods in Evaluating Well Production Potential Using Surface Measurements.\" Paper \npresented at the SPE Annual Technical Conference and Exhibition, Virtual, October 2020. 
\ndoi: https://doi.org/10.2118/201785-MS\n\nContact : alakeeaa_at_stanford_dot_edu\\\n\t\t\tor horne_at_stanford_dot_edu\n\"\"\"\n\nimport numpy as np\nfrom sklearn import preprocessing\nimport pandas as pd\nimport os\nimport keras\nimport datetime\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten\nfrom keras.layers import LSTM, Conv1D, SimpleRNN, GRU\nfrom keras.layers import TimeDistributed\n\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\n\nfrom matplotlib import rcParams\nimport matplotlib.pyplot as plt\n\nrcParams['font.family'] = 'sans-serif'\nrcParams['font.sans-serif'] = [\"Times New Roman\"]\n\nclass DataProcess:\n \n\n \n \"\"\" mapping experiment \n \n Attributes\n ----------\n \n # class attributes\n \n input_variables : list, default xinp. The inputs used in this experiment \n output_variables : list, default yout. The outputs used in this experiment \n \n # inputs:\n training_case : scalar, default 1. The case number used in this experiment.\n \n\n \n \n \"\"\"\n \n \n def __init__(self,xinp, yout, days = 365):\n \n \n # days during training\n \n self.days = days\n \n # input variables\n \n self.input_variables =xinp\n \n # output variables\n\n self.output_variables = yout\n \n # input variables size\n\n self.n_x = len(self.input_variables)\n \n \n # output variables size\n \n self.n_y = len(self.output_variables)\n \n self.case = self.load_well_data()\n \n \n # grab input data \n self.x_data = self.extract_variables(self.input_variables)\n \n # grab output data \n\n self.y_data = self.extract_variables(self.output_variables)\n \n # perform normalization of input data \n\n self.X, self.input_scaling_info = self.get_scaled(self.x_data)\n \n # perform normalization of output data \n\n self.y, self.output_scaling_info = self.get_scaled(self.y_data)\n\n def load_well_data(self):\n \"\"\"\n # change this to load new data\n \n data is loaded per well\n \"\"\"\n import pandas as pd\n \n # import the production data\n raw = pd.read_csv('data/l_condensate.csv')\n\n variables = ['minutes']+[list(raw.iloc[4].items())[x][1] for x in range(1,len(list(raw.iloc[4].items())))]\n data = pd.DataFrame()\n\n for col in range(len(list(raw.columns))):\n data[variables[col]] = raw[raw.columns[col]][6:]\n \n data['minutes'] = [x+1 for x in range(1377)]\n \n return data \n \n\n def extract_variables(self,variables, start = 0, end = 1370):\n \n # function that extract desired variables defined in variables\n trainx = np.zeros((end, len(variables)))\n \n for i in range(len(variables)):\n temp = np.reshape(self.case[variables[i]][start:start+end], -1)\n trainx[:,i] = np.array(temp)\n return trainx\n \n def get_scaled(self,matrix):\n \n # prepare an empty array to collect the scaled vectors\n \n X_t = np.empty((1, matrix.shape[-2], matrix.shape[-1]))\n \n # prepare an temporary variable \n\n scaling_ = []\n temp1 = np.zeros(matrix.shape)\n \n for colm in range(temp1.shape[-1]):\n \n scalerx = preprocessing.MinMaxScaler().fit(matrix[:,colm:colm+1])\n \n temp1[:,colm:colm+1] = scalerx.transform(matrix[:,colm:colm+1])\n scaling_.append(scalerx)\n\n X_t[:,] = temp1\n \n return X_t, scaling_\n \n \n def inverse_scaled(self, matrix, scaling_information):\n \n # prepare an empty array to collect the scaled vectors\n \n X_t = np.empty((1, matrix.shape[-2], matrix.shape[-1]))\n \n # prepare an temporary \n\n \n temp1 = np.zeros(matrix.shape)\n \n for colm in range(temp1.shape[-1]):\n \n \n temp1[:,colm:colm+1] = 
scaling_information[colm].inverse_transform(matrix[:,colm:colm+1])\n \n\n X_t[:,] = temp1\n \n return X_t\n \n def reshape_to_three_dimensions(self,data):\n \n return np.reshape(data,(data.shape[0],1,data.shape[1]))\n \n # split dataset into train/test sets\n def split_dataset(self, X, y, split = True, three_dimensions = True,\\\n end_test = 30):\n \n \n\n \n \n train_span = self.days\n \n \n X_train = X[0][:train_span]\n y_train = y[0][:train_span]\n \n X_tes = X[0][:train_span+end_test]\n y_tes = y[0][:train_span+end_test]\n \n\n if three_dimensions == True:\n trainx = self.reshape_to_three_dimensions(X_train) \n testx = self.reshape_to_three_dimensions(X_tes)\n else:\n trainx = X_train \n testx = X_tes\n trainy = y_train\n testy = y_tes\n return trainx, trainy, testx, testy\n\n\n\nclass Models:\n \n def __init__(self ,x_train, y_train, n_h_1 = 4, n_h_2 = 4, activation1 = 'elu',\\\n activation2 = 'elu', cell_type = 'RNN',\\\n batch_size = 32, lr = 0.01):\n self.cell_type = cell_type\n self.n_h_1 = n_h_1\n self.n_h_2 = n_h_2\n self.activation1 = activation1\n self.activation2 = activation2\n self.batch_size = batch_size\n self.lr = lr\n \n \n if self.Algorithm == 'Algorithm II':\n n_timesteps, n_features, n_outputs = x_train.shape[1], x_train.shape[-1],\\\n y_train.shape[-1]\n \n if self.cell_type == 'RNN':\n \n cell_t = SimpleRNN\n \n elif self.cell_type == 'GRU':\n \n cell_t = GRU\n \n elif self.cell_type == 'LSTM':\n \n cell_t = LSTM\n \n self.model = Sequential()\n self.model.add(TimeDistributed(Dense(self.n_h_1, activation='elu',input_shape=(n_timesteps,n_features)))) \n\n self.model.add(cell_t(self.n_h_2, activation='elu',input_shape=(n_timesteps,n_features))) \n self.model.add(Dense(n_outputs))\n\n \n\n elif self.Algorithm == 'Algorithm III':\n n_timesteps, n_features, n_outputs = x_train.shape[1], x_train.shape[-1],\\\n y_train.shape[-1]\n \n if self.cell_type == 'RNN':\n \n cell_t = SimpleRNN\n \n elif self.cell_type == 'GRU':\n \n cell_t = GRU\n \n elif self.cell_type == 'LSTM':\n \n cell_t = LSTM\n \n self.model = Sequential()\n self.model.add(cell_t(self.n_h_1, activation=self.activation1,input_shape=(n_timesteps,n_features))) \n self.model.add(Dense(self.n_h_2, activation=self.activation2,input_shape=(n_features,))) \n\n self.model.add(Dense(n_outputs))\n\n\n\n elif self.Algorithm == 'Algorithm V':\n n_timesteps, n_features, n_outputs = x_train.shape[1], x_train.shape[-1],\\\n y_train.shape[-1]\n \n if self.cell_type == 'RNN':\n \n cell_t = SimpleRNN\n \n elif self.cell_type == 'GRU':\n \n cell_t = GRU\n \n elif self.cell_type == 'LSTM':\n \n cell_t = LSTM\n \n self.model = Sequential()\n self.model.add(cell_t(self.n_h_1, activation=self.activation1,input_shape=(n_timesteps,n_features))) \n\n self.model.add(Dense(n_outputs))\n elif self.Algorithm == 'Algorithm IV':\n n_timesteps, n_features, n_outputs = x_train.shape[0], x_train.shape[-1],\\\n y_train.shape[-1]\n self.model = Sequential()\n self.model.add(Dense(self.n_h_1, activation=self.activation1,input_shape=(n_features,))) \n self.model.add(Dense(n_outputs))\n\n\n elif self.Algorithm == 'Algorithm I':\n n_timesteps, n_features, n_outputs = x_train.shape[0], x_train.shape[-1],\\\n y_train.shape[-1]\n self.model = Sequential()\n self.model.add(Dense(self.n_h_1, activation=self.activation1,input_shape=(n_features,))) \n self.model.add(Dense(self.n_h_2, activation=self.activation2)) \n self.model.add(Dense(n_outputs))\n\n\n\n elif self.Algorithm == 'Algorithm VI':\n n_timesteps, n_features, n_outputs = x_train.shape[1], 
x_train.shape[-1],\\\n y_train.shape[-1]\n \n if self.cell_type == 'RNN':\n \n cell_t = SimpleRNN\n \n elif self.cell_type == 'GRU':\n \n cell_t = GRU\n \n elif self.cell_type == 'LSTM':\n \n cell_t = LSTM\n \n self.model = Sequential()\n self.model.add(cell_t(self.n_h_1,return_sequences = True, activation='elu',input_shape=(n_timesteps,n_features))) \n self.model.add(cell_t(n_outputs, activation=self.activation1)) \n \n \n elif self.Algorithm == 'Algorithm VII':\n n_timesteps, n_features, n_outputs = x_train.shape[1], x_train.shape[-1],\\\n y_train.shape[-1]\n\n \n self.model = Sequential()\n self.model.add(Conv1D(padding = 'causal', filters = self.n_h_1,kernel_size = self.n_h_2, activation=self.activation1,input_shape=(n_timesteps,n_features))) \n self.model.add(Flatten())\n self.model.add(Dense(n_outputs))\n optimizer = keras.optimizers.Adam(lr=self.lr)\n \n self.model.compile(loss='mse', optimizer= optimizer)\n \n \n \n def train_model(self, epochs, verbose):\n \n self.model.fit(self.x_train, self.y_train, epochs=epochs, batch_size=self.batch_size, verbose=verbose, validation_split=0.1)\n \n\n\n\n\nclass Trial(DataProcess, Models):\n \n def __init__(self,xinp, yout, days = 365, n_h_1 = 4,\\\n n_h_2 = 4, activation1 = 'elu',activation2 = 'elu',cell_type = 'RNN', batch_size = 32,\\\n lr = 0.01, end_test = 30, results_directory = 'Experiment1',\\\n file_name = 'trial', Algorithms = 'Algorihtm I'):\n \n # # input variables used in the experiment\n \n # xinp= ['minutes','ChkSize' , 'WhP', 'WhT']\n \n # # output variables used in the experiment\n \n # yout = ['QGas1av', 'QoStk1av','Qw1av']\n \n \n try:\n os.makedirs(results_directory)\n except:\n pass\n # save \n try:\n scores_f = os.path.join(results_directory,'scores')\n os.makedirs(scores_f)\n except:\n pass\n try:\n self.results_df = pd.read_pickle(os.path.join(scores_f,file_name+'.pkl'))\n\n except:\n \n self.results_df = pd.DataFrame()\n \n self.results_df['trial'] = len(self.results_df)+1\n \n \n self.Algorithm = Algorithms\n \n \n DataProcess.__init__(self, xinp, yout,days)\n \n \n if self.Algorithm =='Algorithm VI' \\\n or self.Algorithm =='Algorithm V' \\\n or self.Algorithm =='Algorithm VII' \\\n or self.Algorithm =='Algorithm II'\\\n or self.Algorithm == 'Algorithm III':\n\n self.data = self.split_dataset(self.X, self.y, end_test = end_test)\n elif self.Algorithm =='Algorithm I' or self.Algorithm == 'Algorithm IV':# or 'Algorithm VI' or 'Algorithm V' or 'Algorithm VII' :\n self.data = self.split_dataset(self.X, self.y, three_dimensions = False,end_test = end_test)\n \n self.x_train = self.data[0]\n self.y_train = self.data[1]\n self.x_test = self.data[2]\n self.y_test = self.data[3]\n \n \n Models.__init__(self, self.x_train, self.y_train,n_h_1,n_h_2,activation1, activation2, cell_type, batch_size, lr)\n \n self.training_loss_hist = []\n self.val_loss_hist = []\n \n def train(self,verbose = 0, epochs = 10):\n \n self.train_model(epochs, verbose)\n self.training_loss_hist.extend(self.model.history.history['loss'])\n self.val_loss_hist.extend(self.model.history.history['val_loss'])\n\n \n def get_prediction(self, results_directory, file_name,plot_results = True, return_scores = True, record_predictions = True):\n \n self.yhat = self.inverse_scaled(self.model.predict(self.x_test), self.output_scaling_info)[0]\n \n self.ytrue = self.y_data[:len(self.yhat)]\n \n if record_predictions:\n try:\n predictions_f = os.path.join(results_directory,'predictions')\n os.makedirs(predictions_f)\n except:\n pass\n try:\n self.prediction_df = 
pd.read_pickle(os.path.join(predictions_f,file_name+'P.pkl'))\n \n except:\n \n self.prediction_df = pd.DataFrame()\n \n new_seg = {'index':np.transpose([x+1 for x in range(len(self.ytrue))]),\n 'True':np.transpose(self.ytrue),\n 'predicted':np.transpose(self.yhat),\n }\n self.prediction_df = self.prediction_df.append(new_seg, ignore_index=True)\n \n \n\n \n \n \n \n self.prediction_df.to_excel(os.path.join(predictions_f,file_name+'P.xlsx'))\n self.prediction_df.to_pickle(os.path.join(predictions_f,file_name+'P.pkl'))\n\n if plot_results:\n self.show_plot()\n \n if return_scores:\n return self.get_scores()\n\n\n \n\n \n \n def get_scores(self):\n train_score = self.prediction_score(self.ytrue[:len(self.y_train)],self.yhat[:len(self.y_train)], metric_to_use = 'r2')\n test_score = self.prediction_score(self.ytrue[len(self.y_train):len(self.yhat)],self.yhat[len(self.x_train):len(self.yhat)], metric_to_use = 'r2')\n score = self.prediction_score(self.ytrue,self.yhat, metric_to_use = 'r2')\n return train_score, test_score, score\n \n def show_plot(self):\n plt.figure(figsize = [10,6])\n self.prediction_visual_inspection(self.ytrue, self.yhat, self.days)\n \n \n \n \n def prediction_visual_inspection(self,true_signal, predicted_signal,days,\\\n r = 2, c = 2, font_s = 14,\\\n ):\n \n \n style = {'linestyle':'',\n 'marker':'o',\n 'markersize':2,\n }\n \n \n colors = ['r','g','b']\n number_of_plots = true_signal.shape[-1]\n y_limits = [true_signal.min()*0.95,true_signal.max()*1.05]\n x_limits = [-len(true_signal)*.05,len(true_signal)*1.05]\n \n for x in range(number_of_plots):\n plt.subplot(r,c,x+1)\n plt.plot(true_signal[:,x], color = 'gray', label = '$y$', **style)\n plt.plot(predicted_signal[:,x],color = colors[x], alpha = 0.6, label = '$\\hat{y}$',**style)\n y_limits = [true_signal[:,x].min()*0.95,true_signal[:,x].max()*1.05]\n x_limits = [-len(true_signal[:,x])*.05,len(true_signal[:,x])*1.05]\n plt.ylim(y_limits[0], y_limits[1])\n plt.xlim(x_limits[0], x_limits[1])\n \n plt.xlabel('Time', fontsize = font_s)\n plt.ylabel('bbl/d', fontsize = font_s)\n plt.xticks(fontsize = font_s-2)\n plt.yticks(fontsize = font_s-2)\n \n plt.legend(fontsize = font_s-2)\n plt.axvline(days, linestyle = '--', color = 'k', alpha = 0.5, linewidth = 2)\n plt.tight_layout()\n \n def prediction_score(self, true_signal, predicted_signal, metric_to_use = 'mse'):\n\n number_of_curves = true_signal.shape[-1]\n\n if metric_to_use == 'mse':\n try:\n \n scores_list = [mean_squared_error(true_signal[:,x],predicted_signal[:,x]) for x in range(number_of_curves)]\n except:\n pass\n \n return scores_list\n \n if metric_to_use == 'mae':\n try:\n scores_list = [mean_absolute_error(true_signal[:,x],predicted_signal[:,x]) for x in range(number_of_curves)]\n except:\n pass\n return scores_list\n \n if metric_to_use == 'r2':\n try:\n scores_list = [r2_score(true_signal[:,x],predicted_signal[:,x]) for x in range(number_of_curves)]\n except:\n pass\n return scores_list\n \n def learning_behavior(self, font_s = 14):\n colors = ['r','b']\n y_limits = [-.01,0.1]\n x_limits = [-len(self.training_loss_hist)*.05,len(self.training_loss_hist)*1.05]\n \n plt.plot([x+1 for x in range(len(self.training_loss_hist))],self.training_loss_hist, color = colors[0], label = 'loss')\n plt.plot([x+1 for x in range(len(self.val_loss_hist))],self.val_loss_hist,color = colors[1], alpha = 0.4, label = 'val')\n \n plt.ylim(y_limits[0], y_limits[1])\n plt.xlim(x_limits[0], x_limits[1])\n \n plt.xlabel('epochs', fontsize = font_s)\n plt.ylabel('Mean Squared Error', 
fontsize = font_s)\n plt.xticks(fontsize = font_s-2)\n plt.yticks(fontsize = font_s-2)\n\n plt.legend(fontsize = font_s-2)\n plt.tight_layout()\n \n\n\ndef run_one_trial(xinp,\n yout,\n select,\n days,\n neurons1,\n neurons2,\n memory_cell,\n activation1,\n activation2,\n batch_size,\n learning_rate,\n test_span,\n file_name,\n results_directory,\n epochs,\n stops,\n verbose,\n plot_results,\n record_predictions):\n \n algorithms_list = ['Algorithm I', # feed-forward -> feed-forward -> feed-forward\n 'Algorithm II', # feed-forward -> recurrent -> feed-forward\n 'Algorithm III', # recurrent -> feed-forward -> feed-forward\n 'Algorithm IV', # feed-forward -> feed-forward \n 'Algorithm VI', # recurrent -> recurrent\n 'Algorithm V', # recurrent -> feed-forward\n 'Algorithm VII'] # Conv. 1D --> feed-forward\n \n \n \n\n\n # paramers of the run \n startTime = datetime.datetime.now()\n \n\n params = {\n 'xinp' : xinp,\n 'yout' : yout,\n 'days' : days,\n 'n_h_1' : neurons1,\n 'n_h_2' : neurons2,\n 'activation1' : activation1,\n 'activation2' : activation2,\n 'cell_type' : memory_cell,\n 'batch_size' : batch_size,\n 'lr':learning_rate,\n 'end_test':test_span,\n 'file_name':file_name,\n 'results_directory':results_directory,\n 'Algorithms':algorithms_list[select-1]\n }\n \n params2 = {'epochs':epochs,\n 'verbose':verbose}\n \n \n \n plt.figure()\n try_combination = Trial(**params)\n # input variables used in the experiment\n \n try_combination.xinp = xinp\n try_combination.yout = yout\n\n \n # output variables used in the experiment\n \n yout = ['QGas1av', 'QoStk1av','Qw1av']\n \n for stop in range(stops):\n \n try_combination.train(**params2)\n \n # record and plot last training effort\n if stop == stops-1:\n plot_results = True\n record_predictions = True\n \n # get scores\n scores = try_combination.get_prediction(results_directory, file_name,\\\n plot_results = plot_results, record_predictions = record_predictions)\n \n temp = {'training_score%s'%str(stop+1):scores[0],\n 'testing_score%s'%str(stop+1):scores[1],\n 'curve_score%s'%str(stop+1):scores[2],\n 'mean_training_score%s'%str(stop+1):np.mean(scores[0]),\n 'mean_testing_score%s'%str(stop+1):np.mean(scores[1]),\n 'mean_curve_score%s'%str(stop+1):np.mean(scores[2])\n }\n params.update(temp,join='left')\n \n \n \n \n params.update(params2)\n \n params3 = {'inputs':xinp,\n 'outputs':yout}\n \n params.update(params3)\n \n plt.subplot(2,2,4)\n try_combination.learning_behavior()\n \n try:\n plots_f = os.path.join(results_directory,'plots')\n os.makedirs(plots_f)\n except:\n pass\n plt.savefig(os.path.join(plots_f,file_name+'%s.pdf'%str(len(try_combination.results_df)+1)))\n plt.show()\n \n \n \n try_combination.results_df['timeDiff'] = datetime.datetime.now() - startTime\n \n \n try_combination.results_df = try_combination.results_df.append(params, ignore_index=True)\n \n # save \n try:\n scores_f = os.path.join(results_directory,'scores')\n os.makedirs(scores_f)\n except:\n pass\n try_combination.results_df.to_pickle(os.path.join(scores_f,file_name+'.pkl'))\n try_combination.results_df.to_csv(os.path.join(scores_f,file_name+'.csv'))\n try_combination.results_df.to_excel(os.path.join(scores_f,file_name+'.xlsx'))\n \n \n print('R2 on training data = ', np.nanmean(scores[0]))\n print('R2 on testing data = ', np.nanmean(scores[1]))\n print('R2 on curve = ', np.nanmean(scores[2]))\n\n\n\n print('train : on Gas = {0:0.2f}, Oil = {1:0.2f}, and Water = {2:0.2f}'.format(*scores[0]))\n print('test : on Gas = {0:0.2f}, Oil = {1:0.2f}, and Water = 
{2:0.2f}'.format(*scores[1]))\n print('curve : score on Gas = {0:0.2f}, Oil = {1:0.2f}, and Water = {2:0.2f}'.format(*scores[2]))\n\n\n","repo_name":"alakeeaa/Three-Phase-Flow-form-Wellhead","sub_path":"supporting_modules.py","file_name":"supporting_modules.py","file_ext":"py","file_size_in_byte":23159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"35236454561","text":"import requests\n\n\ndef some_func():\n print('this is the func')\n\n\nif __name__ == '__main__':\n question = input('Would you like to search by (c)ity name or (z)ip?: ')\n city = input(\"What city or zip are you looking for?: \")\n package = {\n 'APPID': \"9ef3311b380d2586bf47ff522529e7a9\",\n }\n\n if question == 'c':\n package['q'] = city\n else:\n package['zip'] = city\n\n r = requests.post('http://api.openweathermap.org/data/2.5/weather', params=package)\n\n data = r.json()\n print(data['main'])\n","repo_name":"ccjoness/CG_Aug-32-Day-Class","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"36041302562","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python\n\n\nfrom TwitterAPI import TwitterAPI\nfrom hcsr04sensor import sensor\nimport RPi.GPIO as GPIO\n\nimport sys\nimport Adafruit_DHT\nimport time\nimport random\n\nfrom auth_talktomangotree import (\n consumer_key,\n consumer_secret,\n access_token,\n access_token_secret\n)\n\nstringToTrack = '#talktomangotree'\n\napi = TwitterAPI(consumer_key, \n consumer_secret,\n access_token,\n access_token_secret)\n\ntrig_pin = 17\necho_pin = 27\ndistance_to_soil = 46.5\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(21, GPIO.IN)\n\nvalue = sensor.Measurement(trig_pin, echo_pin)\nraw_measurement = value.raw_distance()\n\nADJECTIVE2 = (\"fine\", \"happy\", \"good\", \"cool\", \"nice\", \"alive\")\nCOLOUR = (\"red\",\"blue\",\"green\",\"yellow\",\"black\",\"white\",\"grey\",\"pink\",\"purple\",\"rainbow\")\n\ndef tweet_check():\n if \"#temp\" in tweet.split():\n\n tweet_temp = \"Mangotree temp: \" + str(temperature) + \" C\"\n r = api.request('statuses/update', {'status': tweet_temp})\n print(tweet_temp)\n\n if \"#hight\" in tweet.split():\n tweet_hight = \"Mangotree hight: {0:0.1f} centimeters\".format(growing)\n r = api.request('statuses/update', {'status': tweet_hight})\n print(tweet_hight)\n\n if \"#soil\" in tweet.split():\n tweet_soil = \"Mangotree soil: \" + str(soil)\n r = api.request('statuses/update', {'status': tweet_soil})\n print(tweet_soil)\n\n if \"#status\" in tweet.split():\n tweet_status = (\"@\" + str(user) + \" Mangotree status -- \" + \"Air: \" + str(temperature) + \" C - Humitidy: \"\n + str(humidity) + \" % - Hight: {0:0.1f} cm\".format(growing) + \" Soil: \" + str(soil)\n + \" - I like to be \" + random.choice(COLOUR))\n r = api.request('statuses/update', {'status': tweet_status})\n print(tweet_status)\n \nwhile True:\n r = api.request('statuses/filter', {'track':stringToTrack})\n\n humidity, temperature = Adafruit_DHT.read_retry(11, 4)\n value = sensor.Measurement(trig_pin, echo_pin)\n raw_measurement = value.raw_distance()\n \n moisture_value = GPIO.input(21)\n if moisture_value == int(1):\n soil = str(\"Wet\")\n if moisture_value == int(0):\n soil = str(\"Dry\")\n\n metric_distance = value.distance_metric(raw_measurement)\n growing = distance_to_soil - metric_distance\n \n print('Twitter ready!')\n \n for item in r:\n tweet = item['text']\n user = 
item['user']['screen_name'] \n print(tweet)\n print(user)\n tweet_check()\n","repo_name":"larsgimse/talktomangotree","sub_path":"oldcode/talktomangotree_02.py","file_name":"talktomangotree_02.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21031648448","text":"from typing import Any, Callable\nimport requests\nimport json\nfrom dateutil.parser import parse\nimport progressbar\nimport os\nimport re\nimport pickle\nimport itertools\nimport sys\n\n\ndef get_config():\n return json.load(open(\"config.json\"))\n\n\nclass Task:\n\n def __init__(self, parent: \"TodoList\", title: str, id: str,\n createdDateTime: object, **args):\n\n self.parent = parent\n self.title = title\n self.created = parse(createdDateTime)\n self.id = id\n self.args = args\n\n @property\n def tags(self) -> list[str]:\n return re.findall(r\"\\#(\\w+)\", self.title)\n\n\nclass TodoList:\n\n def __init__(self, task_list_gen: Callable[[\"TodoList\"], list[Task]],\n displayName: str, id: str, **args):\n\n self.name = displayName\n self.id = id\n self.args = args\n self.tasks = task_list_gen(self)\n\n\nclass AzureToDo:\n\n GRAPH_URL = \"https://graph.microsoft.com/v1.0/me/todo/\"\n LISTS_URL = GRAPH_URL + \"lists\"\n\n def __init__(self):\n config = get_config()\n self.headers = {\"Authorization\": config['access_token']}\n list_raw = requests.get(self.LISTS_URL, headers=self.headers)\n self.lists = [TodoList(self.get_tasks, **a)\n for a in\n progressbar.progressbar(list_raw.json()['value'])]\n\n def get_tasks(self, parent: TodoList) -> list[Task]:\n tasks_raw = requests.get(self.LISTS_URL + f\"/{parent.id}/tasks\",\n headers=self.headers)\n return [Task(parent, **t) for t in tasks_raw.json()['value']]\n\n def forget_token(self) -> None:\n del self.config['access_token']\n\n @property\n def projects(self) -> list[TodoList]:\n return [f for f in self.lists if f.name in\n self.config.get('projects', [])]\n\n @property\n def next_action(self) -> list[TodoList]:\n return [f for f in self.lists if f.name in\n self.config.get('next_action', [])]\n\n @property\n def waiting(self) -> list[TodoList]:\n return [f for f in self.lists if f.name in\n self.config.get('waiting', [])]\n\n\nclass CLI:\n\n def __init__(self, todo):\n\n self.todo = todo\n self.actions = {'l': {'desc': \"Show ToDo lists\", 'a': self.show_lists},\n 's': {'desc': \"Show stale tasks\", 'a': self.stale},\n 'q': {'desc': \"quit\", 'a': self.quit},\n 'h': {'desc': \"Show help mesage\", 'a': self.help}}\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n\n while True:\n fn = self.actions.get(\n input(\"choose action (h for help, Ctrl+C to quit): \"),\n {'a': self.help})\n fn['a']()\n\n def show_lists(self) -> None:\n for n, l in enumerate(self.todo.lists):\n print(f\"({n:4}) {l.name}\")\n\n def help(self) -> None:\n print(\"available commands:\")\n for a in self.actions:\n print(f\"{a}: {self.actions[a]['desc']}\")\n\n def stale(self) -> None:\n N = int(input(\"How many stale tasks to show (default: 20)\") or 20)\n all_tasks = [t for t in itertools.chain.from_iterable(\n lst.tasks for lst in self.todo.lists)\n if t.args['status'] != 'completed']\n all_tasks.sort(key=lambda t: t.created)\n for t in all_tasks[:N]:\n print(f\"{t.created} -> {t.title} | {t.parent.name}\")\n\n def quit(self) -> None:\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n\n pf = 'tasks.pickle'\n if os.path.exists(pf):\n tasks = pickle.load(open(pf, 'rb'))\n else:\n tasks = AzureToDo()\n 
tasks.forget_token()\n        with open(pf, 'wb') as bf:\n            pickle.dump(tasks, bf)\n    CLI(tasks)()\n","repo_name":"dhesse/powerGToDo","sub_path":"powerGToDo.py","file_name":"powerGToDo.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27855403724","text":"a = 1\nb = 1\nc = 300000 # verified in Python 3.10\nd = 300000\nprint(a is b, c is d)\n# In Python, all the numbers used in a program are stored in a separate place,\n# so when a variable for some number is created, the interpreter checks\n# whether a variable with that number already exists and, if it does, simply points a reference to it.\n# For this reason they are essentially one and the same\n\na, b = 'py', 'py'\nc = ''.join(['p', 'y'])\nprint(a is b, a == c, a is c, a == c)\n# The same thing as with numbers\n# The reason the interpreter counted the last string as a different one (even though it is the same as the first two)\n# is that it started out empty and the result was produced by the transformations in the join method, which made it\n# a different string","repo_name":"yegorkavegetablya/-","sub_path":"Homework/Practice2/tasks/task9.py","file_name":"task9.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"38207828376","text":"import numpy as np\nfrom time import time\nfrom datetime import datetime\nimport time\nfrom nltk.corpus import stopwords\nfrom random import shuffle\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron, LogisticRegression, PassiveAggressiveClassifier\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nimport gensim\nfrom gensim.models.word2vec import Word2Vec # the word2vec model gensim class\nLabeledSentence = gensim.models.doc2vec.LabeledSentence\nfrom gensim.models import Doc2Vec\nfrom tqdm import tqdm\ntqdm.pandas(desc=\"progress-bar\")\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import scale\n\nimport pandas as pd\nimport itertools\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_val_predict, StratifiedKFold\nimport random\nfrom gensim.models import word2vec\nfrom os.path import join, exists, split\nimport os\nfrom sklearn.metrics import accuracy_score, auc, roc_curve, log_loss, confusion_matrix\nimport matplotlib.pyplot as plt\nimport re\nimport nltk\nimport csv\nimport string\nfrom gensim.models import Phrases\nfrom gensim.models import Word2Vec\n\n\ndef plot_confusion_matrix(data, title='_Confusion Matrix_', cmap=plt.cm.Blues, name=''):\n\n    plt.imshow(data, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    labels = np.array(['Negative', 'Positive'])\n    tick_marks = np.arange(len(labels))\n    plt.xticks(tick_marks, labels, rotation=45)\n    plt.yticks(tick_marks, labels)\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    plt.savefig(\"./Plots/\" + name + title + '.png', bbox_inches='tight')\n\n\ndef 
plot_roc_curve(fpr, tpr, roc_auc, name=''):\n plt.figure()\n plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic')\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./Plots/\" + name + '_ROC_Curve.png',\n bbox_inches='tight')\n\n\nclass TextClassifier:\n def __init__(self):\n print('{}: Loading the data'.format((time.asctime(time.localtime(time.time())))))\n # print '{}: Loading the data'.format((time.asctime(time.localtime(time.time()))))\n sentences = []\n labels = []\n sentences_len = []\n true_index = []\n false_index = []\n stripComment = lambda x: x.strip().lower()\n replaceComments = lambda x: x.replace(\";\", ' ').replace(\":\", ' ').replace('\"', ' ').replace('-', ' ').\\\n replace(',', ' ').replace('.', ' ').replace(\"/\", ' ').replace('(', ' ').replace(')', ' ')\n splitCommant = lambda x: x.split(\" \")\n stop = stopwords.words('english')\n stopWordsComment = lambda x: [i for i in x if i not in stop]\n data = pd.read_excel('FinalFeatures.xlsx')\n comment_index = 0\n for index, comment in data.iterrows():\n train_data = comment['comment_body']\n sentence = stripComment(train_data)\n sentence = replaceComments(sentence)\n sentence = splitCommant(sentence)\n sentence = stopWordsComment(sentence)\n remove_list = []\n for i, word in enumerate(sentence):\n if '\\r\\r' in word or word == '':\n remove_list.append(i)\n sentence = [i for j, i in enumerate(sentence) if j not in remove_list]\n sentences.append(sentence)\n labels.append(comment['IsEfficient'])\n sentences_len.append(len(train_data))\n if comment['IsEfficient'] == 1:\n true_index.append(comment_index)\n else:\n false_index.append(comment_index)\n comment_index += 1\n # words = set(itertools.chain(*sentences))\n\n # choose random index for test set\n true_test_index = random.sample(true_index, 110)\n false_test_index = random.sample(false_index, 740)\n\n # create test and train sets\n true_test = list(sentences[i] for i in true_test_index)\n true_label = list(labels[i] for i in true_test_index)\n false_test = list(sentences[i] for i in false_test_index)\n false_label = list(labels[i] for i in false_test_index)\n\n true_train_index = [index for index in true_index if index not in true_test_index]\n false_train_index = [index for index in false_index if index not in false_test_index]\n true_train = list(sentences[i] for i in true_train_index)\n true_train_label = list(labels[i] for i in true_train_index)\n false_train = list(sentences[i] for i in false_train_index)\n false_train_label = list(labels[i] for i in false_train_index)\n\n X_POS = list(itertools.chain(true_train, true_test))\n # Y_train = list(itertools.chain(true_train_label, false_train_label))\n X_NEG = list(itertools.chain(false_train, false_test))\n\n X_POS = self.labelizeComments(X_POS, 'POS')\n X_NEG = self.labelizeComments(X_NEG, 'NEG')\n\n final_sentences = list(itertools.chain(X_POS, X_NEG))\n\n print('{}: Start calculating Doc2Vec'.format((time.asctime(time.localtime(time.time())))))\n number_of_features = 100\n model = Doc2Vec(min_count=2, window=10, size=number_of_features, negative=5, workers=7, iter=55) # documents=final_sentences,\n model.build_vocab(final_sentences)\n #\n print('{}: Start train Doc2Vec'.format((time.asctime(time.localtime(time.time())))))\n for epoch in range(50):\n # model.train(shuffle(final_sentences))\n 
model.train(final_sentences, total_examples=model.corpus_count, word_count=2)\n #\n model.save('d2v100.d2v')\n # model = Doc2Vec.load('comment.d2v')\n\n print('{}: Finish calculating Doc2Vec'.format((time.asctime(time.localtime(time.time())))))\n # Create train numpy\n data_size = len(sentences)\n true_size = len(true_train_index) + len(true_test_index)\n false_size = len(false_train_index) + len(false_test_index)\n self.data = np.zeros((data_size, number_of_features))\n self.labels = np.zeros(data_size)\n \n for i in range(true_size):\n prefix_train_pos = 'POS_' + str(i)\n self.data[i] = model.docvecs[prefix_train_pos]\n self.labels[i] = 1\n\n j = 0\n for i in range(true_size, true_size + false_size):\n prefix_train_neg = 'NEG_' + str(j)\n self.data[i] = model.docvecs[prefix_train_neg]\n self.labels[i] = -1\n j += 1\n\n print(self.labels)\n\n # for Non-Negative values - if we want to train Multinumial NB\n min_max_scale = MinMaxScaler()\n self.data = min_max_scale.fit_transform(self.data)\n\n comments_id = data['comment_id'].values\n\n i = 0\n w2v_id = []\n for sample in self.data:\n w2v_id_sample = sample.tolist()\n w2v_id_sample.append(comments_id[i])\n w2v_id.append(w2v_id_sample)\n i += 1\n\n index = range(number_of_features)\n index.append('comment_id')\n train_vecs_d2vPD = pd.DataFrame.from_records(w2v_id, columns=index)\n final_features = pd.merge(data, train_vecs_d2vPD, on='comment_id')\n final_features.to_csv('100_d2v_scale.csv', encoding='utf-8')\n\n return\n\n def labelizeComments(self, comment, label_type):\n labelized = []\n for i, v in tqdm(enumerate(comment)):\n label = '%s_%s' % (label_type, i)\n labelized.append(LabeledSentence(v, [label]))\n return labelized\n\n ###############################################################################\n # benchmark classifiers\n def benchmark(self, model, model_name='default'):\n print('_' * 80)\n print('{}: Traininig: {}'.format((time.asctime(time.localtime(time.time()))), model_name))\n print(model)\n t0 = time.time()\n # Cross validation part\n k = 100\n predicted = cross_val_predict(model, self.data, self.labels, cv=k)\n score = metrics.accuracy_score(self.labels, predicted)\n train_time = time.time() - t0\n print(\"train and test time: {}\".format(train_time))\n\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(self.labels, predicted, labels=[-1, 1]))\n\n\n model_descr = str(model).split('(')[0]\n print(\"Accuracy: {} (+/- {})\".format(score.mean(), score.std() * 2))\n\n auc = metrics.roc_auc_score(self.labels, predicted, average='samples')\n print('AUC: {}'.format(auc))\n\n return [model_descr, score, auc, train_time]\n\n def ModelsIteration(self):\n results = []\n for model, name in (\n (RidgeClassifier(tol=1e-2, solver=\"sag\"), \"Ridge Classifier\"),\n (Perceptron(n_iter=50), \"Perceptron\"),\n (PassiveAggressiveClassifier(n_iter=50), \"Passive-Aggressive\"),\n (KNeighborsClassifier(n_neighbors=10), \"kNN\"),\n (RandomForestClassifier(n_estimators=10), \"Random forest\"),\n (SVC(C=1e-8, kernel='rbf'), \"SVM with RBF Kernel\")):\n\n print('=' * 80)\n print(name)\n results.append(self.benchmark(model, name))\n\n for penalty in [\"l2\", \"l1\"]:\n print('=' * 80)\n print(\"%s penalty\" % penalty.upper())\n # Train Liblinear model\n results.append(self.benchmark(LinearSVC(loss='squared_hinge', penalty=penalty,\n dual=False, tol=1e-3), 'LinearSVC'))\n\n # Train SGD model\n results.append(self.benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty=penalty), 'SGDClassifier'))\n\n # Train SGD with Elastic Net 
penalty\n        print('=' * 80)\n        print(\"Elastic-Net penalty\")\n        results.append(self.benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty=\"elasticnet\")))\n\n        # Train NearestCentroid without threshold\n        print('=' * 80)\n        print(\"NearestCentroid (aka Rocchio classifier)\")\n        results.append(self.benchmark(NearestCentroid()))\n\n        # Train sparse Naive Bayes classifiers\n        print('=' * 80)\n        print(\"Naive Bayes\")\n        # results.append(self.benchmark(MultinomialNB(alpha=.01), 'MultinomialNB'))\n        results.append(self.benchmark(BernoulliNB(alpha=.01), 'BernoulliNB'))\n        # results.append(self.benchmark(GaussianNB(), 'GaussianNB'))\n\n        print('=' * 80)\n        print(\"LinearSVC with L1-based feature selection\")\n        # The smaller C, the stronger the regularization.\n        # The more regularization, the more sparsity.\n        results.append(self.benchmark(LinearSVC(), 'classification'))\n\n        print('=' * 80)\n        print('Logistic Regression')\n        results.append(self.benchmark(LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n                                                          intercept_scaling=1, penalty='l2', random_state=None,\n                                                          tol=0.0001), 'Logistic Regression'))\n\n        return results\n\n\nif __name__ == '__main__':\n    text_classiier = TextClassifier()\n    text_classiier.ModelsIteration()\n","repo_name":"reutapel/Characterizing-Efficient-Referrals-in-Social-Networks","sub_path":"classifier/doc2vec.py","file_name":"doc2vec.py","file_ext":"py","file_size_in_byte":11798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23311447744","text":"print(\"Enter a number\")\r\n# initialize a variable x with the input from the user\r\nx=int(input())\r\n#check whether the given number is within the required interval\r\nwhile (x>1000000):\r\n    print(\"Enter a number smaller than 1000000\")\r\n    x=int(input())\r\n#initialize the product (which I want to become equal to the given variable)\r\np=1\r\n#initialize a string which will give me the required product\r\nString=\"\"\r\n#loop which gives us numbers up to 1,000,000 (i is the base and j the exponent)\r\nfor i in range(2,1000000):\r\n    #if my base is at most 1000\r\n    if(i<=1000):\r\n        #set the exponent j, which runs from 1-30 (in reverse)\r\n        for j in range(30,0,-1):\r\n            #\r\n            if((i%3!=0 and i%5!=0 and i%7!=0 and i%2!=0) or( i==2 or i==3 or i==5 or i==7)):\r\n                #if the mod of x with i**j is 0 (which I initialize into a variable)\r\n                y=i**j\r\n                if(x%y==0 ) :\r\n                    #build my output string \r\n                    String+=\"(\"+str(i)+\"**\"+str(j)+\")\"\r\n                    #multiply p by the power until I reach my number (x)\r\n                    p*=y\r\n                    #divide the input by the power and make a new x\r\n                    x/=y\r\n    else:\r\n        if((i%3!=0 and i%5!=0 and i%7!=0 and i%2!=0) or( i==2 or i==3 or i==5 or i==7)):\r\n            #likewise for the else, when the base is greater than 1000 and the exponent is 1\r\n            j=1\r\n            if(x%y==0) :\r\n                String+=\"(\"+str(i)+\"**\"+str(j)+\")\"\r\n                p*=y\r\n                x/=y\r\n    #check that the x I have is not 1 and the string is not empty!! display the message below\r\nif(x!=1 and String!=\"\"):\r\n    print(\"This number \"+str(String)+\" leaves \"+str(x)+\" after the split, which cannot be divided by other numbers\")\r\nif(String!=\"\"):\r\n    print(String,p )\r\nelse:\r\n    print(\"No result\")\r\n\r\n","repo_name":"githubakis/Ergasia","sub_path":"Askisi2.py","file_name":"Askisi2.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"40763097218","text":"import sys\ninput = sys.stdin.readline\n\nsys.setrecursionlimit(10000000)\n\ndef dfs(i):\n    global result\n    \n    cycle.append(i)\n    visited[i] = True\n    \n    if visited[students[i]]:\n        if students[i] in cycle:\n            \n            # add the number of students who found a team to result\n            result += len(cycle[cycle.index(students[i]):])\n            return\n        \n    else:\n        dfs(students[i])\n    \n\nT = int(input())\n\nfor _ in range(T):\n    n = int(input())\n    result = 0\n    \n    students = [0] + list(map(int, input().split()))\n    visited = [False] * (n + 1)\n    \n    for i in range(1, n+1):\n        if not visited[i]:\n            cycle = []\n            dfs(i)\n    \n    # total number of students - students who found a team = students left without a team (the answer)\n    print(n - result)","repo_name":"kimyubi/ps","sub_path":"코테 스터디/1주차/9466 텀 프로젝트.py","file_name":"9466 텀 프로젝트.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"13646180819","text":"#Import the reactiveX library\r\nfrom rx import create, operators\r\n\r\n#Function through which the Observable emits the data\r\ndef frequency(observer1, scheduler):\r\n    #Each call to the ON_NEXT function emits one item. The values passed as parameters refer to the student's attendance counts\r\n    observer1.on_next(90)\r\n    observer1.on_next(-22)\r\n    observer1.on_next(68)\r\n    observer1.on_next(-70)\r\n    observer1.on_next(181)\r\n    observer1.on_next(150)\r\n    observer1.on_next(50)\r\n    observer1.on_next(70)\r\n    observer1.on_next(80)\r\n    observer1.on_next(40)\r\n    #The ON_COMPLETE function is called when there are no more items to emit\r\n    observer1.on_completed()\r\n\r\n#The \"create\" function creates an observable that will stream the data through the \"frequency\" function\r\nobservable_1 = create(frequency)\r\n\r\n#The reactiveX framework allows building sets of operators to be applied to the created streams\r\nconjunto1 = observable_1.pipe(\r\n    #The \"filter\" operator restricts the data that is passed on, based on the applied filter; 0 = minimum number of attendances and 180 is the maximum\r\n    operators.filter(lambda i: i >=0 and i <=180),\r\n)\r\n\r\n#In set 2, besides the filter from set 1, we have the MAP operator, which allows manipulating the source data\r\nconjunto2 = observable_1.pipe(\r\n    operators.filter(lambda i: i >=0 and i <=180),\r\n    #Transformation to turn the attendance counts into percentages\r\n    operators.map(lambda j: (j * 100)/180)\r\n)\r\n\r\n#Set 3, in addition to the operators of sets 1 and 2, applies one more filter so that data is emitted only when it meets the established criterion\r\nconjunto3 = observable_1.pipe(\r\n    operators.filter(lambda i: i >=0 and i <=180),\r\n    operators.map(lambda j: (j * 100)/180),\r\n    operators.filter(lambda k: k > 75)\r\n)\r\n\r\n#The \"subscribe\" function displays the data by consuming the operators in the specified set\r\n#To display each set's operator output separately, just remove the comments one at a time\r\n\r\n#For set 
1\r\nconjunto1.subscribe(\r\n    on_next = lambda i: print(\"Student passed with average {0}\".format(i)),\r\n    on_error = lambda e: print(\"ERROR: {0}\".format(e)),\r\n    on_completed = lambda: print(\"Done!\"),\r\n)\r\n\r\n#For set 2\r\n'''conjunto2.subscribe(\r\n    on_next = lambda i: print(\"Student passed with average {0} %\".format(i)),\r\n    on_error = lambda e: print(\"ERROR: {0}\".format(e)),\r\n    on_completed = lambda: print(\"Done!\"),\r\n)'''\r\n\r\n#For set 3\r\n'''conjunto3.subscribe(\r\n    on_next = lambda i: print(\"Student passed with average {0} %\".format(i)),\r\n    on_error = lambda e: print(\"ERROR: {0}\".format(e)),\r\n    on_completed = lambda: print(\"Done!\"),\r\n)'''","repo_name":"wellingtonAS/PLP","sub_path":"Frequencia_Escolar.py","file_name":"Frequencia_Escolar.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"30655180120","text":"import serial\nimport time\nimport csv\nfrom datetime import datetime\nimport random\nimport pymongo\n\n# Initialize MongoDB connection (replace with your MongoDB connection string)\nclient = pymongo.MongoClient(\"mongodb+srv://eyashita_1o:chunmun1010@cluster0.ljpgyjo.mongodb.net/\")\ndb = client[\"Dashboard\"]\ncollection = db[\"Household_data\"]\n\nstart_date = datetime(2021, 1, 1)\nend_date = datetime(2023, 12, 31)\ntimestamp = start_date + (end_date - start_date) * random.random()\nlast_upload_time = timestamp\n\nwhile True:\n    try:\n        voltage1 = round(random.uniform(209, 231), 2)\n        frequency1 = random.randint(45, 55)\n        current1 = round(random.uniform(40, 60), 2)\n        power_factor1 = random.uniform(0.855, 0.945)\n        power1 = round(voltage1 * current1 * power_factor1, 2)\n        energy1 = round(power1 / 1000, 2) # Energy in KWh\n        \n        voltage2 = round(random.uniform(209, 231), 2)\n        frequency2 = random.randint(45, 55)\n        current2 = round(random.uniform(40, 60), 2)\n        power_factor2 = random.uniform(0.855, 0.945)\n        power2 = round(voltage2 * current2 * power_factor2, 2)\n        energy2 = round(power2 / 1000, 2) # Energy in KWh\n        \n        voltage3 = round(random.uniform(209, 231), 2)\n        frequency3 = random.randint(45, 55)\n        current3 = round(random.uniform(40, 60), 2)\n        power_factor3 = random.uniform(0.855, 0.945)\n        power3 = round(voltage3 * current3 * power_factor3, 2)\n        energy3 = round(power3 / 1000, 2) # Energy in KWh\n        \n        voltage4 = round(random.uniform(209, 231), 2)\n        frequency4 = random.randint(45, 55)\n        current4 = round(random.uniform(40, 60), 2)\n        power_factor4 = random.uniform(0.855, 0.945)\n        power4 = round(voltage4 * current4 * power_factor4, 2)\n        energy4 = round(power4 / 1000, 2) # Energy in KWh\n        \n        voltage5 = round(random.uniform(209, 231), 2)\n        frequency5 = random.randint(45, 55)\n        current5 = round(random.uniform(40, 60), 2)\n        power_factor5 = random.uniform(0.855, 0.945)\n        power5 = round(voltage5 * current5 * power_factor5, 2)\n        energy5 = round(power5 / 1000, 2) # Energy in KWh\n        \n        voltage6 = round(random.uniform(209, 231), 2)\n        frequency6 = random.randint(45, 55)\n        current6 = round(random.uniform(40, 60), 2)\n        power_factor6 = random.uniform(0.855, 0.945)\n        power6 = round(voltage6 * current6 * power_factor6, 2)\n        energy6 = round(power6 / 1000, 2) # Energy in KWh\n        \n        voltage7 = round(random.uniform(209, 231), 2)\n        frequency7 = random.randint(45, 55)\n        current7 = round(random.uniform(40, 60), 2)\n        power_factor7 = random.uniform(0.855, 0.945)\n        power7 = round(voltage7 * current7 * power_factor7, 2)\n        energy7 = round(power7 / 1000, 2) # Energy in 
KWh\n \n voltage8 = round(random.uniform(209, 231), 2)\n frequency8 = random.randint(45, 55)\n current8 = round(random.uniform(40, 60), 2)\n power_factor8 = random.uniform(0.855, 0.945)\n power8 = round(voltage8 * current8 * power_factor8, 2)\n energy8 = round(power8 / 1000, 2) # Energy in KWh\n\n data = [\n {'Voltage-1': voltage1, 'Current-1': current1, 'Power-1': power1, 'Energy-1': energy1, 'Frequency-1': frequency1, 'Powerfactor-1': power_factor1,\n 'Voltage-2': voltage2, 'Current-2': current2, 'Power-2': power2, 'Energy-2': energy2, 'Frequency-2': frequency2, 'Powerfactor-2': power_factor2,\n 'Voltage-3': voltage3, 'Current-3': current3, 'Power-3': power3, 'Energy-3': energy3, 'Frequency-3': frequency3, 'Powerfactor-3': power_factor3,\n 'Voltage-4': voltage4, 'Current-4': current4, 'Power-4': power4, 'Energy-4': energy4, 'Frequency-4': frequency4, 'Powerfactor-4': power_factor4,\n 'Voltage-5': voltage5, 'Current-5': current5, 'Power-5': power5, 'Energy-5': energy5, 'Frequency-5': frequency5, 'Powerfactor-5': power_factor5,\n 'Voltage-6': voltage6, 'Current-6': current6, 'Power-6': power6, 'Energy-6': energy6, 'Frequency-6': frequency6, 'Powerfactor-6': power_factor6,\n 'Voltage-7': voltage7, 'Current-7': current7, 'Power-7': power7, 'Energy-7': energy7, 'Frequency-7': frequency7, 'Powerfactor-7': power_factor7,\n 'Voltage-8': voltage8, 'Current-8': current8, 'Power-8': power8, 'Energy-8': energy8, 'Frequency-8': frequency8, 'Powerfactor-8': power_factor8,}\n ]\n\n # Get the current timestamp\n timestamp = datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n\n # Iterate over the data and insert each document into MongoDB\n for row in data:\n row['Timestamp'] = timestamp\n collection.insert_one(row)\n\n print('Data has been written to MongoDB.')\n\n except IndexError:\n print('index ERROR')\n except ValueError:\n print('value ERROR')\n \n # Short delay to avoid busy-waiting\n time.sleep(1)\n","repo_name":"Eyashita/SmartHome_Dashboard","sub_path":"generating_data.py","file_name":"generating_data.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31115904490","text":"\"\"\"\nSet of helper functions to trace cubic and conic arcs described by true type fonts outlines. Each glyph can be\nconverted to a list of vertex arrays, each one representing a closed contour of the glyph outline. Vertex arrays\ncan be further processed to obtain a list of line segments that can be drawn in a plot or using a GL_LINES draw\nprimitive.\n\"\"\"\nimport freetype\nimport numpy as np\nfrom bezier_sampling import bezier_curve\n\n__author__ = \"Javier Felip Leon\"\n__copyright__ = \"Copyright 2020, Javier Felip Leon\"\n__credits__ = [\"Javier Felip Leon\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Javier Felip Leon\"\n__email__ = \"javier.felip.leon@gmail.com\"\n__status__ = \"Development\"\n\n\ndef trace_cubic_off_points(c_points, c_tags, idx, n_segments=5):\n \"\"\"\n Traces the bezier cubic arc represented by the contour off point at idx into n_segments line segments. Details of\n font outline in: https://www.freetype.org/freetype2/docs/glyphs/glyphs-6.html\n\n Two successive conic off points force the creation of a virtual on point at the exact middle of the two off\n points to sample the conic curve. Cubic points must be in pairs and have to be surrounded by on points. 
To avoid\n line segment duplication this function only traces line segments if the c_points[idx] is the second of a pair of\n off cubic points.\n\n :param c_points: Contour points arranged in a (N,2) array.\n :param c_tags: N-sized array with a FreeType tag for each point in c_points.\n :param idx: Index of the contour point used to create the traced segments.\n :param n_segments: Number of segments used to trace the bezier arc. Defaults to 5.\n :return: Set of line segments (M, 2)\n \"\"\"\n sampled_pts = np.array([])\n idx_prev = idx - 1\n idx_next = idx + 1\n # If it is the last point of the contour. Use the first as the pivot\n if idx == len(c_points) - 1:\n idx_next = 0\n\n # Only trace if the point is the second of a pair of off cubic points.\n if c_tags[idx_prev] == freetype.FT_Curve_Tag_Cubic and c_tags[idx] == freetype.FT_Curve_Tag_Cubic:\n sampled_pts = bezier_curve([c_points[idx_prev - 1],\n c_points[idx_prev],\n c_points[idx],\n c_points[idx_next]], n_segments)\n return sampled_pts.reshape(-1)\n\n\ndef trace_conic_off_points(c_points, c_tags, idx, n_segments=5):\n \"\"\"\n Traces the bezier arc represented by the contour off point at idx into n_segments line segments. Details of font\n outline in: https://www.freetype.org/freetype2/docs/glyphs/glyphs-6.html\n\n Two successive conic off points force the creation of a virtual on point at the exact middle of the two off\n points to sample the conic curve. Cubic points must be in pairs and have to be surrounded by on points.\n\n :param c_points: Contour points arranged in a (N,2) array.\n :param c_tags: N-sized array with a FreeType tag for each point in c_points.\n :param idx: Index of the contour point used to create the traced segments.\n :param n_segments: Number of segments used to trace the bezier arc. Defaults to 5.\n :return: Set of line segments (M, 2)\n \"\"\"\n # If it is the last point of the contour. Use the first as the pivot\n idx_prev = idx - 1\n idx_next = idx + 1\n if idx == len(c_points) - 1:\n idx_next = 0\n\n if c_tags[idx_prev] == freetype.FT_Curve_Tag_On:\n point_start = c_points[idx_prev]\n elif c_tags[idx_prev] == freetype.FT_Curve_Tag_Conic:\n point_start = (c_points[idx] + c_points[idx_prev]) / 2\n else:\n raise ValueError(\"While tracing point index %d. Previous point with unsupported type: \" % idx\n + str(c_tags[idx_prev]))\n\n if c_tags[idx_next] == freetype.FT_Curve_Tag_On:\n point_end = c_points[idx_next]\n elif c_tags[idx_next] == freetype.FT_Curve_Tag_Conic:\n point_end = (c_points[idx] + c_points[idx_next]) / 2\n else:\n raise ValueError(\"While tracing point index %d. Previous point with unsupported type: \" % idx\n + str(c_tags[idx_next]))\n\n sampled_pts = bezier_curve([point_start, c_points[idx], point_end], n_segments)\n return sampled_pts.reshape(-1)\n\n\ndef glyph2vertices(glyph, n_segments=5):\n \"\"\"\n Sample glyph outline vertices checking points on the curve and control points for conic and cubic bezier arcs as \n described in: https://www.freetype.org/freetype2/docs/glyphs/glyphs-6.html\n Vertex coordinates are scaled such that the vertical advance is 1 unit and the return is a list of arrays, \n each list element represents a closed contour. Each closed contour is a numpy array of size Nx2 representing the \n set of 2d vertices that form the contour outline.\n \n :param glyph: FreeFont glyph to be traced \n :param n_segments: Number of segments used to sample each bezier arc. Defaults to 5.\n :return: A list of sampled contours. 
Each contour is a numpy array of Nx2 vertices representing a glyph contour.\n \"\"\"\n # Get the points describing the outline\n points = np.array(glyph.outline.points, dtype=np.float32)\n\n # Get contour start indices\n contours = glyph.outline.contours\n\n # Obtain the point tags from the glyph outline description.\n tags = []\n for i, t in enumerate(glyph.outline.tags):\n tags.append(freetype.FT_CURVE_TAG(t))\n\n # Process each contour separately\n prev_c = -1\n c_draw_contours = []\n for c in contours:\n # Extract the points and tags for the current contour\n c_points = points[prev_c + 1:c + 1]\n c_draw_points = np.array([])\n c_tags = tags[prev_c + 1:c + 1]\n\n # Generate points depending on their tag\n for i in range(len(c_points)):\n # If the point is on, just add it\n if c_tags[i] == freetype.FT_Curve_Tag_On:\n c_draw_points = np.concatenate((c_draw_points, c_points[i]))\n\n # If the point is off conic\n elif c_tags[i] == freetype.FT_Curve_Tag_Conic:\n sampled_pts = trace_conic_off_points(c_points, c_tags, i, n_segments)\n c_draw_points = np.concatenate((c_draw_points, sampled_pts))\n\n # If the point is off cubic\n elif c_tags[i] == freetype.FT_Curve_Tag_Cubic:\n sampled_pts = trace_cubic_off_points(c_points, c_tags, i, n_segments)\n c_draw_points = np.concatenate((c_draw_points, sampled_pts))\n\n # Normalize vertices (scale to fit 1 unit height bbox)\n c_draw_points = c_draw_points / glyph.metrics.vertAdvance\n\n c_draw_contours.append(c_draw_points)\n prev_c = c\n\n # Return a list of contour vertices\n return c_draw_contours\n\n\ndef vertices2lines(contours):\n \"\"\"\n Generate a set of 2d lines from a set of closed contours.\n :param contours: List of 2d closed contours. Each contour is an array of flattened 2d points.\n :return: Array of vertices representing lines.\n \"\"\"\n lines = np.array([])\n\n for contour in contours:\n # Draw a line between each contour sampled sequential point\n for i in range(0, len(contour), 2):\n line = contour[i:i+4]\n lines = np.concatenate((lines, line))\n\n # Close the contour\n if len(contour) % 2 != 0:\n lines = np.concatenate((lines, contour[-2:2]))\n else:\n lines = np.concatenate((lines, contour[0:2]))\n\n return lines.flatten()\n","repo_name":"jfelip/font_tracer","sub_path":"font2vertices.py","file_name":"font2vertices.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41058951303","text":"# -*- coding: utf-8 -*-\n\"\"\"\n kingdomlib.views\n ~~~~~~~~~~~~~~~~\n\"\"\"\n\nfrom werkzeug.datastructures import MultiDict\nfrom flask import request\nfrom flask_wtf import FlaskForm\n\nfrom .errors import FormError\n\n\nclass SimpleView(object):\n def __init__(self, name=None):\n self.name = name or ''\n self.deferred = []\n\n def route(self, rule, **options):\n def wrapper(f):\n self.deferred.append((f, rule, options))\n return f\n return wrapper\n\n def register(self, bp, url_prefix=None):\n if url_prefix is None:\n url_prefix = f'/{self.name}'\n\n for f, rule, options in self.deferred:\n endpoint = options.pop('endpoint', f.__name__)\n bp.add_url_rule(url_prefix + rule, endpoint, f, **options)\n\n\nclass SimpleForm(FlaskForm):\n @classmethod\n def create_api_form(cls, obj=None):\n formdata = MultiDict(request.form.to_dict())\n form = cls(formdata=formdata, obj=obj, meta={'csrf': False})\n form._obj = obj\n if not form.validate():\n raise FormError(form)\n return form\n\n def _validate_obj(self, key, value):\n obj = getattr(self, '_obj', 
None)\n return obj and getattr(obj, key) == value\n","repo_name":"xingl01/kingdomlib","sub_path":"kingdomlib/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"32619466451","text":"# coding=utf-8\nfrom django.shortcuts import render, HttpResponse\nfrom .models import OrderItem,Order\nfrom .forms import OrderCreateForm\nfrom cart.cart import Cart\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth import get_user_model\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponse\nimport weasyprint\nfrom django.shortcuts import get_object_or_404\nfrom io import BytesIO\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.core.mail import EmailMessage\n\n#from .tasks import order_created\n\n@login_required\n\ndef order_create(request):\n cart = Cart(request)\n if request.method == 'POST':\n form = OrderCreateForm(request.POST)\n if form.is_valid():\n order = form.save(commit=False)\n form.user = request.user\n form.save()\n for item in cart:\n OrderItem.objects.create(user=request.user,order=order,\n product=item['product'],\n price=item['price'],\n quantity=item['quantity'])\n\n\n email = ('Order nr. {}'.format(order.id),'messagelll','settings.EMAIL_HOST_USER',[order.email])\n subject = 'Pinnochios Pizza & Subs - Order nr. {}'.format(order.id)\n message = ''\n email = EmailMessage(subject,message,'elena.andresprado@gmail.com',[order.email])\n html = render_to_string('orders2/order2/pdf.html', {'order': order})\n out = BytesIO()\n stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + '/css/pdf.css')]\n weasyprint.HTML(string=html).write_pdf(out,stylesheets=stylesheets)\n email.attach('order_{}.pdf'.format(order.id),out.getvalue(),'application/pdf')\n email.send()\n # launch asynchronous task\n #order_created.delay(order.id)\n return render(request,\n 'orders2/order2/created.html',\n {'order': order})\n else:\n form = OrderCreateForm()\n return render(request,\n 'orders2/order2/create.html',\n {'cart': cart, 'form': form})\n\ndef created_list(request): \n user = request.user\n my_orders = Order.objects.filter(user=user)\n my_orderitems = OrderItem.objects.filter(user=user)\n\n return render(request,'orders2/order2/order_detail.html',{'my_orders':my_orders,'user':user,'my_orderitems':my_orderitems,})\ndef orderitems_list(request, id):\n user = request.user\n my_orderitems = OrderItem.objects.filter(user=user)\n orderitems = get_object_or_404(OrderItem, id=id)\n return render(request,'orders2/order2/created_list2.html',{'orderitems':orderitems,'my_orderitems':my_orderitems,})\n\n\n\ndef admin_order_pdf(request, order_id):\n order = get_object_or_404(Order, id=order_id)\n html = render_to_string('orders2/order2/pdf.html',{'order': order})\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\\\"order_{}.pdf\"'.format(order.id)\n weasyprint.HTML(string=html).write_pdf(response,stylesheets=[weasyprint.CSS(settings.STATIC_ROOT + '/css/pdf.css')])\n return response\n","repo_name":"Eapu/pizza","sub_path":"orders2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33916121673","text":"import sys\nsys.stdin = open(\"6692_input.txt\")\n\nT = 
int(input())\n\nfor test_case in range(1, T+1):\n N = int(input())\n answer = 0\n\n for i in range(N):\n pi, xi = map(float, input().split())\n answer += round(pi * xi, 7)\n\n print('#{}'.format(test_case), end=' ')\n print(\"%0.6f\" % answer)","repo_name":"HSx3/SWEA","sub_path":"D3/6692_다솔이의월급상자.py","file_name":"6692_다솔이의월급상자.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31243569113","text":"from PyQt5.QtWidgets import QScrollArea,QFrame,QGridLayout,QHBoxLayout,QPushButton,QLabel\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import pyqtSignal,Qt\nfrom service import makeMd5,addToLoop,session,headers\nimport aiohttp\nimport os\n\n# 一个用于继承的类,方便多次调用。\nclass ScrollArea(QScrollArea):\n \"\"\"包括一个ScrollArea做主体承载一个QFrame的基础类。\"\"\"\n scrollDown = pyqtSignal()\n\n def __init__(self, parent=None):\n super(ScrollArea, self).__init__()\n self.parent = parent\n self.frame = QFrame()\n self.frame.setObjectName('frame')\n\n # 用于发出scroll滑到最底部的信号。\n self.verticalScrollBar().valueChanged.connect(self.sliderPostionEvent)\n\n self.setWidgetResizable(True)\n\n self.setWidget(self.frame)#必须设置Widget\n\n def noInternet(self):\n # 设置没有网络的提示。\n self.noInternetLayout = QGridLayout()\n self.setLayout(self.mainLayout)\n\n self.Tip = QLabel(\"您已进入没有网络的异次元,打破次元壁 →\", self)\n self.TipButton = QPushButton(\"打破次元壁\", self)\n self.TipButton.setObjectName(\"TipButton\")\n\n self.TipLayout = QHBoxLayout()\n self.TipLayout.addWidget(self.Tip)\n self.TipLayout.addWidget(self.TipButton)\n\n # self.indexAllSings.setLayout(self.TipLayout)\n\n self.noInternetLayout.addLayout(self.TipLayout, 0, 0, Qt.AlignCenter|Qt.AlignTop)\n\n self.frame.setLayout(self.noInternetLayout)\n\n def sliderPostionEvent(self):\n if self.verticalScrollBar().value() == self.verticalScrollBar().maximum():\n self.scrollDown.emit()\n\n def maximumValue(self):\n return self.verticalScrollBar().maximum()\n\ncacheFolder = \"cache/imgs\"\ndef checkOneFolder(folderName:str):\n if not os.path.exists(folderName):\n os.makedirs(folderName)\n def _check(func):\n def _exec(*args):\n try:\n func(*args)\n except Exception as e:\n print(e)\n return _exec\n return _check\n## 对的初步探索。\n# 暂只接受http(s)和本地目录。\nclass PicLabel(QLabel):\n\n def __init__(self,parent=None,src=None, width=200, height=200, pixMask=None):\n super().__init__(parent)\n\n self.src = None\n self.width = width\n self.height = height\n\n self.pixMask = None\n if pixMask:\n self.pixMask = pixMask\n if src:\n self.setSrc(src)\n\n if self.width:\n self.setMaximumSize(self.width, self.height)\n self.setMinimumSize(self.width, self.height)\n\n @checkOneFolder(cacheFolder)\n def setSrc(self, src):\n src = str(src)\n if 'http' in src or 'https' in src:\n cacheList = os.listdir(cacheFolder)\n\n name = makeMd5(src)\n localSrc = cacheFolder+'/'+name\n if name in cacheList:\n self.setSrc(localSrc)\n self.src = localSrc\n return\n\n self.loadImg(src,name)\n else:\n self.src = src\n pix = QPixmap(src)\n pix.load(src)\n pix = pix.scaled(self.width, self.height)\n # mask需要与pix是相同大小。\n if self.pixMask:\n mask = QPixmap(self.pixMask)\n mask = mask.scaled(self.width, self.height)\n pix.setMask(mask.createHeuristicMask())\n\n self.setPixmap(pix)\n\n def getSrc(self):\n \"\"\"返回该图片的地址。\"\"\"\n return self.src\n\n @addToLoop\n async def loadImg(self,src,name):\n try:\n async with session[\"session\"].get(src,headers=headers,timeout=60) as response:\n if response.status == 200:\n image_content = await 
response.read()\n else:\n raise aiohttp.ClientError()\n except Exception as e:\n print(e)\n return\n\n width = self.width\n height = self.height\n\n pic = QPixmap()\n pic.loadFromData(image_content)\n localSrc = cacheFolder + '/' + name\n pic.save(localSrc, 'jpg')\n pic = pic.scaled(width, height)\n\n self.src = localSrc\n\n # 上遮罩。\n if self.pixMask:\n mask = QPixmap()\n mask.load(self.pixMask)\n mask = mask.scaled(width, height)\n\n pic.setMask(mask.createHeuristicMask())\n\n self.setPixmap(pic)","repo_name":"lukoou3/MusicPlayer","sub_path":"widgets/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"8902618864","text":"from heapq import *\n\ndef kth_closest(points, k):\n store = []\n kth = []\n for x, y in points:\n d = (x ** 2) + (y ** 2)\n heappush(store, [d, x, y])\n\n for i in range(k):\n kth.append(store[i][1:])\n\n return kth\n\n\nprint(kth_closest([[2,4], [7,1], [0,9], [3,2]], 0))\n","repo_name":"mearafGitHub/Algorithims","sub_path":"KthClosest.py","file_name":"KthClosest.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24895222820","text":"from pygame import *\nfrom random import randint\nclass GameSprite(sprite.Sprite):\n #class constructor\n def __init__(self, player_image, player_x, player_y, size_x, size_y, player_speed):\n super().__init__() #sprite.Sprite.__init__(self)\n self.image = transform.scale(image.load(player_image), (size_x, size_y))\n self.speed = player_speed\n \n #every sprite must have the rect property – the rectangle it is fitted in\n self.rect = self.image.get_rect()\n self.rect.x = player_x\n self.rect.y = player_y\n\n #method drawing the character on the window\n def reset(self):\n window.blit(self.image, (self.rect.x, self.rect.y))\n\n#child class\nclass Paddle (GameSprite):\n #method to control the sprite with arrow keys\n def update_right(self):\n keys = key.get_pressed()\n if keys[K_UP] and self.rect.y > 5:\n self.rect.y -= self.speed\n if keys[K_DOWN] and self.rect.y < win_height - 150:\n self.rect.y += self.speed\n def update_left(self):\n keys = key.get_pressed()\n if keys[K_w] and self.rect.y > 5:\n self.rect.y -= self.speed\n if keys[K_s] and self.rect.y < win_height - 150:\n self.rect.y += self.speed\n\n#interface\nBLACK = (13, 12, 12)\nwin_width = 800\nwin_height = 700\nwindow = display.set_mode((win_width, win_height))\nwindow.fill(BLACK)\n\n\n#create sprites (paddle and balls)\npaddleA_img = \"paddle1.png\"\npaddleB_img = \"paddle2.png\"\nball_img = \"ball.png\"\n\npaddleA = Paddle (paddleA_img, 20, 200, 30, 150, 10)\npaddleB = Paddle (paddleB_img, 750, 200, 30, 150, 10)\nball = GameSprite(ball_img, 330, 200, 50, 50, 50)\n\n\n#game loop\ngame = True\nfinish = False\nclock = time.Clock()\nFPS = 60\n\n#fonts\nfont.init()\nfont = font.Font(\"pdark.ttf\", 35)\nlose1 = font.render('PLAYER 1 LOST!', True, (180, 0, 0))\nlose2 = font.render('PLAYER 2 LOST!', True, (180, 0, 0))\n\nspeed_x = 7\nspeed_y = 7\n\nwhile game:\n for e in event.get():\n if e.type ==QUIT:\n game = False\n\n if finish != True:\n window.fill (BLACK)\n paddleA.update_left()\n paddleB.update_right()\n\n ball.rect.x += speed_x\n ball.rect.y += speed_y\n\n if sprite.collide_rect(paddleA, ball) or sprite.collide_rect(paddleB, ball):\n speed_x *= -1\n speed_y *= 1\n\n #ball bounces when hit the up or bottom wall\n if ball.rect.y > win_height-50 or 
ball.rect.y < 0:\n speed_y *= -1\n\n #if ball flies behind this paddle, display loss condition for player left\n if ball.rect.x < 0:\n finish = False\n window.blit(lose1, (200, 200))\n\n #if the ball flies behind this paddle, display loss condition for player right\n if ball.rect.x > win_width:\n finish = False\n window.blit(lose2, (200, 200))\n\n paddleA.reset()\n paddleB.reset()\n ball.reset()\n\n display.update()\n clock.tick(FPS)","repo_name":"programercoehntan/Ping-Pong-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33822897217","text":"def zeros(n):\n n = 10\n product = 1\n while n > 0:\n product *= n\n n -= 1\n\n result = str(product)[::-1]\n counter = 0\n for digit in result:\n if int(digit) == 0:\n counter += 1\n else:\n break\n return counter\n","repo_name":"rt-jmoors/codewars","sub_path":"number_of_trailing_zeros_n_factorial.py","file_name":"number_of_trailing_zeros_n_factorial.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26271058466","text":"############################################\n# Parser for mystuwe.de #\n############################################\nfrom webcrawler import WebCrawler\nfrom stuweparser import StuweParser\nfrom datetime import *\n\nmorgenstelle = \"http://www.my-stuwe.de/mensa/mensa-morgenstelle-tuebingen\"\nwilhelm = \"http://www.my-stuwe.de/mensa/mensa-wilhelmstrasse-tuebingen/\"\nalldaysWillhelm = \"http://www.my-stuwe.de/mensa/mensa-wilhelmstrasse-tuebingen/?woche=\"+str(datetime.today().isocalendar()[1] + 1)\nalldays = \"http://www.my-stuwe.de/mensa/mensa-morgenstelle-tuebingen/?woche=\"+str(datetime.today().isocalendar()[1] + 1)\n\n\nprint(\"Crawling: \" + alldays)\ncrawler = WebCrawler(alldays)\nprint(\"Crawling: \" + alldaysWillhelm)\ncrawler2 = WebCrawler(alldaysWillhelm)\n\nprint(\"Start xml generation\")\n\nparser = StuweParser(crawler.getHTML())\nparser2 = StuweParser(crawler2.getHTML())\n\ntry:\n\t#parser.generateXML()\n\tparser.generateWeekXML(\"overviewMorgen.xml\")\n\tparser2.generateWeekXML(\"overviewWillhelm.xml\")\n\tprint(\"XML generated\")\nexcept Exception as e:\n\tprint(\"An error occurred while generating xml file\")\n\tprint(e)","repo_name":"NE4Y/Mensa-App-Tuebingen","sub_path":"Python XML Generator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"11146119212","text":"import torch\nimport numpy as np\nfrom torch.utils.data import DataLoader\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport sys, os\n\nsys.path.insert(0, os.path.join(os.getcwd(), 'DNABERT', 'src'))\nsys.path.insert(1, '../')\n\nis_python_3_6 = sys.version_info[0] == 3 and sys.version_info[1] == 6\nif is_python_3_6:\n from transformers import BertModel, BertConfig, DNATokenizer\n\nparent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(2, parent_dir)\n\n \nfrom config.config import DNABERTCONFIG\n\n\n# Class for embedding generation for both dnabert and genslm\nclass Embeddings:\n def __init__(self, conf):\n self.config = conf\n self.model_type = \"dnabert\"\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # gets saved sequences\n def get_sequences(self, data_df):\n ens_ids = 
data_df['GeneID'].values.tolist()\n sequences = data_df['Seq'].values.tolist()\n scores = data_df['Avg Expr Lvl'].values.tolist()\n return ens_ids, sequences, scores\n\n # gets embeddings for dnabert or genslm\n def get_embeddings(self, ens_ids, sequences, scores):\n print(\"Getting embeddings for {} sequences\".format(len(sequences)))\n # load dnabert model from pretrained_model \n cfg = BertConfig.from_pretrained(DNABERTCONFIG())\n self.tokenizer = DNATokenizer.from_pretrained('dna{}'.format(self.config['kmer-size']))\n self.model = BertModel.from_pretrained(self.config['pretrained-model'], config=cfg).to(self.device)\n # set model to eval mode\n self.model.eval()\n self.max_length = self.config['max-sequence-length']\n return self.get_bert_embeddings(ens_ids, sequences, scores)\n\n def get_bert_embeddings(self, ens_ids, sequences, scores):\n embedding_list = []\n for i, sequence in enumerate(tqdm(sequences)):\n # append individual sequence embedding to embedding list\n embedding_list.append(self.individual_sequence_embedding(ens_ids[i], sequence, scores[i]))\n if (i+1) % 4000 == 0:\n print(\"Processed {} sequences\".format(i+1))\n torch.save(embedding_list, self.config['embedding_dir'] + '/embeddings_{}.pt'.format(i+1))\n #if i == len(sequences) - 1:\n # torch.save(embedding_list, self.config['embedding_dir'] + '/embeddings_{}.pt'.format(i+1))\n return embedding_list\n \n def individual_sequence_embedding(self, ens_id, sequence, score):\n total = 1\n for i in range(0,len(sequence),512):\n # get embedding for each 512 length chunk of sequence\n model_input = self.tokenizer.encode_plus(self.get_kmers_from_sequence(sequence), add_special_tokens=True, max_length=self.max_length)[\"input_ids\"]\n model_input = torch.tensor(model_input, dtype=torch.long, device=self.device)\n model_input = model_input.unsqueeze(0)\n with torch.no_grad():\n last_hidden_states = self.model(model_input)\n if i == 0:\n embedding_value = last_hidden_states[1]\n else:\n embedding_value += last_hidden_states[1]\n total += 1\n return (ens_id, embedding_value/total, score)\n \n # join kmers into space separated string \n def get_kmers_from_sequence(self, sequence, k=6):\n return ' '.join([sequence[i:i+k] for i in range(len(sequence)-k+1)])\n","repo_name":"Rahul12344/Transcrormer","sub_path":"src/preprocess/generate_embeddings_from_kmers.py","file_name":"generate_embeddings_from_kmers.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21952570038","text":"import itertools\r\nimport re\r\nfrom copy import deepcopy\r\nimport pytest\r\nimport string\r\n\r\n\r\nclass CreateDictionary:\r\n\r\n def __init__(self):\r\n pass\r\n\r\n @staticmethod\r\n def merge_pairs1(key, value):\r\n\r\n pairs = dict()\r\n for (k, v) in itertools.zip_longest(key, value):\r\n pairs[k] = v\r\n pairs = dict(filter(lambda x: x[0] is not None, pairs.items()))\r\n\r\n return pairs\r\n\r\n @staticmethod\r\n def merge_pairs2(key, values):\r\n\r\n pairs = {k: v for (k, v) in itertools.zip_longest(key, values) if k is not None}\r\n\r\n return pairs\r\n\r\n @staticmethod\r\n def merge_pairs3(key, values):\r\n\r\n pairs = dict()\r\n for i, k in enumerate(key):\r\n if i >= len(values):\r\n pairs[k] = None\r\n else:\r\n pairs[k] = values[i]\r\n\r\n return pairs\r\n\r\n\r\nclass CheckValid:\r\n\r\n def __init__(self, mi=1, mx=20):\r\n self.min = mi\r\n self.mx = mx\r\n\r\n @staticmethod\r\n def check_latin1(txt):\r\n s1 = txt[0]\r\n s2 = txt[-1]\r\n s3 = 
deepcopy(txt)\r\n if ('a' <= s1 <= 'z') or ('A' <= s1 <= 'Z'):\r\n if s2.isalnum():\r\n txt = re.sub(r'[^a-zA-z0-9-\\n.\\n]', \"\", txt)\r\n if s3 == txt:\r\n return True\r\n return False\r\n\r\n @staticmethod\r\n def check_latin2(txt):\r\n txt = txt.lower()\r\n s1 = txt[0]\r\n s2 = txt[-1]\r\n\r\n if 'a' <= s1 <= 'z':\r\n if s2.isalnum():\r\n txt = txt.replace(\"-\", 'a')\r\n txt = txt.replace(\".\", 'a')\r\n s3 = deepcopy(txt)\r\n txt = re.sub(r'[^a-z0-9]', \"\", txt)\r\n if s3 == txt:\r\n return True\r\n return False\r\n\r\n @staticmethod\r\n def check_latin3(txt):\r\n txt = txt.lower()\r\n char_list_lower = [ch for ch in string.ascii_lowercase]\r\n char_list_number = [str(i) for i in range(10)]\r\n valid_char = ['-', '.']\r\n valid_char += char_list_lower + char_list_number\r\n s1 = txt[0]\r\n s2 = txt[-1]\r\n\r\n if s1 in char_list_lower:\r\n if s2 in char_list_lower or s2 in char_list_number:\r\n mark = 1\r\n\r\n for ch in txt:\r\n if ch not in valid_char:\r\n print(ch)\r\n mark = 0\r\n if mark == 1:\r\n return True\r\n\r\n return False\r\n\r\n def check_length(self, txt):\r\n\r\n if len(txt) < self.min or len(txt) > self.mx:\r\n return False\r\n return True\r\n\r\n def check1(self, txt):\r\n\r\n if CheckValid.check_latin1(txt) and self.check_length(txt):\r\n return True\r\n return False\r\n\r\n def check2(self, txt):\r\n\r\n if CheckValid.check_latin2(txt) and self.check_length(txt):\r\n return True\r\n return False\r\n\r\n def check3(self, txt):\r\n\r\n if CheckValid.check_latin3(txt) and self.check_length(txt):\r\n return True\r\n return False\r\n\r\n\r\nclass ReadLogs:\r\n\r\n def __init__(self, fp='./access.log'):\r\n\r\n self.lis = list()\r\n with open(fp, 'r') as fl:\r\n for line in fl:\r\n self.lis.append(line)\r\n\r\n def top_logs(self, k=10):\r\n\r\n umap = {}\r\n for item in self.lis:\r\n ip = item.split(\"-\")[0]\r\n ip = ip[:-1].strip()\r\n if ip not in umap:\r\n umap[ip] = 0\r\n umap[ip] += 1\r\n\r\n umap = {key: value for key, value in sorted(umap.items(), key=lambda item: item[1])}\r\n\r\n top_ip = []\r\n for i, key in enumerate(umap):\r\n if i == k:\r\n return top_ip\r\n top_ip.append(key)\r\n\r\n return top_ip\r\n","repo_name":"abhiishekpal/assignment","sub_path":"startus.py","file_name":"startus.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2922200012","text":"# ====================================================================\n# Script para compilar e carregar script do ESP8266\n# ====================================================================\n# 1 eh o codigo de erro retornado pelo os.system() caso ambos os comandos do arduino-cli deem errado\n# sys.exit() ira retornar 2 na chamada os.system() do main.py indicando erro na execucao\nfrom cmath import log\nimport os\nimport sys\nfrom client_mqtt import *\nfrom usuario import *\nfrom log import *\n\narquivo = \"Arduíno\" \nlogMessage = \"\"\n\nescreve_terminal(\"Comencando processo de compilacao e carga do script do ESP8266\")\n\n\n\n# Altera o usuario/senha no script do esp8266\ntry:\n f1 = open(\"mqtt_esp8266/mqtt_esp8266.ino\", \"r\")\nexcept OSError as e:\n escreve_terminal(\"Script do ESP8266 nao encontrado\")\n escreve_log(arquivo + \",\" + \"Scrip do ESP8266 nao encontrado\")\n sys.exit(2)\n \nlinhas = f1.readlines()\nf1.close()\n\nf1 = open(\"mqtt_esp8266/mqtt_esp8266.ino\", \"w\")\nfor linha in linhas:\n if \"String user\" in linha:\n texto = 'String user = \"' + user + '\";\\n'\n 
f1.write(texto)\n elif \"String passwd\" in linha:\n texto = 'String passwd = \"' + passwd + '\";\\n'\n f1.write(texto) \n else:\n f1.write(linha)\nf1.close()\n\n# Compila o script\nif os.system(\"arduino-cli compile --fqbn esp8266:esp8266:nodemcuv2 mqtt_esp8266\") != 1:\n escreve_terminal(\"O script do ESP8266 foi compilado com sucesso\") \nelse:\n escreve_terminal(\"Erro no processo de compilacao do script do ESP8266\") \n escreve_log(arquivo + \",\" + \"Erro de compilacao do Script do ESP8266\")\n sys.exit(2)\n \n# Localiza a porta COM que esta conectada ao ESP8266\nos.system(\"arduino-cli board list > log.txt\")\nf1 = open(\"log.txt\", \"r\")\nlinhas = f1.readlines()\nlinha = linhas[2]\nporta = linha.split(\" \")[0]\nf1.close()\nos.remove(\"log.txt\")\n\nescreve_terminal(\"A porta identificada do ESP8266 foi a \" + porta)\n\n# Carrega o script\nif os.system(\"arduino-cli upload -p \" + porta + \" --fqbn esp8266:esp8266:nodemcuv2 mqtt_esp8266\") != 1:\n escreve_terminal(\"O script do ESP8266 foi carregado com sucesso\")\n escreve_log(arquivo + \",\" + \"Scrip do ESP8266 carregado com sucesso\")\nelse:\n escreve_terminal(\"Erro no processo de carga do script do ESP8266\") \n escreve_log(arquivo + \",\" + \"Erro de carregamento do script do ESP8266\")\n sys.exit(2)","repo_name":"LuizBudeu/LabDig2_FPGAsteroids","sub_path":"esp_scripts_logs/arduino.py","file_name":"arduino.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"25035275161","text":"# https://leetcode.com/problems/maximum-product-subarray/\nfrom typing import List\n\n\nclass Solution:\n def maxProduct(self, nums: List[int]) -> int:\n if len(nums) == 1:\n return nums[0]\n row1 = [float(\"-inf\") for _ in range(len(nums))]\n row2 = [float(\"-inf\") for _ in range(len(nums))]\n row1[0] = nums[0]\n row2[0] = nums[0]\n for idx in range(1, len(nums)):\n row1[idx] = max(\n nums[idx],\n nums[idx] * nums[idx - 1],\n nums[idx] * row1[idx - 1],\n nums[idx] * row2[idx - 1],\n )\n row2[idx] = min(\n nums[idx] * row1[idx - 1],\n nums[idx] * row2[idx - 1],\n key=abs,\n )\n return max(row1)\n\n\nif __name__ == \"__main__\":\n assert 4 == Solution().maxProduct([3, -1, 4])\n assert 2 == Solution().maxProduct([0, 2])\n assert 108 == Solution().maxProduct([-1, -2, -9, -6])\n assert 24 == Solution().maxProduct([-2, 3, -4])\n assert 0 == Solution().maxProduct([-2, 0, -1])\n assert 24 == Solution().maxProduct([2, -5, -2, -4, 3])\n","repo_name":"mainden7/leetcode_problems","sub_path":"1.dynamic_programming/max_product.py","file_name":"max_product.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"39897347621","text":"__author__ = 'michaelpodolin'\n\nfrom django.conf.urls import url\nfrom promotions import views\n\nurlpatterns = [\n url(r'^index/', views.index, name='index'),\n url(r'^profile/', views.profile, name='profile'),\n url(r'^pt/', views.pt, name='pt'),\n]","repo_name":"mpodolin/CAPApp","sub_path":"promotions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34113051193","text":"import json,os\n\nfrom flask import request\nfrom flask_restplus import Resource\nfrom flask_jwt_extended import jwt_required\n\nfrom ..util.enums import OrganizationType\n\nfrom ..util.dto import ResponseDto, 
OrganizationDto,JsonSerializable #Dto's\nfrom ..util.dto import Organization,Stakeholder,Provider ,Address,StakeholderFilter#Api Models\nfrom ..service.organization_service import getAllOrganizations,getOrganizationById,registerOrganization,getAllProviders,getProviderById,getAllStakeholders,getStakeholderById,registerProvider,registerStakeholder,getStakeholderDetails\n\napi = OrganizationDto.api\n_organization_dto = OrganizationDto.organization \n_stakeholder_dto = OrganizationDto.stakeholder\n_provider_dto = OrganizationDto.provider\n_stakeholder_details_dto = OrganizationDto.stakeholder_details\n \n\n\n@api.route('/')\nclass OrganizationList(Resource):\n    @api.doc('list_of_available_organizations')\n    @api.marshal_list_with(_organization_dto, envelope='data')\n    #@jwt_required\n    def get(self):\n        \"\"\"List all available organizations\"\"\"\n        return getAllOrganizations()\n\n\n@api.route('/provider')\nclass Provider(Resource):\n    @api.doc('provider register')\n    @api.expect(_provider_dto, validate=True)\n    #@jwt_required\n    def post(self):\n        \"\"\"Register Provider\"\"\" \n        \n        data = request.json\n        if(data is not None):\n            _organizationDto=Organization()\n            _organizationDto.name = data['name']\n            _organizationDto.email = data['email']\n            _organizationDto.phone_number = data['phone_number']\n            _organizationDto.user_id = data['user_id']\n            _organizationDto.organization_type = OrganizationType.PROVIDER.value\n\n            # Address Dto\n            _organizationDto.address=Address()\n            _organizationDto.address.address1= data['address1']\n            _organizationDto.address.address2= data['address2']\n            _organizationDto.address.country= data['country']\n            _organizationDto.address.zip_code= data['zip_code']\n            _organizationDto.address.state= data['state']\n\n            # Provider Dto\n            _organizationDto.provider=Provider()\n            \n\n            organization = registerOrganization(_organizationDto) \n            if(organization is not None):\n\n        return \n\n@api.route('/<provider_id>')\nclass ProviderById(Resource):\n    \n    @api.doc('provider')\n    @api.marshal_with(_provider_dto, envelope='data')\n    #@jwt_required\n    def get(self,provider_id): \n        return getProviderById(provider_id) \n        \n@api.route('/allProviders')\nclass Providers(Resource):\n\n    @api.doc('all providers')\n    @api.marshal_list_with(_provider_dto, envelope='data')\n    #@jwt_required\n    def get(self):\n        return getAllProviders()\n\n\n@api.route('/stakeholder')\nclass Stakeholder(Resource):\n\n    @api.doc('stakeholder register')\n    @api.expect(_stakeholder_dto, validate=True)\n    #@jwt_required\n    def post(self):\n        \"\"\"Register Stakeholder\"\"\" \n        data = request.json\n        if(data is not None):\n            _organizationDto=Organization()\n            _organizationDto.name = data['name']\n            _organizationDto.email = data['email']\n            _organizationDto.phone_number = data['phone_number']\n            _organizationDto.user_id = data['user_id']\n            _organizationDto.organization_type = OrganizationType.STAKEHOLDER.value\n\n            # Address Dto\n            _organizationDto.address=Address()\n            _organizationDto.address.address1= data['address1']\n            _organizationDto.address.address2= data['address2']\n            _organizationDto.address.country= data['country']\n            _organizationDto.address.zip_code= data['zip_code']\n            _organizationDto.address.state= data['state']\n\n            # Stakeholder Dto\n            _organizationDto.stakeholder=Stakeholder()\n            _organizationDto.stakeholder.budget=data['budget']\n\n            organization = registerOrganization(_organizationDto) \n            if(organization is not None):\n\n        return\n\n\n@api.route('/<stakeholder_id>')\nclass StakeholderById(Resource):\n    \n    @api.doc('stakeholder')\n    @api.marshal_with(_stakeholder_dto, envelope='data')\n    #@jwt_required\n    def get(self,stakeholder_id):\n        return getStakeholderById(stakeholder_id) \n\n\n@api.route('/allStakeholders')\nclass Stakeholders(Resource):\n\n    @api.doc('all stakeholders')\n    @api.marshal_list_with(_stakeholder_dto, envelope='data')\n    #@jwt_required\n    def get(self): \n        return getAllStakeholders() \n\n@api.route('/getStakeholdersDetails/id=<stakeholder_id>/filter=<filters>')\nclass StakeholdersFilter(Resource):\n    @api.doc('stakeholder details') \n    @api.marshal_list_with(_stakeholder_details_dto, envelope='data')\n    #@jwt_required\n    def get(self,stakeholder_id,filters):\n        \n        stakeholderFilter=StakeholderFilter()\n        stakeholderFilter.id = int(stakeholder_id)\n\n        receivedFilters = filters.split('&&')\n        filtersDic = dict()\n\n        for filter in receivedFilters:\n            filtersDic.update({filter.split('=')[0].replace(\" \", \"\").lower() : int(filter.split('=')[1].lower())})\n\n        stakeholderFilter.provider= filtersDic.get('provider')\n        stakeholderFilter.department= filtersDic.get('department')\n        stakeholderFilter.doctor= filtersDic.get('doctor')\n\n        stakeholders = getStakeholderDetails(stakeholderFilter)\n        \n\n        return stakeholders    \n       ","repo_name":"zrehman-ssi/SIB-DevOps","sub_path":"backend/app/main/controller/organization_controller.py","file_name":"organization_controller.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"11192994971","text":"from odoo import exceptions, fields, models, api, _\r\n\r\nclass MachineTemplate(models.Model):\r\n    _name = 'equipment.template'\r\n\r\n    tender_id = fields.Many2one('crm.lead')\r\n    tender_contract_id = fields.Many2one('tender.contract')\r\n    product_id = fields.Many2one('product.template', domain=\"[('product_type', 'in', ['equipment'])]\", required=True,\r\n                                 related=False, store=True, readonly=False)\r\n    name = fields.Text(compute='_get_name',\r\n                       required=True, related=False, store=False, readonly=False)\r\n    depreciation = fields.Float(compute='_get_depreciation', readonly=True, required=True)\r\n    standard_price = fields.Float(compute='_get_standard_price', readonly=True)\r\n    real_standard_price = fields.Float(required=True)\r\n    year_number = fields.Selection([\r\n        ('1', 1),\r\n        ('2', 2),\r\n        ('3', 3),\r\n        ('4', 4),\r\n        ('5', 5),\r\n        ('6', 6),\r\n        ('7', 7),\r\n    ], copy=False, index=True, required=True, default='3')\r\n\r\n    display_type = fields.Selection([\r\n        ('line_section', \"Section\"),\r\n        ('line_note', \"Note\")], default=False, help=\"Technical field for UX purpose.\")\r\n\r\n    @api.depends('product_id')\r\n    def _get_standard_price(self):\r\n        for line in self:\r\n            line.standard_price = line.product_id.standard_price\r\n\r\n    @api.depends('product_id')\r\n    def _get_name(self):\r\n        for line in self:\r\n            line.name = line.product_id.name\r\n\r\n    @api.depends('year_number', 'product_id', 'real_standard_price')\r\n    def _get_depreciation(self):\r\n        for line in self:\r\n            line.depreciation = line.real_standard_price / int(line.year_number)\r\n","repo_name":"nadir-messaoudene/odoo16_addons","sub_path":"tender/models/equipment_template.py","file_name":"equipment_template.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73791989031","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Wczytanie danych z pliku CSV\ndf = pd.read_csv('cena_metra.csv', encoding='ISO-8859-2', sep=';', engine='python')\n\n\n\n# Utworzenie kolumny 'data' jako połączenie roku i 
kwartału\ndf['data'] = df['Rok'].astype(str) + '-' + (df['Kwartal']*3-2).astype(str).str.zfill(2) + '-01'\ndf['data'] = pd.to_datetime(df['data'])\n\n# Sortowanie danych względem daty\ndf = df.sort_values(by='data')\n\n# Rysowanie wykresu\nplt.figure(figsize=(15, 7))\nplt.plot(df['data'], df['Wartosc'], marker='o')\nplt.title('Cena za 1 m2 powierzchni użytkowej budynku mieszkalnego')\nplt.xlabel('Data')\nplt.ylabel('Cena (zł)')\nplt.grid(True)\nplt.xticks(rotation=45)\nplt.tight_layout()\nplt.show()\n\nprint(df['data'], df['Wartosc'])","repo_name":"PeterPirog/Timeseries-forecast","sub_path":"mieszkania/01_wykres.py","file_name":"01_wykres.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33854830023","text":"# Write a code to load a .json file and print JSON data. Display error if a file is empty or data is not in a json format\n\nimport json\n\ntry:\n with open(\"JSON/File.json\",'r') as file :\n data = json.load(file)\n print(data)\n\nexcept Exception :\n print(\"Error occurred \")","repo_name":"Jeel-Doshi/Training","sub_path":"Python/JSON/Program02.py","file_name":"Program02.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2839455610","text":"from distutils.version import LooseVersion\nfrom feather import read_dataframe as read_feather\nimport pandas as pd\n\nfrom zpylib import data_path as dp\nfrom zpylib import datatools\nfrom zpylib import train_colnames\n\ndef build_version_index(series):\n name = series.name\n values = series.unique().tolist()\n values.sort(key=LooseVersion)\n return pd.DataFrame({name: values, 'new_'+name: range(len(values))})\n\ndef load_features(col):\n return read_feather(dp('raw/train.feather'), columns=[col])\n\ndef make_metadata(col):\n print(\"Beginning column \" + col)\n newcol = 'new_'+col\n X = load_features(col)\n series = X[col]\n md = pd.DataFrame(series.value_counts(dropna=False))\n md['counts'] = md[col]\n md[col] = md.index\n if col in VERSION_COLS:\n idx_df = build_version_index(series)\n md = md.merge(idx_df, how='left')\n md.sort_values(newcol, inplace=True)\n else:\n md[newcol] = range(md.shape[0])\n md.to_csv(dp('metadata/' + col), index=False)\n\n# Constants\nPREDCOLS = train_colnames()\nVERSION_COLS = datatools.identify_version_features()\n[make_metadata(col) for col in PREDCOLS]","repo_name":"zkurtz/kaggle_malware_2019","sub_path":"build_data/raw_feature_metadata_build.py","file_name":"raw_feature_metadata_build.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24619855682","text":"from kitsune.products import api\nfrom kitsune.products.tests import ProductFactory, TopicFactory\nfrom kitsune.sumo.tests import TestCase\nfrom kitsune.sumo.urlresolvers import reverse\n\n\nclass TestProductSerializerSerialization(TestCase):\n def test_product_with_no_image_doesnt_blow_up(self):\n p = ProductFactory(image=None)\n serializer = api.ProductSerializer()\n native = serializer.to_representation(p)\n self.assertEqual(native[\"image\"], None)\n\n def test_product_with_image_works(self):\n # The factory will make a fictional image for the product\n p = ProductFactory()\n serializer = api.ProductSerializer()\n data = serializer.to_representation(p)\n self.assertEqual(data[\"image\"], p.image.url)\n\n\nclass TestTopicListView(TestCase):\n def 
test_it_works(self):\n p = ProductFactory()\n url = reverse(\"topic-list\", kwargs={\"product\": p.slug})\n res = self.client.get(url)\n self.assertEqual(res.status_code, 200)\n\n def test_expected_output(self):\n p = ProductFactory()\n t1 = TopicFactory(product=p, visible=True, display_order=1)\n t2 = TopicFactory(product=p, visible=True, display_order=2)\n\n url = reverse(\"topic-list\", kwargs={\"product\": p.slug})\n res = self.client.get(url)\n self.assertEqual(res.status_code, 200)\n\n self.assertEqual(\n res.data,\n {\n \"count\": 2,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"title\": t1.title,\n \"slug\": t1.slug,\n },\n {\n \"title\": t2.title,\n \"slug\": t2.slug,\n },\n ],\n },\n )\n","repo_name":"mozilla/kitsune","sub_path":"kitsune/products/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":1209,"dataset":"github-code","pt":"71"} +{"seq_id":"34001007314","text":"\"\"\"A command line version of Minesweeper\"\"\"\nimport random\nimport re\nimport time\nfrom string import ascii_lowercase\n\n\nclass Siatka:\n \"\"\"Jest to siatka ktora przetrzymuje informacje co znajduje sie na planszy.\"\"\"\n\n rozmiar_siatki = 5\n numer_min = 2\n miny = []\n flagi = []\n plansza = []\n plansza_widoczna_dla_gracza = []\n\n def get_rozmiar_siatki(self):\n return self.rozmiar_siatki\n\n def get_numer_min(self):\n return self.numer_min\n\n # TODO: def __init__ for assignment of all variables\n\n def stworz_plansze(self, poczatkowa_komorka):\n pusta_siatka = [['0' for i in range(self.rozmiar_siatki)] for i in range(self.rozmiar_siatki)]\n\n self.ustaw_miny(poczatkowa_komorka)\n for i, j in self.miny:\n pusta_siatka[i][j] = 'X'\n\n self.plansza = self.pobierz_dystanse_do_min(pusta_siatka)\n\n def stworz_plansze_dla_gracza(self):\n self.plansza_widoczna_dla_gracza = [[' ' for i in range(self.rozmiar_siatki)] for i in\n range(self.rozmiar_siatki)]\n\n def pokaz_plansze(self, siatka):\n poziomo = ' ' + (4 * self.rozmiar_siatki * '-') + '-'\n\n # Wypisz litery u gory, np. 
'a b c d e f g h i'\n najwyzsza_kolumna = ' '\n\n for i in ascii_lowercase[:self.rozmiar_siatki]:\n najwyzsza_kolumna = najwyzsza_kolumna + i + ' '\n\n print(najwyzsza_kolumna + '\\n' + poziomo)\n\n # Wypisz liczby po lewej stronie siatki\n for idx, i in enumerate(siatka):\n rzad = '{0:2} |'.format(idx + 1)\n\n for j in i:\n rzad = rzad + ' ' + j + ' |'\n\n print(rzad + '\\n' + poziomo)\n\n print('')\n\n def pobierz_losowa_komorke(self):\n a = random.randint(0, self.rozmiar_siatki - 1)\n b = random.randint(0, self.rozmiar_siatki - 1)\n\n return a, b\n\n def pobierz_sasiednie_komorki(self, numer_rzedu, numer_kolumny):\n sasiednie_komorki = []\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n if i == 0 and j == 0:\n continue\n elif -1 < (numer_rzedu + i) < self.rozmiar_siatki and -1 < (numer_kolumny + j) < self.rozmiar_siatki:\n sasiednie_komorki.append((numer_rzedu + i, numer_kolumny + j))\n\n return sasiednie_komorki\n\n def ustaw_miny(self, start):\n sasiednie_komorki = self.pobierz_sasiednie_komorki(*start)\n\n for i in range(self.numer_min):\n komorka = self.pobierz_losowa_komorke()\n while komorka == start or komorka in self.miny or komorka in sasiednie_komorki:\n komorka = self.pobierz_losowa_komorke()\n self.miny.append(komorka)\n\n def pobierz_dystanse_do_min(self, siatka):\n for numer_rzedu, rzad in enumerate(siatka):\n for numer_kolumny, komorka in enumerate(rzad):\n if komorka != 'X':\n # Pobierz wartosci sasiednich komorek\n wartosci = [siatka[r][c] for r, c in self.pobierz_sasiednie_komorki(numer_rzedu, numer_kolumny)]\n\n # Policz jak wiele jest min\n siatka[numer_rzedu][numer_kolumny] = str(wartosci.count('X'))\n\n return siatka\n\n def pokaz_komorki(self, numer_rzedu, numer_kolumny):\n # Wyjdz z funkcji jesli komorka jest juz pokazana\n if self.plansza_widoczna_dla_gracza[numer_rzedu][numer_kolumny] != ' ':\n return\n\n # Pokaz obecna komorke\n self.plansza_widoczna_dla_gracza[numer_rzedu][numer_kolumny] = self.plansza[numer_rzedu][numer_kolumny]\n\n # Pobierz sasiadow jesli komorka jest pusta\n if self.plansza[numer_rzedu][numer_kolumny] == '0':\n for rzad, kolumna in self.pobierz_sasiednie_komorki(numer_rzedu, numer_kolumny):\n # Powtorz funkcje dla kazdego sasiada ktory nie ma flagi\n if self.plansza_widoczna_dla_gracza[rzad][kolumna] != 'F':\n self.pokaz_komorki(rzad, kolumna)\n\n\nclass Gra:\n \"\"\"Klasa ktora zarzadza gra.\"\"\"\n\n instrukcja = (\"Wpisz kolumne a nastepnie numer rzedu (np. b1). \"\n \"Aby postawic lub usunac flage, dodaj 'f' do komorki (np. b1f)\")\n\n def parsuj_dane_wejsciowe(self, wejsciowy_string, rozmiar_siatki):\n komorka = ()\n flaga = False\n wiadomosc = \"Niepoprawna komorka. 
\" + self.instrukcja + '\\n'\n\n wzorzec = r'([a-{}])([0-9]+)(f?)'.format(ascii_lowercase[rozmiar_siatki - 1])\n poprawny_string = re.match(wzorzec, wejsciowy_string)\n\n if wejsciowy_string == 'help':\n wiadomosc = self.instrukcja\n\n elif poprawny_string:\n numer_rzedu = int(poprawny_string.group(2)) - 1\n numer_kolumny = ascii_lowercase.index(poprawny_string.group(1))\n flaga = bool(poprawny_string.group(3))\n\n if -1 < numer_rzedu < rozmiar_siatki:\n komorka = (numer_rzedu, numer_kolumny)\n wiadomosc = ''\n\n return {'komorka': komorka, 'flaga': flaga, 'wiadomosc': wiadomosc}\n\n\n def zagraj(self):\n siatka = Siatka()\n siatka.stworz_plansze_dla_gracza()\n siatka.pokaz_plansze(siatka.plansza_widoczna_dla_gracza)\n\n print(self.instrukcja + \" Wpisz 'help' aby pokazac ta wiadomosc ponownie.\\n\")\n start_czasu = time.time()\n\n while True:\n wpisane_dane = input('Podaj komorke: ')\n wynik = self.parsuj_dane_wejsciowe(wpisane_dane, siatka.rozmiar_siatki)\n\n wiadomosc = wynik['wiadomosc']\n komorka = wynik['komorka']\n\n if komorka:\n print('\\n\\n')\n numer_rzedu, numer_kolumny = komorka\n obecna_komorka = siatka.plansza_widoczna_dla_gracza[numer_rzedu][numer_kolumny]\n flaga = wynik['flaga']\n\n if not siatka.plansza:\n # Utowrzy siatke z minami\n siatka.stworz_plansze(komorka)\n\n if flaga:\n # Dodaj flage jesli komorka jest pusta\n if obecna_komorka == ' ':\n siatka.plansza_widoczna_dla_gracza[numer_rzedu][numer_kolumny] = 'F'\n siatka.flagi.append(komorka)\n # Usun flage jesli flaga jest juz ustawiona\n elif obecna_komorka == 'F':\n siatka.plansza_widoczna_dla_gracza[numer_rzedu][numer_kolumny] = ' '\n siatka.flagi.remove(komorka)\n else:\n wiadomosc = 'Nie mozna wstawic tu flagi'\n\n # Jesli jest tu flaga, pokaz wiadomosc\n elif komorka in siatka.flagi:\n wiadomosc = 'Tu jest flaga'\n\n elif siatka.plansza[numer_rzedu][numer_kolumny] == 'X':\n print('Przegrales.\\n')\n siatka.pokaz_plansze(siatka.plansza)\n return\n\n elif obecna_komorka == ' ':\n siatka.pokaz_komorki(numer_rzedu, numer_kolumny)\n\n else:\n wiadomosc = \"Ta komorka jest juz odkryta\"\n\n if set(siatka.flagi) == set(siatka.miny):\n minutes, seconds = divmod(int(time.time() - start_czasu), 60)\n print(\n 'Wygrales! 
'\n 'Trwalo to {} minut and {} sekund.\\n'.format(minutes,\n seconds))\n siatka.pokaz_plansze(siatka.plansza)\n return\n\n siatka.pokaz_plansze(siatka.plansza_widoczna_dla_gracza)\n print(wiadomosc)\n\n\ngra = Gra()\ngra.zagraj()\n","repo_name":"PatrykIT/MineSweeper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7715,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"32090061087","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\n\nfrom forms import MerchantForm\nfrom models import Merchant\n\n\n@login_required\ndef index(request):\n context = {\n 'page_header': \"Merchants\",\n 'page_title': \"Merchants\",\n 'merchant_list': Merchant.objects.all()\n }\n\n return render(\n request,\n 'merchants/index.html',\n context\n )\n\n\n@login_required\ndef add_edit(request, id=None):\n obj = None\n if id:\n obj = get_object_or_404(Merchant, pk=id)\n\n if request.method == \"POST\":\n form = MerchantForm(request.POST, instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, 'Record has been saved successfully.')\n if id:\n return HttpResponseRedirect(reverse(\"internal:merchants:index\"))\n return HttpResponseRedirect(\".\")\n else:\n messages.error(request, 'Failed to save record. Please correct the errors below.', extra_tags='danger')\n else:\n form = MerchantForm(instance=obj)\n\n context = {\n 'page_header': (\"Edit Other Expenses ID: %s\" % id) if id else \"Add New Merchant\",\n 'page_title': (\"Edit Other Expenses ID: %s\" % id) if id else \"Add New Merchant\",\n 'form': form\n }\n\n return render(\n request,\n 'merchants/add_edit.html',\n context\n )\n","repo_name":"rpribadi/GemahRipah","sub_path":"gemah_ripah/merchants/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23915056847","text":"from .SentenceNode import SentenceNode\nfrom .SentenceGraph import SentenceGraph\n\ndef cross_link_ner_nodes(graph1, graph2):\n common_ners = set.intersection(\n set(graph1.ner_roots.keys()), set(graph2.ner_roots.keys())\n )\n\n for ner in common_ners:\n for e in graph2.ner_clusters[ner]:\n graph2.node_from_sent(e.sent).children.append(graph1.ner_roots[ner])\n for e in graph1.ner_clusters[ner]:\n graph1.node_from_sent(e.sent).children.append(graph2.ner_roots[ner])\n\nclass DocumentGraph(object):\n\n def __init__(\n self, \n doc, \n doc_id=0, \n root_token=\"Root\", \n directed=True,\n featurizer=None,\n self_loop=False,\n ): \n\n self.doc = doc\n self.doc_id = doc_id\n self.directed = directed\n self.featurizer = featurizer\n self.self_loop = self_loop\n\n self.graph = SentenceNode(\n sent=root_token, sent_id=doc_id, featurizer=featurizer, self_loop=False,\n )\n\n self.sub_graphs = list()\n for i, para in enumerate(self.doc):\n sub_graph = SentenceGraph(\n para, \n para_id=(doc_id, i), \n directed=directed,\n featurizer=featurizer,\n self_loop=self_loop,\n )\n self.sub_graphs.append(sub_graph)\n self.graph.children.append(sub_graph.root)\n\n for i in range(len(self.sub_graphs)-1):\n for j in range(i+1, len(self.sub_graphs)):\n cross_link_ner_nodes(self.sub_graphs[i], self.sub_graphs[j])\n\n def __repr__(self):\n return 
str(self.doc_id)\n","repo_name":"krish2100/RC_ReinforcementLearning","sub_path":"DocGraphMultiHopper/DocumentGraph/DocumentGraph.py","file_name":"DocumentGraph.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"23927984993","text":"import os\nimport time\nimport ifctester\nimport ifctester.reporter\nimport ifcopenshell\nfrom flask import Flask, request, send_from_directory\n\napp = Flask(__name__)\n\n\nclass Ifc:\n    ifc = None\n    filepath = None\n\n    @classmethod\n    def get(cls, filepath=None):\n        if filepath is None or filepath == cls.filepath:\n            return cls.ifc\n        cls.filepath = filepath\n        cls.ifc = ifcopenshell.open(filepath)\n        return cls.ifc\n\n\n@app.route(\"/\")\ndef index():\n    with open(\"www/index.html\") as template:\n        return template.read()\n\n\n@app.route(\"/<asset>.<ext>\")\ndef get_asset(asset, ext):\n    if ext in (\"js\", \"css\"):\n        return send_from_directory(\"www\", asset + \".\" + ext)\n\n\n@app.route(\"/audit\", methods=[\"POST\"])\ndef audit():\n    filename = ifcopenshell.guid.new()\n    ids_filepath = os.path.join(\"uploads\", filename + \".ids\")\n    ifc_filepath = os.path.join(\"uploads\", filename + \".ifc\")\n    request.files.get(\"ids\").save(ids_filepath)\n    request.files.get(\"ifc\").save(ifc_filepath)\n\n    start = time.time()\n    specs = ifctester.open(ids_filepath)\n    ifc = Ifc.get(ifc_filepath)\n    print(\"Finished loading:\", time.time() - start)\n    start = time.time()\n    specs.validate(ifc)\n    print(\"Finished validating:\", time.time() - start)\n    start = time.time()\n\n    os.remove(ids_filepath)\n    os.remove(ifc_filepath)\n\n    engine = ifctester.reporter.Json(specs)\n    engine.report()\n    return engine.to_string()\n","repo_name":"IfcOpenShell/IfcOpenShell","sub_path":"src/ifctester/webapp/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":1412,"dataset":"github-code","pt":"71"}
+{"seq_id":"35247392007","text":"from __future__ import print_function\n\nimport mock\nimport os\n\nfrom chromite.cbuildbot import constants\nfrom chromite.lib import cros_test_lib\nfrom chromite.lib import git\nfrom chromite.scripts import cbuildbot\n\n\n# pylint: disable=protected-access\n\n\nclass IsDistributedBuilderTest(cros_test_lib.TestCase):\n  \"\"\"Test for cbuildbot._IsDistributedBuilder.\"\"\"\n\n  # pylint: disable=W0212\n  def testIsDistributedBuilder(self):\n    \"\"\"Tests for _IsDistributedBuilder() under various configurations.\"\"\"\n    parser = cbuildbot._CreateParser()\n    argv = ['x86-generic-paladin']\n    (options, _) = cbuildbot._ParseCommandLine(parser, argv)\n    options.buildbot = False\n\n    build_config = dict(pre_cq=False,\n                        manifest_version=False)\n    chrome_rev = None\n\n    def _TestConfig(expected):\n      self.assertEquals(expected,\n                        cbuildbot._IsDistributedBuilder(\n                            options=options,\n                            chrome_rev=chrome_rev,\n                            build_config=build_config))\n\n    # Default options.\n    _TestConfig(False)\n\n    build_config['pre_cq'] = True\n    _TestConfig(True)\n\n    build_config['pre_cq'] = False\n    build_config['manifest_version'] = True\n    # Not running in buildbot mode even though manifest_version=True.\n    _TestConfig(False)\n    options.buildbot = True\n    _TestConfig(True)\n\n    for chrome_rev in (constants.CHROME_REV_TOT,\n                       constants.CHROME_REV_LOCAL,\n                       constants.CHROME_REV_SPEC):\n      _TestConfig(False)\n\n\nclass FetchInitialBootstrapConfigRepoTest(cros_test_lib.MockTempDirTestCase):\n  \"\"\"Test for cbuildbot._FetchInitialBootstrapConfig.\"\"\"\n\n\n  def setUp(self):\n    
self.config_dir = os.path.join(self.tempdir, 'config')\n\n self.PatchObject(constants, \"SOURCE_ROOT\", self.tempdir)\n self.PatchObject(constants, \"SITE_CONFIG_DIR\", self.config_dir)\n self.mockGit = self.PatchObject(git, \"RunGit\")\n\n def testDoesClone(self):\n # Test\n cbuildbot._FetchInitialBootstrapConfigRepo('repo_url', None)\n # Verify\n self.mockGit.assert_called_once_with(\n self.config_dir, ['clone', 'repo_url', self.config_dir])\n\n def testDoesCloneBranch(self):\n # Test\n cbuildbot._FetchInitialBootstrapConfigRepo('repo_url', 'test_branch')\n # Verify\n self.assertEqual(\n self.mockGit.mock_calls,\n [mock.call(self.config_dir, ['clone', 'repo_url', self.config_dir]),\n mock.call(self.config_dir, ['checkout', 'test_branch'])])\n\n def testNoCloneForRepo(self):\n # Setup\n os.mkdir(os.path.join(self.tempdir, '.repo'))\n # Test\n cbuildbot._FetchInitialBootstrapConfigRepo('repo_url', None)\n # Verify\n self.assertEqual(self.mockGit.call_count, 0)\n\n def testNoCloneIfExists(self):\n # Setup\n os.mkdir(os.path.join(self.tempdir, 'config'))\n # Test\n cbuildbot._FetchInitialBootstrapConfigRepo('repo_url', None)\n # Verify\n self.assertEqual(self.mockGit.call_count, 0)\n","repo_name":"metux/chromium-suckless","sub_path":"third_party/chromite/scripts/cbuildbot_unittest.py","file_name":"cbuildbot_unittest.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"} +{"seq_id":"21299827996","text":"def docstr_demo():\n \"\"\"\n this funtion is to demonstrate how docstings work\n :return: This function returns nothing\n \"\"\"\n pass\n\n\ndef greater(a, b):\n \"\"\"\n this function takes 2 numbers and returns the greater number\n parameter: int, float\n\n :return: return the greater number\n \"\"\"\n if a > b:\n return a\n else:\n return b\n\n\ndef greaater_3(a, b, c):\n rslt_1 = greater(a, b)\n rslt_2 = greater(rslt_1, c)\n return rslt_2\n\n\ndef printGrade(score):\n if score < 0 or score > 100:\n print(\"invalid entry\")\n return None\n elif score > 90:\n print(\"You scored an A\")\n elif 90 >= score > 70:\n print(\"you scored a B\")\n elif 70 >= score > 60:\n print(\"you scored a C\")\n elif 60 >= score > 45:\n print(\"you scored a D\")\n else:\n print(\"you scored an F\")\n\n\nprint(\"this is to demo function control\")\nname = \"olumide\"\nif name.count(\"e\") >= 1:\n printGrade(110)\n\n\n# default variable\ndef RectangleArea(L=1, B=2):\n samp_var = 14\n return L * B\n\n\n# global and local variables\nglobalVar = 1\n\n\ndef func():\n LocalVar = 2\n print(globalVar) # accesed inside the function\n print(LocalVar)\n print(\"This is the way global var is accesed within the funciton\")\n return LocalVar\n\n\ndef main():\n docstr_demo()\n greater()\n greaater_3()\n RectangleArea()\n globalVar = 1\n func()","repo_name":"isadesina/my_python_codes","sub_path":"functions_lessons.py","file_name":"functions_lessons.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"70466374629","text":"#! 
/usr/bin/env python\n\nimport argparse\nimport os\nfrom collections import OrderedDict\nimport json\nfrom astropy.io import fits\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--beagle-file',\n help=\"Name of the Beagle file containing the model SEDs for which EWs must be computed.\",\n action=\"store\", \n type=str, \n nargs=\"+\",\n dest=\"beagle_file\", \n required=True\n )\n\n parser.add_argument(\n '--json-file',\n help=\"JSON file containing the list of emission lines for which EWs will be computed.\",\n action=\"store\", \n type=str, \n dest=\"json_file\", \n required=True\n )\n\n\n # Get parsed arguments\n args = parser.parse_args() \n\n # Load the list of lines for which we compute the EWs\n with open(args.json_file) as f:\n lines = json.load(f, object_pairs_hook=OrderedDict)\n \n for f in args.beagle_file:\n\n hdulist = fits.open(f) \n wl = np.ravel(hdulist['FULL SED WL'].data[0][:])\n n_rows = len(hdulist[1].data.field(1))\n\n # Create an empty dictionary of numpy array which will containe the EWs\n EWs = OrderedDict()\n integrated_fluxes = OrderedDict()\n for line in lines:\n EWs[line] = np.zeros(n_rows, dtype=np.float32)\n\n SEDs = hdulist['FULL SED'].data[:,:]\n\n # Cycle across all the lines for which we compute the EWs\n for key, value in lines.iteritems():\n\n # Compute the average left continuum\n il0 = np.searchsorted(wl, value[\"continuum_left\"][0]) ; il0 -= 1\n il1 = np.searchsorted(wl, value[\"continuum_left\"][1])\n if il0 == il1:\n il0 -= 1\n flux_left = np.ravel(np.trapz(SEDs[:,il0:il1+1], x=wl[il0:il1+1], axis=1)) / (wl[il1]-wl[il0])\n wl_left = 0.5*(wl[il0]+wl[il1])\n\n # Compute the average right continuum\n ir0 = np.searchsorted(wl, value[\"continuum_right\"][0]) ; ir0 -= 1\n ir1 = np.searchsorted(wl, value[\"continuum_right\"][1])\n if ir0 == ir1:\n ir0 -= 1\n flux_right = np.ravel(np.trapz(SEDs[:,ir0:ir1+1], x=wl[ir0:ir1+1], axis=1)) / (wl[ir1]-wl[ir0])\n wl_right = 0.5*(wl[ir0]+wl[ir1])\n\n # Approximate the continuum with a straght line\n grad = (flux_right-flux_left)/(wl_right-wl_left)\n intercept = flux_right - grad*wl_right\n \n #### Compute EW\n i0 = np.searchsorted(wl, value[\"wl_range\"][0]) ; i0 -= 1\n i1 = np.searchsorted(wl, value[\"wl_range\"][1])\n n_wl = i1-i0+1\n\n # Repeat the wl array along the 0 axis to be able to run the algorithms over all rows at the same time\n wl_repeat = np.repeat(wl[np.newaxis, i0:i1+1], n_rows, axis=0)\n\n # Interpolate the SED at the edges to have to right integration limits\n SED = np.copy(SEDs[:,i0:i1+1])\n SED[:,0] = SED[:,0] + (SED[:,1]-SED[:,0])/(wl_repeat[:,1]-wl_repeat[:,0]) * (value[\"wl_range\"][0]-wl_repeat[:,0])\n SED[:,-1] = SED[:,-2] + (SED[:,-2]-SED[:,-1])/(wl_repeat[:,-2]-wl_repeat[:,-1]) * (value[\"wl_range\"][1]-wl_repeat[:,-2])\n\n # Build the actual wl array over which you will perform the integration\n wl_ = np.zeros(n_wl)\n wl_[0] = value[\"wl_range\"][0]\n wl_[-1] = value[\"wl_range\"][1]\n wl_[1:-1] = wl[i0+1:i1]\n wl_ = np.repeat(wl_[np.newaxis,:], n_rows, axis=0)\n\n # Linear function approximating the continuum\n grad_ = np.repeat(grad[:, np.newaxis], n_wl, axis=1)\n intercept_ = np.repeat(intercept[:, np.newaxis], n_wl, axis=1)\n \n integrand = 1.0 - SED/(grad_*wl_+intercept_)\n EW = np.ravel(np.trapz(integrand, x=wl_, axis=1))\n EWs[key] = -EW\n\n integrand = SED-(grad_*wl_+intercept_)\n integrated_flux = np.ravel(np.trapz(integrand, x=wl_, axis=1))\n integrated_fluxes[key] = 
integrated_flux\n\n\n # Create a list of columns from the dictionary containing the EWs\n cols = list()\n for key, value in EWs.iteritems():\n col = fits.Column(name=str(key), format='E', array=value)\n cols.append(col)\n\n # Create a new binary table HDU \n columns = fits.ColDefs(cols)\n new_hdu = fits.BinTableHDU.from_columns(columns)\n new_hdu.name = 'EQUIVALENT WIDTHS'\n\n # Add the new HDU to the Beagle file\n if new_hdu.name in hdulist:\n hdulist[new_hdu.name] = new_hdu\n else:\n hdulist.append(new_hdu)\n\n # Create a list of columns from the dictionary containing the integrated fluxes\n cols = list()\n for key, value in integrated_fluxes.iteritems():\n col = fits.Column(name=str(key), format='E', array=value)\n cols.append(col)\n\n # Create a new binary table HDU \n columns = fits.ColDefs(cols)\n new_hdu = fits.BinTableHDU.from_columns(columns)\n new_hdu.name = 'INTEGRATED FLUXES'\n\n # Add the new HDU to the Beagle file\n if new_hdu.name in hdulist:\n hdulist[new_hdu.name] = new_hdu\n else:\n hdulist.append(new_hdu)\n\n hdulist.writeto(f, overwrite=True)\n\n hdulist.close()\n\n\n\n","repo_name":"jacopo-chevallard/home","sub_path":"bin/add_EWs_to_beagle_output.py","file_name":"add_EWs_to_beagle_output.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"22217325308","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nEmailParser\n===========\nEmailParser supplies the following methods:\n* EmailParser.parse(): extract the recipient, the subject, the payload and all\n valid urls from an email string.\n \n*Note*: Please always follow PEP8.\n\n@author: Philipp Konrad\n'''\nfrom builtins import object\nimport email\nimport unittest\nimport re\nimport doctest\n\n\nclass EmailParser(object):\n '''Please refer to the classmethod `EmailParser.parse()`.'''\n\n @classmethod\n def parse(cls, string_message):\n '''\n EmailParser.parse()\n ===================\n The classmethod takes a string representing an email and returns\n all containing urls in a list::\n\n >>> email_string = open('test_googlealert/test1', 'r').read()\n >>> parsed_data = EmailParser.parse(email_string)\n >>> parsed_data['urls']\n ['http://www.militaryaerospace.com/articles/2013/03/DARPA-machine-learning.html']\n\n Another possibility is to parse a whole email and it's content.\n The library returns a dictionary with the fields `urls` and \n `email_body`::\n\n >>> email_string = open('test_googlealert/test2', 'r').read()\n >>> parsed_data = EmailParser.parse(email_string)\n >>> parsed_data['urls']\n ['http://www.wu.ac.at', 'http://www.tuwien.ac.at/en/']\n >>> parsed_data['email_payload']\n 'This is an email body.'\n\n Finally, the classmethod's returned dictionary contains an \n `email_subject` key and an `email_recipient`::\n\n >>> email_string = open('test_googlealert/test3', 'r').read()\n >>> parsed_data = EmailParser.parse(email_string)\n >>> parsed_data['email_subject']\n 'Very important news!'\n\n >>> parsed_data['email_recipient']\n 'bicycle_repair_man@pythonmail.com'\n '''\n parsed_message = email.message_from_string(string_message)\n returned_data = dict()\n valid_urls = None\n\n if cls._is_email(parsed_message):\n email_payload = parsed_message.get_payload()\n valid_urls = cls._find_valid_urls(email_payload)\n\n returned_data['email_subject'] = parsed_message['subject']\n returned_data['email_recipient'] = parsed_message['To']\n returned_data['urls'] = valid_urls\n returned_data['email_payload'] = 
cls._clean_payload(email_payload,\n valid_urls)\n else:\n raise TypeError('The supplied string is not a valid email.')\n\n assert len(returned_data) == 4\n return returned_data\n\n @classmethod\n def _is_email(cls, possible_email_obj):\n try:\n if '@' in possible_email_obj['To']:\n is_email = True\n except TypeError as e:\n is_email = False\n\n return is_email\n\n @classmethod\n def _url_is_from_google(cls, url):\n return url.startswith('http://news.google') or \\\n url.startswith('http://www.google')\n\n @classmethod\n def _find_valid_urls(cls, email_payload):\n found_urls = re.findall('(http://.*)', email_payload)\n\n retval = []\n for url in found_urls:\n if not cls._url_is_from_google(url):\n if url.endswith('>'):\n retval.append(url.rstrip('>'))\n else:\n retval.append(url)\n\n return retval\n\n @classmethod\n def _clean_payload(cls, email_payload, urls_to_clean):\n\n returned_string = []\n\n # iterate over email payload and only add lines without url to\n # returned body.\n for line in email_payload.splitlines():\n\n for url in urls_to_clean:\n if url in line or 'http://' in line:\n break\n else:\n returned_string.append(line)\n\n return '\\n'.join(returned_string)\n\n\nif __name__ == '__main__':\n doctest.testmod()\n","repo_name":"weblyzard/ewrt","sub_path":"src/eWRT/ws/googlealert/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"71"} +{"seq_id":"73155198951","text":"import random\r\nimport copy\r\nimport numpy as np\r\n\r\n\r\nclass Ant:\r\n\r\n def __init__(self, index):\r\n self.index = index # 蚂蚁的序号\r\n self.init_Ant()\r\n\r\n # 初始化蚂蚁的各个数据结构,随机选择一个出发点\r\n def init_Ant(self):\r\n self.path = [] # 当前路径\r\n self.total_distance = 0\r\n self.unvisited = np.ones(city_size, int) # 还没被访问的城市(1代表未访问,0代表已访问)\r\n\r\n first_city = random.randint(0, city_size - 1) # 随机一个起始城市序号\r\n # 更新信息\r\n self.current_city = first_city\r\n self.path.append(first_city)\r\n self.unvisited[first_city] = 0\r\n\r\n # 选择下一个城市\r\n # return 0 :全部访问完\r\n # return 1:还要继续游历\r\n def move_to_next(self):\r\n\r\n # 第一步:计算当前城市到其他城市的概率\r\n p_numerator = np.zeros(city_size) # 选择概率的分子\r\n p_denominator = 0.0 # 选择概率的分母(分子之和)\r\n trans_p = np.zeros(city_size) # 概率矩阵\r\n\r\n for i in range(city_size):\r\n # 如果第i个城市已经访问过,则再访问i的概率为0\r\n if self.unvisited[i] == 0:\r\n p_numerator[i] = 0\r\n else:\r\n # 随机比例规则公式\r\n p_numerator[i] = np.dot(pheromone_tb[self.current_city][i], a) * np.dot(eta[self.current_city][i], b)\r\n p_denominator += p_numerator[i] # 将分子累加\r\n\r\n # 分母等于0说明每个城市都已经访问过了\r\n if p_denominator != 0:\r\n trans_p = p_numerator / p_denominator\r\n else:\r\n return 0\r\n\r\n # 第二步:根据概率轮盘赌选下一个城市\r\n next_city = np.random.choice(np.array(range(city_size)), size=1, replace=True, p=trans_p)[0]\r\n\r\n # 第三步:移动到下一个城市\r\n self.path.append(next_city)\r\n self.unvisited[next_city] = 0\r\n self.current_city = next_city\r\n return 1\r\n\r\n # 计算路径总距离\r\n def get_distance(self):\r\n for i in range(-1, len(self.path) - 1):\r\n self.total_distance += map[self.path[i]][self.path[i + 1]]\r\n\r\n def travel(self):\r\n # 初始化蚂蚁,选择出发点\r\n self.init_Ant()\r\n\r\n flag = 1\r\n # 蚂蚁开始周游\r\n while flag == 1:\r\n flag = self.move_to_next()\r\n\r\n # 计算路径,输出结果\r\n self.get_distance()\r\n\r\n\r\n# 更新信息素\r\ndef update_pheromone():\r\n global ant_pop\r\n global pheromone_tb\r\n\r\n # 信息素增加矩阵\r\n pheromone_increase = np.zeros((city_size, city_size))\r\n\r\n for ant in ant_pop:\r\n path_temp = copy.deepcopy(ant.path)\r\n for i 
in range(-1, len(path_temp) - 1):\r\n start = path_temp[i]\r\n end = path_temp[i + 1]\r\n pheromone_increase[start][end] += Q / ant.total_distance\r\n # 信息量更新公式\r\n pheromone_tb = (1 - r) * pheromone_tb + pheromone_increase\r\n\r\n\r\ndef searchTSP():\r\n global best_distance, best_path\r\n # 迭代搜索\r\n i = time\r\n while best_distance > 21 and i > 0:\r\n i -= 1\r\n \"\"\"\r\n print(\"第%d次迭代:\" % (time - i + 1))\r\n print(\"信息素矩阵:\")\r\n print(pheromone_tb)\r\n \"\"\"\r\n\r\n for ant in ant_pop:\r\n ant.travel()\r\n if ant.total_distance < best_distance:\r\n best_distance = ant.total_distance\r\n best_path = copy.deepcopy(ant.path)\r\n\r\n # 本代的所有蚂蚁都周游完,再更新信息素\r\n update_pheromone()\r\n\r\n \"\"\"print(\"本次迭代的结果:\")\r\n print(best_path)\r\n print(best_distance)\r\n\r\n print(\"搜索完成,最佳路径及其长度为:\")\r\n print(best_distance)\r\n print(best_path)\r\n \"\"\"\r\n return best_distance, time - i + 1\r\n\r\n\r\nif __name__ == '__main__':\r\n # 几个常量\r\n a = 2\r\n b = 2\r\n r = 0.5\r\n Q = 21\r\n\r\n city_size = 10\r\n ant_size = 9\r\n\r\n # 地图矩阵(对角线上实际为0,为了计算eta矩阵方便,这里对角线设置为1)\r\n map = np.array([\r\n [1, 4, 11, 7, 15, 9, 1, 8, 10, 5],\r\n [4, 1, 12, 3, 6, 5, 10, 7, 4, 7],\r\n [11, 12, 1, 14, 7, 2, 3, 1, 2, 11],\r\n [7, 3, 14, 1, 6, 13, 8, 15, 7, 2],\r\n [15, 6, 7, 6, 1, 6, 5, 2, 16, 1],\r\n [9, 5, 2, 13, 6, 1, 2, 18, 10, 9],\r\n [1, 10, 3, 8, 5, 2, 1, 14, 5, 3],\r\n [8, 7, 1, 15, 2, 18, 14, 1, 2, 8],\r\n [10, 4, 2, 7, 16, 10, 5, 2, 1, 14],\r\n [5, 7, 11, 2, 1, 9, 3, 8, 14, 1]\r\n ], dtype='int').reshape([city_size, city_size])\r\n\r\n # 能见度矩阵\r\n eta = 1 / map\r\n\r\n time = 100 # 最大迭代次数\r\n\r\n parameters = [[2, 2, 0.5, 21, 7], [2, 50, 0.5, 21, 7], [50, 2, 0.5, 21, 7], [2, 2, 0.95, 21, 7],\r\n [2, 2, 0.05, 21, 7], [2, 2, 0.5, 500, 7], [2, 2, 0.5, 21, 3]]\r\n for parameter in parameters:\r\n a = parameter[0]\r\n b = parameter[1]\r\n r = parameter[2]\r\n Q = parameter[3]\r\n ant_size = parameter[4]\r\n\r\n best_distances = []\r\n record = []\r\n best_count = 0\r\n\r\n # 对每种参数组合执行50次实验\r\n for j in range(50):\r\n # 最佳路径及其距离\r\n best_path = []\r\n best_distance = 80\r\n # 初始化信息素矩阵,全是为1组成的矩阵\r\n pheromone_tb = np.ones((city_size, city_size))\r\n # 初始化蚂蚁群\r\n ant_pop = [Ant(index) for index in range(ant_size)]\r\n\r\n # 蚁群搜索\r\n dis, num = searchTSP()\r\n\r\n best_distances.append(dis)\r\n record.append(num)\r\n if dis == 21:\r\n best_count += 1\r\n\r\n print(\"参数为(顺序是a,b,r,Q,ant_size):\")\r\n print(parameter)\r\n print(\"最佳距离与迭代次数为:\")\r\n print(best_distances)\r\n print(record)\r\n print(\"得到最优解的次数:%d / 50\" % best_count)\r\n print(\"平均值\")\r\n print(np.mean(best_distances))\r\n print(np.mean(record))\r\n print(\"中位数\")\r\n print(np.median(best_distances))\r\n print(np.median(record))\r\n\r\n","repo_name":"xxLL-xh/artificial-intelligence","sub_path":"人工智能导论/9.群智能算法:蚁群算法解决“旅行商”问题/code/ants.py","file_name":"ants.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34094858818","text":"from PyQt5.QtWidgets import QApplication, QPushButton, QGridLayout, QWidget, QLineEdit, QLabel, QTextEdit\nfrom PyQt5.QtGui import QPixmap, QPalette, QColor\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport sys\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nfrom selenium.webdriver.common.by import By\nfrom 
selenium.webdriver.chrome.options import Options\nimport urllib.request\n\n\nclass Calculator():\n def __init__(self, equation):\n super().__init__()\n\n self.url = 'https://www.wolframalpha.com/'\n self.options = Options()\n self.options.add_argument(\"--headless\")\n self.driver = webdriver.Chrome(\n '/Users/mkopczynski/Downloads/differential-equations-wolfram-main/chromedriver', chrome_options=self.options)\n self.equation = equation\n self.plot_error = 0\n\n def calculate(self):\n self.driver.get(self.url)\n self.input_field = self.driver.find_element(\n By.XPATH, '//*[@id=\"__next\"]/div/div[1]/div/div/div[1]/section/form/div/div/input')\n self.input_field.send_keys(self.equation)\n self.input_field.send_keys(Keys.RETURN)\n time.sleep(5)\n\n try:\n self.header = self.driver.find_element(\n By.XPATH, '//*[contains(text(), \"Differential equation solution\")]')\n self.solution_image = self.header.find_element(\n By.XPATH, 'following::img[1]')\n self.solution = self.solution_image.get_attribute('alt')\n solution_url = self.solution_image.get_attribute('src')\n urllib.request.urlretrieve(solution_url, 'solution_img.png')\n except:\n print('No avaliable solution')\n\n try:\n self.plot1_header = self.driver.find_element(\n By.XPATH, '//*[contains(text(), \"Slope field\")]')\n self.plot1 = self.plot1_header.find_element(\n By.XPATH, 'following::img[1]')\n plot1_url = self.plot1.get_attribute('src')\n urllib.request.urlretrieve(plot1_url, 'plot1')\n except:\n print('There is no Slope field plot')\n self.plot_error = 1\n try:\n self.plot2_header = self.driver.find_element(\n By.XPATH, '//*[contains(text(), \"Plots of sample individual solution\")]')\n self.plot2 = self.plot2_header.find_element(\n By.XPATH, 'following::img[1]')\n plot2_url = self.plot2.get_attribute('src')\n urllib.request.urlretrieve(plot2_url, 'plot2')\n except:\n print('There is no Plots of sample individual solution')\n self.plot_error = 2\n try:\n self.plot3_header = self.driver.find_element(\n By.XPATH, '//*[contains(text(), \"Sample solution family\")]')\n self.plot3 = self.plot3_header.find_element(\n By.XPATH, 'following::img[1]')\n plot3_url = self.plot3.get_attribute('src')\n urllib.request.urlretrieve(plot3_url, 'plot3')\n except:\n print('There is no Sample solution family plot')\n self.plot_error = 3\n\n self.driver.quit()\n print(self.plot_error)\n return self.solution, self.plot_error\n \nclass Application(QWidget):\n def __init__(self):\n super().__init__()\n\n # Configure matplotlib\n plt.rcParams['figure.facecolor'] = '#333333'\n plt.rcParams['axes.facecolor'] = '#333333'\n plt.rcParams['savefig.facecolor'] = '#333333'\n plt.rcParams['text.color'] = 'white'\n plt.rcParams['xtick.color'] = 'white'\n plt.rcParams['ytick.color'] = 'white'\n plt.rcParams['axes.labelcolor'] = 'white'\n plt.rcParams['font.weight'] = 'bold' # set font to bold in matplotlib\n\n # Create UI elements\n self.input_field = QLineEdit(self)\n self.button = QPushButton('Calculate', self)\n self.label = QLabel(self)\n self.textedit = QTextEdit(self)\n self.figure1 = Figure()\n self.canvas1 = FigureCanvas(self.figure1)\n self.figure2 = Figure()\n self.canvas2 = FigureCanvas(self.figure2)\n self.figure3 = Figure()\n self.canvas3 = FigureCanvas(self.figure3)\n\n self.plot1_label = QLabel(self)\n self.plot1_label.setText('Slope field')\n self.plot2_label = QLabel(self)\n self.plot2_label.setText('Plots of sample individual solution')\n self.plot3_label = QLabel(self)\n self.plot3_label.setText('Sample solution family')\n\n # Connect button to 
calculate method\n self.button.clicked.connect(self.calculate)\n\n # Create layout and add UI elements\n layout = QGridLayout()\n layout.addWidget(self.input_field, 0, 0)\n layout.addWidget(self.button, 0, 1)\n layout.addWidget(self.label, 1, 0, 1, 2)\n layout.addWidget(self.textedit, 2, 0, 1, 2)\n\n layout.addWidget(self.plot3_label, 3, 0)\n layout.addWidget(self.canvas3, 4, 0)\n\n layout.addWidget(self.plot1_label, 3, 1)\n layout.addWidget(self.canvas1, 4, 1)\n\n layout.addWidget(self.plot2_label, 3, 2)\n layout.addWidget(self.canvas2, 4, 2)\n\n self.setLayout(layout)\n\n def calculate(self):\n\n input_text = self.input_field.text()\n input_text = 'diff equation ' + input_text\n self.calculator = Calculator(input_text)\n self.output_text, self.plot_error = self.calculator.calculate()\n self.textedit.setText(\n f\"Solution: {self.output_text}\")\n\n try:\n if self.plot_error == 1:\n img1 = mpimg.imread('blank')\n ax1 = self.figure1.add_subplot(111)\n ax1.set_axis_off()\n ax1.imshow(img1)\n self.canvas1.draw()\n else:\n img1 = mpimg.imread('plot1')\n ax1 = self.figure1.add_subplot(111)\n ax1.set_axis_off()\n ax1.imshow(img1)\n self.canvas1.draw()\n except:\n print('There is no plot 1')\n\n try:\n if self.plot_error == 2:\n img2 = mpimg.imread('blank')\n ax2 = self.figure2.add_subplot(111)\n ax2.set_axis_off()\n ax2.imshow(img2)\n self.canvas2.draw()\n else:\n img2 = mpimg.imread('plot2')\n ax2 = self.figure2.add_subplot(111)\n ax2.set_axis_off()\n ax2.imshow(img2)\n self.canvas2.draw()\n except:\n print(\"There is no plot 2\")\n\n try:\n if self.plot_error == 3:\n img3 = mpimg.imread('blank')\n ax3 = self.figure3.add_subplot(111)\n ax3.set_axis_off()\n ax3.imshow(img3)\n self.canvas3.draw()\n else:\n img3 = mpimg.imread('plot3')\n ax3 = self.figure3.add_subplot(111)\n ax3.set_axis_off()\n ax3.imshow(img3)\n self.canvas3.draw()\n except:\n print(\"There is no plot 3\")\n\n pixmap = QPixmap('solution_img.png')\n self.label.setPixmap(pixmap)\n\ndef main() -> None:\n app = QApplication(sys.argv)\n\n # Set the dark theme palette\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(53, 53, 53))\n palette.setColor(QPalette.WindowText, QColor(255, 255, 255))\n palette.setColor(QPalette.Base, QColor(25, 25, 25))\n palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n palette.setColor(QPalette.ToolTipBase, QColor(255, 255, 255))\n palette.setColor(QPalette.ToolTipText, QColor(255, 255, 255))\n palette.setColor(QPalette.Text, QColor(255, 255, 255))\n palette.setColor(QPalette.Button, QColor(53, 53, 53))\n palette.setColor(QPalette.ButtonText, QColor(255, 255, 255))\n palette.setColor(QPalette.BrightText, QColor(255, 0, 0))\n palette.setColor(QPalette.Highlight, QColor(142, 45, 197).lighter())\n palette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))\n app.setPalette(palette)\n\n # Set the dark theme style sheet\n app.setStyleSheet(\n \"QPushButton { background-color: #333333; color: #ffffff; font-weight: bold; }\"\n \"QLineEdit { background-color: #333333; color: #ffffff; font-weight: bold; }\"\n \"QTextEdit { background-color: #333333; color: #ffffff; font-weight: bold; }\"\n \"QLabel { font-weight: bold; }\"\n)\n\n window = Application()\n window.show()\n\n sys.exit(app.exec_())\n\nif __name__ == \"__main__\":\n main()","repo_name":"kopczyn12/wno-labs-uni","sub_path":"wolfram_calculator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} 
+{"seq_id":"27956674244","text":"# Tip Calculator\n\nprint(\"Welcome to the tip calculator \")\n\n#Total_ tip =total price of bill * percentage of tip\n\ntotal_Bill = input(\"What was the bill total? \\n\")\n\ntotal_Bill = float(total_Bill)\n\ntip_Percentage = input(\"What percentage of tip did you want to leave (Please do not include a '%' symbol only numbers)\")\n\ntip_Percentage = float(tip_Percentage)\n\ntip = tip_Percentage / 100\n\ntip *= total_Bill\n\n\nprint(f\"The tip would be: $ {round(tip,2)} \")\n\n#Things learned is that you cannot use the float() tag inside the input field when applying to variable. I had to do it seperate. Not sure if done incorrectly, or if there is something I'm not understanding. ","repo_name":"Amwence/100-days-of-code","sub_path":"Day 2/Day 2 Project Tip Calculator.py","file_name":"Day 2 Project Tip Calculator.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"33618954776","text":"from abc import ABC, abstractmethod, abstractproperty\nfrom pokemon.pokemon import Pokemon\n\n\nclass PokemonMove(ABC):\n def __init__(self, base_power, accuracy, category, type_):\n self.base_power = base_power\n self.accuracy = accuracy\n self.category = category\n self.crit_ratio = 1/16\n self.type = type_\n\n def damage(self, attacker: Pokemon, defender: Pokemon):\n damage = 2 + ((((2 * attacker.level) / 5) + 2)\n * self.base_power * (attacker.stats['atk'] / defender.stats['def'])) / 50\n\n stab = 1.5 if self.type in attacker.types else 1\n\n return damage*stab\n\n\n","repo_name":"yoursred/twitchgame","sub_path":"pokemon/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"7759471082","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nfrom spider_mycwpjt.items import SpiderMycwpjtItem\n\nclass WeisuenSpider(CrawlSpider):\n name = 'weisuen'\n allowed_domains = ['sohu.com']\n start_urls = ['http://news.sohu.com/']\n # rules = (\n #     Rule(LinkExtractor(allow='.shtml', allow_domains=('sohu.com')),\n #          callback='parse_item',\n #          follow=True),\n # )\n\n rules = (\n Rule(LinkExtractor(allow=('.*?/n.*?shtml'), allow_domains=('sohu.com')),\n callback='parse_item', follow=True),\n )\n\n def parse_item(self, response):\n print(\"parse_item...\")\n i = SpiderMycwpjtItem()\n i['name'] = response.xpath('/html/head/title/text()').extract_first()\n # i['link'] = response.xpath(\"//a4/@href\").extract_first()\n #i['domain_id'] = response.xpath('//input[@id=\"sid\"]/@value').extract()\n #i['name'] = response.xpath('//div[@id=\"name\"]').extract()\n #i['description'] = response.xpath('//div[@id=\"description\"]').extract()\n return i\n","repo_name":"jinzekid/codehub","sub_path":"python/py3_6venv/spider_mycwpjt/spider_mycwpjt/spiders/weisuen.py","file_name":"weisuen.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"12563894472","text":"import pandas as pd\nfrom zipfile import ZipFile\nimport plotly.express as px\nimport requests, io\n\nzip_file_url= 'https://vicroadsopendatastorehouse.vicroads.vic.gov.au/opendata/Road_Safety/ACCIDENT.zip'\nr = requests.get(zip_file_url)\nfiles = ZipFile(io.BytesIO(r.content))\n\n\nselect=['ACCIDENT_NO', 'ACCIDENTDATE', 'ACCIDENTTIME', 'Accident Type Desc', 'Day Week Description', 'NO_PERSONS_KILLED',\n 'DCA Description', 'Light Condition Desc', 'Road Geometry Desc', 'SEVERITY',\n 'SPEED_ZONE']\ndf = pd.read_csv(files.open(\"ACCIDENT.csv\"), usecols=select)\ndf['ACCIDENTDATE'] = pd.to_datetime(df['ACCIDENTDATE'], infer_datetime_format=True)\ndf['ACCIDENTTIME'] = pd.to_datetime(df['ACCIDENTTIME'], infer_datetime_format=True)\ndf['ACCIDENTYEAR'] = df['ACCIDENTDATE'].dt.year\ndf['ACCIDENTHOUR'] = df['ACCIDENTTIME'].dt.hour\n\nfoo = pd.read_csv(files.open('NODE.csv'), usecols = ['ACCIDENT_NO', 'LGA_NAME', 'Lat', 'Long']).drop_duplicates(subset=['ACCIDENT_NO'])\ndf = df.merge(foo,\n left_on='ACCIDENT_NO',right_on='ACCIDENT_NO', how='inner')\n\nfoo = pd.read_csv(files.open('ROAD_SURFACE_COND.csv'), usecols=['ACCIDENT_NO', 'SURFACE_COND', 'Surface Cond Desc']).drop_duplicates(subset=['ACCIDENT_NO'])\ndf = df.merge(foo, left_on='ACCIDENT_NO',right_on='ACCIDENT_NO', how='left')\n\nfoo = pd.read_csv(files.open('ACCIDENT_LOCATION.csv'), usecols=['ACCIDENT_NO', 'ROAD_NAME'])\ndf = df.merge(foo, left_on='ACCIDENT_NO',right_on='ACCIDENT_NO', how='left')\n\nfoo = pd.read_csv(files.open('VEHICLE.csv'), usecols=['ACCIDENT_NO', 'VEHICLE_YEAR_MANUF'])\nfoo = foo[foo['VEHICLE_YEAR_MANUF'] > 1900]\n# We keep the oldest car involved in the accident\nfoo = foo.groupby('ACCIDENT_NO').min().reset_index()\ndf = df.merge(foo[['ACCIDENT_NO', 'VEHICLE_YEAR_MANUF']], left_on='ACCIDENT_NO',right_on='ACCIDENT_NO', how='left')\n\nfoo = pd.read_csv(files.open('SUBDCA.csv'), usecols=['ACCIDENT_NO', 'Sub Dca Code Desc'])\nfoo = foo.groupby('ACCIDENT_NO').agg(','.join).reset_index()\ndf = df.merge(foo[['ACCIDENT_NO', 'Sub Dca Code Desc']], left_on='ACCIDENT_NO',right_on='ACCIDENT_NO', how='left')\n\ndel(foo)\n\ndf['crashes'] = 1\ndf = df[df.SPEED_ZONE < 200]\n\ntop_roads = df.groupby('ROAD_NAME').size().sort_values(ascending=False).head(30).index\n\n\norder = df[['Accident Type Desc', 'crashes']].groupby('Accident Type Desc').sum().sort_values('crashes', ascending=False).index\norder_dca = df[['DCA Description', 'crashes']].groupby('DCA Description').sum().sort_values('crashes', ascending=False).index\n","repo_name":"ymiftah/VicCrash","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"19120406323","text":"import numpy as np \nimport scipy as sp\nfrom shap import KernelExplainer\nfrom shap.common import IdentityLink, LogitLink, Model\n\n# This class is needed in the interim to override the varying_groups for KernelExplainer. \n# This was needed to handle text column in the training data. https://github.com/slundberg/shap/pull/1211\nclass KernelExplainerWrapper(KernelExplainer):\n def __init__(self, model, data, link=IdentityLink(), **kwargs):\n super().__init__(model, data, link, **kwargs) \n \n \n @staticmethod\n def not_equal(i, j):\n if isinstance(i, str) or isinstance(j, str):\n return 0 if i == j else 1\n return 0 if np.isclose(i, j, equal_nan=True) else 1\n \n def varying_groups(self, x):\n if not sp.sparse.issparse(x):\n varying = np.zeros(self.data.groups_size)\n for i in range(0, self.data.groups_size):\n inds = self.data.groups[i]\n x_group = x[0, inds]\n if sp.sparse.issparse(x_group):\n if all(j not in x.nonzero()[1] for j in inds):\n varying[i] = False\n continue\n x_group = x_group.todense()\n num_mismatches = np.sum(np.frompyfunc(self.not_equal, 2, 1)(x_group, self.data.data[:, inds]))\n varying[i] = num_mismatches > 0\n varying_indices = np.nonzero(varying)[0]\n return varying_indices\n else:\n varying_indices = []\n # go over all nonzero columns in background and evaluation data\n # if both background and evaluation are zero, the column does not vary\n varying_indices = np.unique(np.union1d(self.data.data.nonzero()[1], x.nonzero()[1]))\n remove_unvarying_indices = []\n for i in range(0, len(varying_indices)):\n varying_index = varying_indices[i]\n # now verify the nonzero values do vary\n data_rows = self.data.data[:, [varying_index]]\n nonzero_rows = data_rows.nonzero()[0]\n if nonzero_rows.size > 0:\n background_data_rows = data_rows[nonzero_rows]\n if sp.sparse.issparse(background_data_rows):\n background_data_rows = background_data_rows.toarray()\n num_mismatches = np.sum(np.abs(background_data_rows - x[0, varying_index]) > 1e-7)\n # Note: If feature column non-zero but some background zero, can't remove index\n if num_mismatches == 0 and not \\\n (np.abs(x[0, [varying_index]][0, 0]) > 1e-7 and len(nonzero_rows) < data_rows.shape[0]):\n remove_unvarying_indices.append(i)\n mask = np.ones(len(varying_indices), dtype=bool)\n mask[remove_unvarying_indices] = False\n varying_indices = varying_indices[mask]\n return varying_indices","repo_name":"ompatri/amazon-sagemaker-examples","sub_path":"autopilot/model-explainability/kernel_explainer_wrapper.py","file_name":"kernel_explainer_wrapper.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"13309110547","text":"import random\r\nimport hashlib\r\n\r\n# characters to be chosen for salting is stored in list 'salt_list'\r\nsalt_list = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\",\r\n \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\r\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n\r\nStr = input(\"Enter the string to be hashed: \") # getting the string from user\r\nprint(\"Actual String: \" + Str)\r\n\r\n# number of characters to be inserted to the string for salting is stored in variable 'no_of_saltchars'\r\n# here, number of characters is chosen at random b/w numbers 1 and 10\r\nno_of_saltchars = random.randint(1, 10)\r\n\r\nStrlength = len(Str)\r\nsalted_string = Str\r\n\r\nfor i in range(no_of_saltchars):\r\n # character to be inserted to string is chosen at random from 'salt_list' and stored in variable 'salt_char'\r\n salt_char = str(random.choice(salt_list))\r\n\r\n # position where the character is inserted is stored in variable 'saltchar_pos'\r\n saltchar_pos = random.randint(0, Strlength)\r\n\r\n salted_string = salted_string[0:saltchar_pos] + salt_char + salted_string[saltchar_pos:]\r\n Strlength = len(salted_string)\r\n\r\nprint(\"String after salting: \" + salted_string)\r\n\r\n# number of times the salted string must be iterated using MD5 algorithm is chosen at random\r\n# b/w numbers 1 and 10, and stored in variable 'No_of_iterations'\r\nNo_of_iterations = random.randint(1, 10)\r\n\r\nhash_salt = salted_string\r\n\r\nfor i in range(No_of_iterations):\r\n byte_val = hash_salt.encode() # encoding the string to bytes\r\n hmd5 = hashlib.md5(byte_val)\r\n hash_string = hmd5.hexdigest()\r\n hash_salt = hash_string\r\n\r\nprint(f\"Salted string after {No_of_iterations} iterations: {hash_salt}\")\r\n","repo_name":"Varun-Ajith-Sivaram/hash_program_salting_iterations","sub_path":"hash-salting-iterations.py","file_name":"hash-salting-iterations.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"74045829989","text":"import math\nimport torch\nimport torch.nn as nn\nimport fmengine.mpu as mpu\nimport torch.nn.functional as F\nfrom .modeling_mistral import MistralFlashAttention2\n\n\nclass LoRARowParallelLinear(mpu.ColumnParallelLinear):\n # LoRA implemented in a dense layer\n def __init__(\n self,\n args,\n # ↓ this part is for pretrained ColumnParallelLinear weights\n input_size: int,\n output_size: int,\n gather_output=False,\n init_method=nn.init.xavier_normal_,\n skip_bias_add=True,\n bias=False,\n # ↓ the remaining part is for LoRA\n r: int = 0,\n lora_alpha: int = 1,\n lora_dropout: float = 0.0,\n **kwargs,\n ):\n super().__init__(\n args=args,\n input_size=input_size,\n output_size=output_size,\n gather_output=gather_output,\n init_method=init_method,\n skip_bias_add=skip_bias_add,\n bias=bias,\n )\n assert gather_output == False\n assert r >= 0\n self.r = r\n self.lora_alpha = lora_alpha\n # Optional dropout\n if lora_dropout > 0.0:\n self.lora_dropout = nn.Dropout(p=lora_dropout)\n else:\n self.lora_dropout = lambda x: x\n # Mark the weight as unmerged\n self.merged = False\n\n # Actual trainable parameters\n if r > 0:\n self.lora_A = nn.Parameter(self.weight.new_zeros((r, self.weight.size(1))))\n self.lora_B = nn.Parameter(self.weight.new_zeros((self.weight.size(0)), r))\n self.scaling = self.lora_alpha / self.r\n self.reset_parameters()\n\n def reset_parameters(self):\n \"\"\"Reset all the weights, even including pretrained ones.\"\"\"\n if hasattr(self, \"lora_A\"):\n # initialize A the same way as the default for nn.Linear and B to zero\n # Wondering why 'a' is equal to math.sqrt(5)?: https://github.com/pytorch/pytorch/issues/15314\n nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))\n nn.init.zeros_(self.lora_B)\n\n def merge(self):\n \"\"\"Merges the LoRA weights into the full-rank weights (W = W + delta_W).\"\"\"\n if self.r > 0 and not self.merged:\n # Merge the weights and mark it\n self.linear.weight.data += (self.lora_B @ self.lora_A) * self.scaling\n self.merged = True\n\n def forward(self, x: torch.Tensor):\n # if weights are merged or rank is less or equal to zero (LoRA is disabled) - it's only a regular nn.Linear forward pass;\n # otherwise in addition do the forward pass with LoRA weights and add it's output to the output from pretrained weights\n pretrained = super().forward(x)[0]\n if self.r == 0 or self.merged:\n return (pretrained,)\n x = self.lora_dropout(x)\n x = F.linear(x, self.lora_A)\n x = F.linear(x, self.lora_B)\n x = x * self.scaling\n return (pretrained + x,)\n\n\nclass TensorParallelLoraAttention(MistralFlashAttention2):\n def __init__(\n self,\n args,\n config,\n ):\n super().__init__(config)\n self.q_proj = LoRARowParallelLinear(\n args=args,\n input_size=self.hidden_size,\n output_size=self.num_heads * self.head_dim,\n gather_output=False,\n init_method=nn.init.xavier_normal_,\n skip_bias_add=True,\n bias=False,\n r=args.deepspeed_config.lora.r,\n lora_alpha=args.deepspeed_config.lora.lora_alpha,\n lora_dropout=args.deepspeed_config.lora.lora_dropout,\n )\n\n self.k_proj = mpu.ColumnParallelLinear(\n args=args,\n input_size=self.hidden_size,\n output_size=self.num_key_value_heads * self.head_dim,\n gather_output=False,\n init_method=nn.init.xavier_normal_,\n skip_bias_add=True,\n bias=False,\n )\n self.v_proj = LoRARowParallelLinear(\n args=args,\n input_size=self.hidden_size,\n output_size=self.num_key_value_heads * self.head_dim,\n gather_output=False,\n init_method=nn.init.xavier_normal_,\n skip_bias_add=True,\n bias=False,\n r=args.deepspeed_config.lora.r,\n lora_alpha=args.deepspeed_config.lora.lora_alpha,\n lora_dropout=args.deepspeed_config.lora.lora_dropout,\n )\n self.o_proj = mpu.RowParallelLinear(\n args=args,\n input_size=self.num_heads * self.head_dim,\n output_size=self.hidden_size,\n input_is_parallel=True,\n init_method=nn.init.xavier_normal_,\n skip_bias_add=True,\n parallel_output=False, # True if gpt-j-parallel\n bias=False,\n )\n","repo_name":"eth-easl/fmengine","sub_path":"fmengine/modeling/mistral/lora.py","file_name":"lora.py","file_ext":"py","file_size_in_byte":4816,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"71"}
+{"seq_id":"7352229106","text":"import time\n\ndef test():\n start = time.time()\n for d in range(1, 26):\n result = 'Day {:2d} '.format(d)\n try:\n t = time.time()\n solution = __import__('day_{:02d}'.format(d))\n e, h = solution.test()\n result += '{} {} ({}s)'.format(' ✓'[e], ' ✓'[h], round(time.time() - t, 3))\n except ModuleNotFoundError:\n result += '✗ ✗'\n print(result)\n print('[Finished in {}s]'.format(round(time.time() - start, 2)))\n\nif __name__ == '__main__':\n test()","repo_name":"MKolman/advent-of-code","sub_path":"2019/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"6063151699","text":"T=int(input())\n\nfor tcases in range(T):\n N=int(input())\n arr=[]\n arr3=[]\n arr5=[]\n\n for i in range(N):\n arr1=list(input().strip())\n print(arr1)\n arr1.sort()\n arr+=[arr1]\n\n for i in range(N):\n arr2=[]\n arr4=[]\n for j in range(N):\n arr2+= arr[j][i]\n arr4+= arr[j][i]\n arr4.sort()\n arr3+=[arr2]\n arr5+=[arr4]\n\n\n if arr3==arr5:\n print(\"YES\")\n else:\n print(\"NO\")\n","repo_name":"Atleastwin/HackerRank","sub_path":"Exercises/Grid Challenge.py","file_name":"Grid Challenge.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29653068947","text":"from cosypose.config import DEBUG_DATA_DIR\nimport torch\n\ndef cast(obj):\n return obj.cuda(non_blocking=True)\n\n\ndef h_maskrcnn(data, model, meters, cfg):\n images, targets = data\n images = list(cast(image).permute(2, 0, 1).float() / 255 for image in images)\n targets = [{k: cast(v) for k, v in t.items()} for t in targets]\n\n loss_dict = model(images, targets)\n\n loss_rpn_box_reg = loss_dict['loss_rpn_box_reg']\n loss_objectness = loss_dict['loss_objectness']\n loss_box_reg = loss_dict['loss_box_reg']\n loss_classifier = loss_dict['loss_classifier']\n loss_mask = loss_dict['loss_mask']\n\n loss = cfg.rpn_box_reg_alpha * loss_rpn_box_reg + \\\n cfg.objectness_alpha * loss_objectness + \\\n cfg.box_reg_alpha * loss_box_reg + \\\n cfg.classifier_alpha * loss_classifier + \\\n cfg.mask_alpha * loss_mask\n\n # torch.save(images, DEBUG_DATA_DIR / 'images.pth.tar')\n\n meters['loss_rpn_box_reg'].add(loss_rpn_box_reg.item())\n meters['loss_objectness'].add(loss_objectness.item())\n meters['loss_box_reg'].add(loss_box_reg.item())\n meters['loss_classifier'].add(loss_classifier.item())\n meters['loss_mask'].add(loss_mask.item())\n return loss\n","repo_name":"ylabbe/cosypose","sub_path":"cosypose/training/maskrcnn_forward_loss.py","file_name":"maskrcnn_forward_loss.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":276,"dataset":"github-code","pt":"71"}
+{"seq_id":"6079619698","text":"#değişkenler - start\nbaslik = 'HABERİNİZ OLSUN' #string\nvade = 12 #integer\nfaizOrani = 1.47 #float\n\nprint(baslik)\nprint(type(baslik))\nprint(type(vade))\nprint(type(faizOrani))\n\nmesaj = \"Hoşgeldin\"\nmusteriAdi = \"Ayşe\"\nmusteriSoyadi = \"Aydin\"\nsonucMesaj = mesaj + \" \" + musteriAdi + \" \" + musteriSoyadi + \"!\"\n\nprint(sonucMesaj)\n\nsayi1 = 10\nsayi2 = 20\nprint(sayi1 + sayi2)\n\nprint(sonucMesaj)\n#değişkenler - end\n\n#şart blokları - start\ndolarDun = 7.65\ndolarBugun = 7.75\n\nif dolarDun > dolarBugun:\n print(\"Azalış oku\")\n print(\"Bitti\")\nelif dolarDun < dolarBugun:\n print(\"Artış oku\")\nelse:\n print(\"Eşittir oku\")\n\nprint(\"Bitti\")\n#şart blokları - end\n\n#listeler - start\nkrediler = [\n \"Hızlı Kredi\", \"Maaşını Halkbank'tan alanlara özel\",\n \"Mutlu emekli ihtiyaç kredisi\"\n]\n\nprint(krediler)\nprint(krediler[0]) #diziler sıfırıncı elemandan başlar\nprint(krediler[1])\nprint(krediler[2])\n\nprint(len(krediler)) #length - dizideki eleman sayısını verir\n\nkrediler[\n 0] = \"Çabuk kredi\" #dizideki sıfırıncı eleman olan hızlı krediyi çabuk kredi olarak değiştirdi\nprint(krediler)\n#listeler - end\n\n#ldöngüler - start\nkrediler = [\n \"Hızlı Kredi\", \"Maaşını Halkbank'tan alanlara özel\",\n \"Mutlu emekli ihtiyaç kredisi\"]\n\n#kredi -> alias\nfor kredi in krediler:\n print(kredi)\n\n\nfor i in range(10): #0'dan başlar 10 dahil değil\n print(i)\n\nfor i in range(\n len(krediler)\n): #krediler listesinin uzunluğu kadar -> 0'dan 3'e kadar 3 dahil değil\n print(krediler[i])\n\nfor i in range(3, 10): #3'ten başlar 10 dahil değil\n print(i)\n\nfor i in range(0, 10, 2): #0'dan başlar 10 dahil değil 2'şer artar.\n print(i)\n\nfor kredi in krediler:\n print(\"