diff --git "a/107.jsonl" "b/107.jsonl"
new file mode 100644
--- /dev/null
+++ "b/107.jsonl"
@@ -0,0 +1,1282 @@
+{"seq_id":"8562672011","text":"print('1)Расчет стоимости доставки:')\r\nrastojanie_dostavki = float(input(\"\\nВведите растояние поездки:\"))\r\nw = rastojanie_dostavki / 100\r\n\r\nnadbavka_za_km = float(input(\"Введите сумму надбавки за км:\"))\r\nprocent_za_km = w * nadbavka_za_km\r\n\r\nrashod_na_100 = float(input(\"Введите расход топлива на 100км:\"))\r\nrashod_na_1 = rashod_na_100 / 100\r\ncena_topliva = float(input(\"Введите цену за литр топлива:\"))\r\ncena_za_100_km = rashod_na_100 * cena_topliva\r\n\r\ncena_za_1_km = cena_za_100_km / 100\r\n\r\nsumma_za_start = float(input(\"Введите сумму за старт(если таковой не имеится, впишите '0'):\"))\r\n\r\n\r\nitogovaja_summa = (procent_za_km + cena_za_1_km) * rastojanie_dostavki + summa_za_start\r\nprint(\"\\nИтоговая сумма доставки:\", itogovaja_summa)\r\n\r\nsumma_avto = float(input(\"\\nВпишите сумму авто:\"))\r\nchislo_naloga = float(input(\"Введите процент налога:\"))\r\nprocent_naloga = summa_avto / 100\r\nnalog_itog = procent_naloga * chislo_naloga\r\nprint(\"\\nСумма налога составит:\", nalog_itog)\r\n\r\nagentskiy_sbor = float(input(\"\\nВведите сумму агентского сбора:\"))\r\nprint(\"Сумма агентского сбора:\", agentskiy_sbor)\r\n\r\nregistracionnyi_sbor = float(input(\"\\nВведите сумму регистрационного сбора:\"))\r\nprint(\"Сумма регистрационного сбора составляет:\", registracionnyi_sbor)\r\n\r\nitogovaja_summa_za_uslugi = itogovaja_summa + nalog_itog + agentskiy_sbor + registracionnyi_sbor\r\nprint(\"\\n\\nИтоговая сумма за услуги составит:\", itogovaja_summa_za_uslugi)\r\nsumma_za_vse = itogovaja_summa_za_uslugi + summa_avto\r\nprint(\"Полная сумма вместе с авто:\", summa_za_vse)\r\ninput()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"Daniil-P-A/Homework","sub_path":"расчеты_авто.py","file_name":"расчеты_авто.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"15301296951","text":"# @Time : 2020/07/02\n# @Author : sunyingqiang\n# @Email : 344670075@qq.com\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .views import ArticleViewSet, ArticlePollView, ArticleSearchViewSet\n\nrouter = DefaultRouter()\nrouter.register('article', ArticleViewSet, basename='article')\nrouter.register('article_search', ArticleSearchViewSet, basename='article_search')\n\nurlpatterns = [\n path(r'', include(router.urls)),\n path(r'poll', ArticlePollView.as_view())\n\n]","repo_name":"supermouse123/drf_blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"73509893284","text":"import sys\r\nsys.stdin = open('2606_input.txt', 'r')\r\n\r\nN = int(input())\r\nganseon = int(input())\r\n\r\nglist = [list(map(int, input().split())) for _ in range(ganseon)]\r\n\r\nstack = [1]\r\nvisited = [False] * (N + 1)\r\nvisited[1] = True\r\n\r\nanswer = 0\r\n\r\nwhile stack:\r\n cnum = stack.pop()\r\n answer += 1\r\n for x, y in glist:\r\n if x == cnum:\r\n if not visited[y]:\r\n visited[y] = True\r\n stack.append(y)\r\n elif y == cnum:\r\n if not visited[x]:\r\n visited[x] = True\r\n stack.append(x)\r\n\r\nprint(answer - 1)","repo_name":"caddyspoon/Algorithms_Breaker","sub_path":"2020/1208/2606_virus.py","file_name":"2606_virus.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"30058489086","text":"import os,sys\nfrom PIL import Image, ImageDraw\nimport numpy as np\n\nground_truth_images_path = \"/datasets_nas/mapa3789/Pixel2Mesh/HandToolsRendered/ShapeNetHandTools_V13/\"\ncropped_images_path = \"/datasets_nas/mapa3789/Pixel2Mesh/HandToolsRendered/ShapeNetHandTools_Occultation_Small/\"\n\n\ndef listFiles(dir, ext, ignoreExt=None):\n \"\"\"\n Return array of all files in dir ending in ext but not ignoreExt.\n \"\"\"\n matches = []\n for root, dirs, files in os.walk(dir):\n for f in files:\n if f.endswith(ext):\n if not ignoreExt or (ignoreExt and not f.endswith(ignoreExt)):\n matches.append(os.path.join(root, f))\n return matches\n\n\n\ndef get_pixels(im):\n pixels = list(im.getdata())\n width, height = im.size\n return [pixels[i * width:(i + 1) * width] for i in range(height)]\n\ndef get_image_object_pixels(pixel_list):\n image_pixels = np.asarray(pixel_list)\n image_pixels = np.sum(image_pixels, axis=2) # sum color + alpha together\n obj_pixels = image_pixels[image_pixels[:,:]!=0]\n return image_pixels, obj_pixels\n\ndef get_percentage_obj_img(image_pixels, object_pixels):\n return float(object_pixels.size) / float(image_pixels.size)\n\ndef calc_percentage_occultation(before_ratio, after_ratio):\n return 1 - (1 / (before_ratio + 1.e-8)) * after_ratio\n\n\ndef export_proportions(proportions):\n export = np.asarray(proportions)\n\n #add mean as column\n mean_percentage_cutout = export[1:,3].astype(np.float).mean()\n export = np.insert(export, 4, mean_percentage_cutout, axis=1)\n export[0][4] = 'mean percentage cutout'\n\n np.savetxt(os.path.join(cropped_images_path, \"tmp/proportions.csv\"), export, delimiter=\",\", fmt=\"%s\")\n\n print(\"FINISHED: mean percentage cutout: {}\".format(mean_percentage_cutout))\n print(\"RUN AGAIN WITH DIFFERENT RADIUS RATIO IF NOT SATISFIED\")\n\nproportions = [['Image file', 'object to image proportion (oip)', 'oip after masking', 'percentage cutout']]\n\ndef run():\n ground_truth_images = listFiles(ground_truth_images_path, \".png\")\n cropped_images = listFiles(cropped_images_path, \".png\")\n\n if(len(ground_truth_images) == 0):\n print(\"No .png files found\")\n sys.exit()\n elif(len(ground_truth_images) != len(cropped_images)):\n print(\"ground truth images and cropped images do not match (different size)\")\n sys.exit()\n\n\n for index, file in enumerate(ground_truth_images):\n\n if ((index) % 50 == 0):\n print(\"{}/{}\".format(index, len(ground_truth_images)))\n\n im = Image.open(file).convert(\"RGBA\")\n\n #original image\n pixels = get_pixels(im)\n image_pixels, obj_pixels = get_image_object_pixels(pixels)\n obj_to_image_proportion_0 = get_percentage_obj_img(image_pixels, obj_pixels)\n\n\n im = Image.open(cropped_images[index]).convert(\"RGBA\")\n\n pixels = get_pixels(im)\n image_pixels, obj_pixels = get_image_object_pixels(pixels)\n obj_to_image_proportion_1 = get_percentage_obj_img(image_pixels, obj_pixels)\n percentage_occultation = calc_percentage_occultation(obj_to_image_proportion_0, obj_to_image_proportion_1)\n\n proportions.append([file, obj_to_image_proportion_0, obj_to_image_proportion_1, percentage_occultation])\n\n\nrun()\nexport_proportions(proportions)\n","repo_name":"markuspaschi/ShapeNetTools","sub_path":"DataSet_Tools/AddOcclusion/calc_proportions.py","file_name":"calc_proportions.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"}
+{"seq_id":"4682250349","text":"from django.core.management import BaseCommand, CommandError\nfrom django.utils import timezone\nfrom snippet.models import Snippet\n\n\nclass Command(BaseCommand):\n help = 'Delete expired snippets'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--quiet',\n action='store_true',\n dest='quiet',\n default=False,\n help='Suppress any output except errors',\n )\n\n def handle(self, *args, **options):\n qs = Snippet.objects.filter(\n expiration__lt=timezone.now()\n ).order_by('pub_date', 'update_date')\n\n if not options['quiet']:\n for s in qs:\n print('{0} {1}'.format(s.slug, s.expiration))\n\n n, _ = qs.delete()\n\n if not options['quiet']:\n print(\"Deleted {0} snippets\".format(n))\n","repo_name":"aither64/havesnippet","sub_path":"snippet/management/commands/expiresnippets.py","file_name":"expiresnippets.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
+{"seq_id":"26073006514","text":"from fastapi import FastAPI, Body\nimport schemas\n\napp = FastAPI()\n\nfakeDatabase = {\n 1: {'task': 'Clean car'},\n 2: {'task': 'Write Blog'},\n 3: {'task': 'Start Stream'}\n}\n\n\n@app.get(\"/\")\ndef getItems():\n return fakeDatabase\n\n# to run app uvicorn main:app --reload\n# Swagger UI automatically included in /docs#\n\n\n@app.get(\"/{id}\")\ndef getItem(id: int):\n return fakeDatabase[id]\n\n\n\"\"\" \nmethod 1\n@app.post(\"/\")\ndef addItem(task:str):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": task}\n return fakeDatabase\n\"\"\"\n# method2 using pydantic schema\n\n\n@app.post(\"/\")\ndef addItem(item: schemas.Item):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": item.task}\n return fakeDatabase\n\n\n\"\"\"\n # method 3 using request body\n@app.post(\"/\")\ndef addItem(body=Body()):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": body['task']}\n return fakeDatabase \"\"\"\n\n\n@app.put(\"/{id}\")\ndef updateItem(id: int, item: schemas.Item):\n fakeDatabase[id]['task'] = item.task\n return fakeDatabase\n\n\n@app.delete(\"/{id}\")\ndef deleteItem(id: int):\n del fakeDatabase[id]\n return fakeDatabase\n","repo_name":"jamestha3d/simplefastAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"38774161631","text":"import fault\nfrom fault.common import get_renamed_port\nfrom fault import AnyValue\n\n\nclass FunctionalTester(fault.Tester):\n \"\"\"\n This Tester provides a convenience mechanism for verifying a DUT against a\n functional model. The basic pattern is that every time `eval` is invoked\n on the Tester, a check is done to verify that the current outputs of the\n functional model are equivalent to the outputs of the DUT. This pattern\n works best with a model that is fairly low-level (e.g. cycle accurate). The\n user has the flexibility to relax accuracy of the model by setting the\n outputs of the functional model to be `fault.AnyValue`. Anything is equal\n to `fault.AnyValue`, so the user can manage when to actually perform the\n consistency check by only updating `fault.AnyValue` at the appropriate\n time.\n \"\"\"\n def __init__(self, circuit, clock, functional_model, input_mapping=None):\n super().__init__(circuit, clock)\n self.functional_model = functional_model\n self.input_mapping = input_mapping\n\n def expect(self, port, value):\n raise RuntimeError(\"Cannot call expect on FunctionTester, expectations\"\n \" are automatically generated based on the\"\n \" functional model\")\n\n def eval(self):\n super().eval()\n for name, port in self._circuit.interface.ports.items():\n if port.is_input():\n fn_model_port = get_renamed_port(self._circuit, name)\n super().expect(port, getattr(self.functional_model,\n fn_model_port))\n\n def expect_any_outputs(self):\n for name, port in self._circuit.interface.ports.items():\n if port.is_input():\n fn_model_port = get_renamed_port(self._circuit, name)\n setattr(self.functional_model, fn_model_port, AnyValue)\n","repo_name":"leonardt/fault","sub_path":"fault/functional_tester.py","file_name":"functional_tester.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"52"}
+{"seq_id":"75125759844","text":"a = 10\n\nmaisIgual, menosIgual, vezesIgual, divididoIgual, moduloIgual = 9,9,9,9,9\n\nprint(str(vezesIgual) + ' - ' + str(divididoIgual))\n\nmaisIgual += 1 #resultado 10\nmenosIgual -= 1 #resultado 8\nvezesIgual *= 1 #resultado 9\ndivididoIgual /= 1 #resultado 9\nmoduloIgual %= 2 #resultado 1\n\nprint(moduloIgual)\n\na, b, c = 2,4,8\na, b, c = a*2, a+b+c, a*b*c","repo_name":"Gpzim98/ProgrameFacil","sub_path":"Python/aula10 - Operadores.py","file_name":"aula10 - Operadores.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"es","doc_type":"code","stars":35,"dataset":"github-code","pt":"52"}
+{"seq_id":"41190709676","text":"class Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n # Find which row that main conatain the integer\n # From each row perform binary search\n \n top, bot = 0, len(matrix) - 1\n midRow = 0\n while top <= bot:\n midRow = int((bot + top) / 2)\n if matrix[midRow][-1] >= target and matrix[midRow][0] <= target:\n break\n elif matrix[midRow][0] > target:\n bot = midRow - 1\n else:\n top = midRow + 1\n \n if top > bot:\n return False\n l, r = 0, len(matrix[midRow]) - 1\n while l <= r:\n midCol = int((l + r) / 2)\n if matrix[midRow][midCol] == target:\n return True\n elif matrix[midRow][midCol] > target:\n r = midCol - 1\n else:\n l = midCol + 1\n \n return False\n\n \n \n \n \n ","repo_name":"hjtan75/fulltime-leetcode","sub_path":"0074-search-a-2d-matrix/0074-search-a-2d-matrix.py","file_name":"0074-search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"5483192382","text":"import os\nimport json\nimport jsonlines\nfrom tqdm import tqdm\nfrom chatgpt import q2r\nfrom config import answer_prompt, task_list\n\n\ndef read_question_list(file_path):\n question_list = []\n with jsonlines.open(file_path) as reader:\n for obj in reader:\n question_list.append(obj['question'])\n return question_list\n\n\ndef write_output(output_filename, output_dict):\n with open(output_filename, 'a', encoding='utf-8') as f:\n f.write(json.dumps(output_dict, ensure_ascii=False) + '\\n')\n\n\ndef main():\n for task_name in task_list:\n print('本次任务类别:', task_name)\n question_list = read_question_list('./data/generate/generate_question_%s.jsonl' % task_name)\n print('本次任务问题数量:', len(question_list))\n output_filename = './data/train/train_data_%s.jsonl' % task_name\n\n # 检查文件是否存在,如果不存在则创建一个空文件\n if not os.path.exists(output_filename):\n with open(output_filename, 'w', encoding='utf-8'):\n pass\n\n # 读取文件并将已有问题存储到一个集合中\n existing_questions = set()\n with open(output_filename, 'r', encoding='utf-8') as f:\n for line in f:\n entry = json.loads(line)\n existing_questions.add(entry['question'])\n\n # 遍历问题列表并检查问题是否已存在\n for i, question in tqdm(enumerate(question_list)):\n print('第%s个' % i)\n if question in existing_questions:\n print('问题已存在')\n continue # 如果问题已存在,跳过\n print('问题:', question)\n question_input = answer_prompt + question\n try:\n result = q2r(question_input)\n print('回答:', result)\n except Exception as e:\n print('异常:', e)\n continue # 如果有异常,跳过\n output = {'index': i, 'question': question, 'answer': result}\n\n # 将新结果追加到文件中\n write_output(output_filename, output)\n print(\"已保存\")\n print(\"len(问题):\", len(question))\n print(\"len(回答):\", len(result))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"morning-hao/domain-self-instruct","sub_path":"domain_self_answer.py","file_name":"domain_self_answer.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"}
+{"seq_id":"2812899314","text":"import socket\nimport threading\n\nclass Server:\n def __init__(self):\n self.host = \"127.0.0.1\"\n self.port = 8080\n self.socket = None\n self.conn = None\n self.receive_thread = None\n\n \n def listen(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((self.host, self.port))\n self.sock.listen()\n print(f\"server is listening on {self.host}:{self.port}\")\n \n while True:\n self.conn, addr = self.sock.accept()\n print(f\"Connected to {addr[0]}:{addr[1]}\")\n \n self.receive_thread = threading.Thread(target=self.receive)\n self.receive_thread.start()\n \n\n \n def receive(self): \n while True:\n data = self.receive_data()\n if not data:\n break\n print(f\"Received message from client: {data}\")\n self.send_data(data.upper())\n\n\n def send_data(self, data):\n if self.conn is not None:\n self.conn.send(data.encode())\n \n \n def receive_data(self):\n if self.conn is not None:\n data = self.conn.recv(1024).decode()\n return data\n \n def close(self):\n self.sock.close()\n \n \n \nserver = Server()\nserver.listen()\nmessage = server.recieve_data()\nprint(message)\n\nserver.send_data(\"Hello_world\")\n\nwhile True:\n message = input(\"Enter a message to send to the server: \")\n server.send_data(message) \n\n\n\n# HEADER = 64\n# FORMAT = 'utf-8'\n# HOST = '192.168.1.222'\n# PORT = 9090\n# DISCONECT = \"disconect\"\n\n# #just for accepting conecitons\n# server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# server.bind((HOST, PORT))\n\n# # while True:\n# # comunication_socket, address = server.accept()\n# # print(f\"Connecting to {address}\")\n# # message = comunication_socket.recv(1024).decode('utf-8')\n# # print(f\"mesage from client {message}\")\n# # comunication_socket.send(f\"Connecting to {address}, {message}, wowowowo\".encode('utf-8'))\n# # comunication_socket.close()\n# # print(\"conection ended\")\n \n\n# def handle_client(conn, addr):\n# print(f\"Connecting to {addr}\")\n# connected = True\n# while connected:\n# msg_length = conn.recv(HEADER).decode(FORMAT)\n# if msg_length:\n# msg_length = int(msg_length)\n# msg = conn.recv(msg_length).decode(FORMAT)\n# if msg == DISCONECT:\n# connected = False\n# print(f\"[{addr}],{msg}\")\n# conn.close()\n \n# # print(f\"Connecting to {address}\")\n# # message = comunication_socket.recv(1024).decode('utf-8')\n# # print(f\"mesage from client {message}\")\n# # comunication_socket.send(f\"Connecting to {address}, {message}, wowowowo\".encode('utf-8'))\n# # comunication_socket.close()\n# # print(\"conection ended\")\n \n# pass\n# def start():\n\n# server.listen()\n# print(f\"listening on {HOST}\")\n# while True:\n# comunication_socket, address = server.accept()\n# thread = threading.Thread(target=handle_client, args=(comunication_socket, address))\n# thread.start()\n# print(f\"active connections: {threading.activeCount() -1}\")\n\n# print(\"server is starting ...\")\n# start()\n\n # print(f\"listening on {HOST}\")\n # while True:\n # comunication_socket, address = server.accept()\n # thread = threading.Thread(target=handle_client, args=(comunication_socket, address))\n # thread.start()\n # print(f\"active connections: {threading.activeCount() -1}\")\n ","repo_name":"MPC-KRY/KryPi","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"24515442804","text":"\"\"\"\nCreated on March 11, 2022\n\n@author: dlytle\n\n\"\"\"\n\nimport argparse\nimport datetime\nimport logging\nimport time\nimport uuid\n\nimport stomp\nimport xmltodict\nimport yaml\n\n\n# Set stomp so it only logs WARNING and higher messages. (default is DEBUG)\nlogging.getLogger(\"stomp\").setLevel(logging.WARNING)\n\n\nclass DTO:\n \"\"\"Digital Telescope Operator Class\n\n _extended_summary_\n \"\"\"\n\n hosts = \"\"\n log_file = \"\"\n command_input_file = \"\"\n message_topic = \"\"\n message_from_device = \"\"\n verbose = False\n wait_array = [True, True, True, True, True, True]\n\n def __init__(self):\n self.message_from_device = \"Go\"\n\n # Read the config file.\n with open(\n \"/home/lorax/Lorax-TNG/DTO/configure.yaml\", \"r\", encoding=\"utf-8\"\n ) as stream:\n try:\n self.config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n # Get the log file name from the configuration.\n # Set up the logger.\n self.log_file = self.config[\"log_file\"]\n logging.basicConfig(\n filename=self.log_file,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n level=logging.DEBUG,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n self.dto_logger = logging.getLogger(\"dto_log\")\n\n # Tell em we've started.\n self.dto_logger.info(\"Initializing: logging started\")\n\n # Get the broker host from the configuration.\n # Make a connection to the broker.\n self.hosts = [tuple(self.config[\"broker_hosts\"])]\n self.dto_logger.info(\n \"connecting to broker at %s\", str(self.config[\"broker_hosts\"])\n )\n\n try:\n # Get a connection handle.s\n self.conn = stomp.Connection(host_and_ports=self.hosts)\n\n # Set up a listener and and connect.\n self.conn.set_listener(\"\", self.MyListener(self))\n self.conn.connect(wait=True)\n except:\n self.dto_logger.error(\"Connection to broker failed\")\n\n self.dto_logger.info(\"connected to broker\")\n\n self.broker_subscribe(self.config[\"mount_dto_topic\"])\n self.broker_subscribe(self.config[\"dome_dto_topic\"])\n self.broker_subscribe(self.config[\"camera_dto_topic\"])\n self.broker_subscribe(self.config[\"filterwheel_dto_topic\"])\n self.broker_subscribe(self.config[\"focuser_dto_topic\"])\n self.broker_subscribe(self.config[\"ccdcooler_dto_topic\"])\n\n self.command_input_file = self.config[\"command_input_file\"]\n\n def broker_subscribe(self, topic):\n \"\"\"Subscribe to broker topic\"\"\"\n if self.verbose:\n print(\"subscribing to topic: %s\", topic)\n self.dto_logger.info(\"subscribing to topic: %s\", topic)\n self.conn.subscribe(\n id=1,\n destination=\"/topic/\" + topic,\n headers={},\n )\n self.dto_logger.info(\"subscribed to topic %s\", topic)\n\n class MyListener(stomp.ConnectionListener):\n \"\"\"MyListener _summary_\n\n _extended_summary_\n\n Parameters\n ----------\n stomp : _type_\n _description_\n \"\"\"\n\n def __init__(self, parent):\n self.parent = parent\n\n def on_error(self, message):\n print(f'received an error \"{message}\"')\n\n def on_message(self, message):\n topic = message.headers[\"destination\"]\n #\n #\n if self.parent.config[\"mount_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 0)\n elif self.parent.config[\"dome_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 1)\n elif self.parent.config[\"camera_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 2)\n elif self.parent.config[\"filterwheel_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 3)\n elif self.parent.config[\"focuser_dto_topic\"] in topic:\n 
self.set_wait_array(topic, message.body, 4)\n elif self.parent.config[\"ccdcooler_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 5)\n\n def set_wait_array(self, message_topic, message_body, index):\n print(\"message from \" + message_topic + \": \" + message_body)\n self.parent.message_topic = message_topic\n self.parent.message_from_device = message_body\n if \"WAIT\" in message_body.upper():\n print(message_body)\n print(\"setting \" + message_topic + \" wait false\")\n self.parent.wait_array[index] = False\n elif \"GO\" in message_body.upper():\n self.parent.wait_array[index] = True\n\n\ndef send_command_to_topic(command, topic):\n print(\"sending '\" + command + \"' to \" + dto.config[topic])\n dto.conn.send(\n body=command,\n destination=\"/topic/\" + dto.config[topic],\n )\n\n\ndef construct_command_xml(recipient: str, command: str):\n \"\"\"Construct the XML Message for the DTO command\n\n _extended_summary_\n\n Parameters\n ----------\n recipient : str\n The recipient of the DTO command\n command : str\n The DTO command\n\n Returns\n -------\n str\n The XML message to be sent over the broker\n \"\"\"\n # Build the XML Status Packet\n status = {\n \"message_id\": uuid.uuid4(),\n \"timestamput\": datetime.datetime.utcnow(),\n \"sender\": \"DTO\",\n \"recipient\": recipient,\n \"command\": command,\n }\n\n return xmltodict.unparse({\"dtoCommand\": status}, pretty=True)\n\n\nif __name__ == \"__main__\":\n # Parse Arguments\n parser = argparse.ArgumentParser(\"DTO\")\n parser.add_argument(\"cmd_file\", type=str, help=\"Command file\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"Provide more verbose output to the screen\",\n )\n args = parser.parse_args()\n\n # Run the DTO\n if args.verbose:\n DTO.verbose = True\n dto = DTO()\n\n with open(args.cmd_file, \"r\", encoding=\"utf-8\") as fp:\n line = fp.readline()\n cnt = 1\n while line:\n if args.verbose:\n print(\"Line {}: {}\".format(cnt, line.strip()))\n # Strip line, parse out target and command.\n targ, comm = [s.strip() for s in line.strip().split(\": \")]\n\n if \"mount\" in targ:\n send_command_to_topic(comm, \"mount_command_topic\")\n if \"camera\" in targ:\n send_command_to_topic(comm, \"camera_command_topic\")\n if \"dome\" in targ:\n send_command_to_topic(comm, \"dome_command_topic\")\n if \"filterwheel\" in targ:\n send_command_to_topic(comm, \"fw_command_topic\")\n if \"ccdcooler\" in targ:\n send_command_to_topic(comm, \"ccdcooler_command_topic\")\n if \"focuser\" in targ:\n send_command_to_topic(comm, \"focuser_command_topic\")\n if \"sleep\" in targ:\n time.sleep(float(comm))\n\n if \"allserv\" in targ:\n send_command_to_topic(comm, \"mount_command_topic\")\n send_command_to_topic(comm, \"camera_command_topic\")\n send_command_to_topic(comm, \"dome_command_topic\")\n send_command_to_topic(comm, \"fw_command_topic\")\n send_command_to_topic(comm, \"ccdcooler_command_topic\")\n send_command_to_topic(comm, \"focuser_command_topic\")\n\n time.sleep(1.0)\n # If any of the wait_array values are false wait until all true.\n if args.verbose:\n print(dto.wait_array)\n while not all(dto.wait_array):\n # print(dto.message_from_device)\n print(\"waiting...\")\n time.sleep(0.1)\n\n line = fp.readline()\n time.sleep(1.0)\n while not all(dto.wait_array):\n # print(dto.message_from_device)\n print(\"waiting...\")\n time.sleep(0.1)\n cnt += 
1\n","repo_name":"LowellObservatory/Lorax-TNG","sub_path":"DTO/DTO.py","file_name":"DTO.py","file_ext":"py","file_size_in_byte":8105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"39207509776","text":"import matplotlib.pyplot as plt\r\n\r\nf = open(\"tcp-example.tr\",\"r\")\r\nx = f.readlines()\r\nf.close()\r\n\r\nenqueue = []\r\ndequeue = []\r\n\r\ndiff = []\r\n\r\nfor i in x:\r\n ls = i.split()\r\n if (\"/NodeList/1/DeviceList/1\" in ls[2]):\r\n if ls[0] == \"+\":\r\n enqueue.append(float(ls[1]))\r\n elif ls[0] == \"-\":\r\n dequeue.append(float(ls[1]))\r\n\r\nfor i in range(min(len(enqueue),len(dequeue))):\r\n diff.append(dequeue[i]-enqueue[i])\r\n\r\n\r\nf = open(\"tcp-example.txt\",\"w\")\r\n\r\nfor i in range(len(diff)):\r\n print(f\"{enqueue[i]} {diff[i]}\",file = f)\r\n\r\nf.close()\r\n\r\nplt.plot(enqueue[:len(diff)],diff)\r\nplt.show()\r\n","repo_name":"utkar22/Computer_Networks_Assignments","sub_path":"Assignment 3/plot_queue_time.py","file_name":"plot_queue_time.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"5807627987","text":"import time\nfrom collections import deque\n\nimport torch\nimport torch.nn.functional as F\n\nfrom breakout_a3c.envs import create_atari_env\nfrom breakout_a3c.model import ActorCritic\n\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef test(rank, args, shared_model, counter):\n torch.manual_seed(args.seed + rank)\n\n if args.test_gan:\n log_name = 'breakout_a3c/' + args.gan_dir\n gan_path = args.gan_models_path + args.gan_dir + '/checkpoints'\n files = [join(gan_path, f).split('_')[1].split('.')[0] for f in listdir(gan_path) if\n isfile(join(gan_path, f)) and f.startswith('gen')]\n gan_file = files.pop(0)\n env = create_atari_env(args.env_name, args, True, gan_file)\n else:\n env = create_atari_env(args.env_name, args)\n env.seed(args.seed + rank)\n\n model = ActorCritic(env.observation_space.shape[0], env.action_space)\n\n model.eval()\n\n state = env.reset()\n state = torch.from_numpy(state)\n reward_sum = 0\n done = True\n\n start_time = time.time()\n\n # a quick hack to prevent the agent from stucking\n actions = deque(maxlen=100)\n episode_length = 0\n while True:\n episode_length += 1\n # Sync with the shared model\n if done:\n model.load_state_dict(shared_model.state_dict())\n cx = torch.zeros(1, 256)\n hx = torch.zeros(1, 256)\n else:\n cx = cx.detach()\n hx = hx.detach()\n\n with torch.no_grad():\n value, logit, (hx, cx) = model((state.unsqueeze(0), (hx, cx)))\n prob = F.softmax(logit, dim=-1)\n action = prob.max(1, keepdim=True)[1].numpy()\n\n state, reward, done, _ = env.step(action[0, 0])\n done = done or episode_length >= args.max_episode_length\n reward_sum += reward\n\n # a quick hack to prevent the agent from stucking\n actions.append(action[0, 0])\n if actions.count(actions[0]) == actions.maxlen:\n done = True\n\n if done:\n if args.test_gan:\n iterations = gan_file\n print(\"Model {}, Score {}\\n\".format(iterations, reward_sum))\n with open('breakout_a3c/' + log_name + '.txt', 'a') as f:\n f.write(\"Model {}, Score {}\\n\".format(iterations, reward_sum))\n else:\n print(\"Time {}, num steps {}, FPS {:.0f}, episode reward {}, episode length {}\".format(\n time.strftime(\"%Hh %Mm %Ss\",\n time.gmtime(time.time() - start_time)),\n counter.value, counter.value / (time.time() - start_time),\n reward_sum, episode_length))\n reward_sum = 0\n episode_length = 0\n actions.clear()\n state = env.reset()\n\n if args.save:\n torch.save({\n 'state_dict': model.state_dict(),\n }, args.env_name + \".pth.tar\")\n\n if args.test_gan:\n if files:\n gan_file = files.pop(0)\n else:\n break\n env = create_atari_env(args.env_name, args, True, gan_file)\n else:\n time.sleep(30)\n\n state = torch.from_numpy(state)\n","repo_name":"ShaniGam/RL-GAN","sub_path":"breakout_a3c/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"52"}
+{"seq_id":"18532906841","text":"import argparse\nimport numpy as np\nfrom collections import namedtuple\nfrom utils.os_utils import smart_makedirs\nfrom utils.bio import read_bio_seq, write_bio_seqs\nfrom itertools import groupby\nimport os\n\nfrom cen_mut_sim import mutate\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--seq\", required=True)\n parser.add_argument(\"-o\", \"--outdir\", required=True)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"-m\", \"--mut\", type=float, default=0.02)\n parser.add_argument(\"-d\", \"--del-len\", type=int, default=1000)\n params = parser.parse_args()\n\n smart_makedirs(params.outdir)\n np.random.seed(params.seed)\n seq = read_bio_seq(params.seq)\n\n del_pos = np.random.randint(0, len(seq) - params.del_len, 1)[0]\n prefix, suffix = seq[:del_pos], seq[del_pos + params.del_len:]\n mut = params.mut\n mut_prefix, uncompr_cigar_prefix = mutate(prefix, mism=mut/2, delet=mut/4, ins=mut/4)\n mut_suffix, uncompr_cigar_suffix = mutate(suffix, mism=mut/2, delet=mut/4, ins=mut/4)\n\n uncompr_cigar = uncompr_cigar_prefix + ['D'] * params.del_len + uncompr_cigar_suffix\n mut_seq = mut_prefix + mut_suffix\n\n cigar = []\n for k, g in groupby(uncompr_cigar):\n cigar.append((k, len(list(g))))\n cigar = ''.join(str(v)+str(k) for k, v in cigar)\n\n with open(os.path.join(params.outdir, \"true_cigar.txt\"), 'w') as f:\n print(cigar, file=f)\n\n write_bio_seqs(os.path.join(params.outdir, \"mod.fasta\"), {\"mod\" : mut_seq})\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"seryrzu/unialigner","sub_path":"tandem_aligner/py/mut_seq_sim.py","file_name":"mut_seq_sim.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"52"}
+{"seq_id":"19097871405","text":"import requests, json\n\n\ndef send_request(endpoint, token, method, data=None):\n try:\n panel_address = token[\"panel_address\"]\n token_type = token[\"token_type\"]\n access_token = token[\"access_token\"]\n request_address = f\"{panel_address}/api/{endpoint}\"\n headers = {\n \"accept\": \"application/json\",\n \"Authorization\": f\"{token_type} {access_token}\",\n }\n response = requests.request(\n method, request_address, headers=headers, data=json.dumps(data)\n )\n # print(response.content)\n response.raise_for_status() # Raise an exception for non-200 status codes\n result = json.loads(response.content)\n return result\n except requests.exceptions.RequestException as ex:\n if response.content:\n raise Exception(f\"Request Exception: { response.content }\")\n else:\n raise ex\n except json.JSONDecodeError as ex:\n raise f\"JSON Decode Error: {ex}\"\n","repo_name":"mewhrzad/marzpy","sub_path":"marzpy/api/send_requests.py","file_name":"send_requests.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"52"}
+{"seq_id":"15583222490","text":"import pickle\r\nfrom os.path import exists\r\n\r\n\r\nclass OutputManager:\r\n def __init__(self, cs_folder):\r\n assert exists(cs_folder)\r\n self.cs_folder = cs_folder\r\n self.results_filename = 'results'\r\n self.results_path_filename_extn = self.cs_folder + \\\r\n self.results_filename + '.pkl'\r\n\r\n def write_results(self, results):\r\n assert isinstance(results, dict), \\\r\n \"'results' must be a dictionary.\"\r\n\r\n if exists(self.results_path_filename_extn):\r\n results_buffer = self.read_results()\r\n for k, v in results.items():\r\n if isinstance(v, list):\r\n results_buffer[k].extend(v)\r\n elif isinstance(v, dict):\r\n results_buffer[k].update(v)\r\n else:\r\n results_buffer[k] = v\r\n else:\r\n results_buffer = results\r\n\r\n with open(self.results_path_filename_extn, 'wb') as file:\r\n pickle.dump(results_buffer, file)\r\n\r\n def read_results(self):\r\n assert exists(self.results_path_filename_extn),\\\r\n self.results_filename + \".pkl does not exist.\"\r\n with open(self.results_path_filename_extn, 'rb') as file:\r\n results_buffer = pickle.load(file)\r\n return results_buffer\r\n","repo_name":"omega-icl/deus","sub_path":"src/deus/activities/output/output_manager.py","file_name":"output_manager.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"}
+{"seq_id":"23354403745","text":"import numpy as np\nimport random\n\n\"\"\"У капелюсі є m*k кульок: по k кульок m кольорів (m > 1). За один раз\nвитягають d кульок (1 < d <= k). Яка ймовірність того, що всі вони одного\nкольору?\nРозв’язати задачу методом Монте-Карло з використанням масивів numpy.\nВекторизувати програмний код, наскільки можливо.\"\"\"\n\nTEST_NUM = 10000\nm = random.randint(2, 10) # кількість кольорів\ncolors = np.arange(m) # масив кольорів\nk = random.randint(2, 10) # кількість кульок одного кольору\nd = random.randint(2, k) # кількість кульок, які ми дістаємо\n\n\ndef check(choice):\n res = np.zeros(TEST_NUM, dtype=bool)\n for i in range(TEST_NUM):\n if np.all(choice[i] == choice[i][0]): # перевіряємо, щоб всі значення в рядку були однакові\n # чи всі значення в рядку еквівалентні першому значенню рядка\n print(choice[i])\n res[i] = True\n # print(res[i])\n return res\n\n\ndef beads_probability(beads, count):\n # beads - можливі варіанти кульок\n # count - кількість кулок, які ми витягаємо за один раз\n # to_extract - те, що ми хочемо отримати\n choice = np.zeros((TEST_NUM, count), dtype=int)\n for i in range(TEST_NUM):\n choice[i, :] = np.random.choice(beads, count, replace=False)\n choice.sort(axis=1) # сортуємо по рядкам\n print(choice)\n res = check(choice)\n return np.sum(res) / TEST_NUM\n\n\nif __name__ == \"__main__\":\n hat = np.random.choice(colors, m * k)\n p = beads_probability(hat, d)\n print(f\"{p * 100:.2f}%\")\n print(f\"Загальна кількість куль: {m * k}\")\n print(f\"Кількість кольорів: {m}\")\n print(f\"Кількість кульок одного кольру: {k}\")\n print(f\"Масив кольорів: {colors}\")\n print(f\"Кількість кульок, яку витягли за один раз: {d}\")\n","repo_name":"anichka14/numpy","sub_path":"t20.24.py","file_name":"t20.24.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"4855690612","text":"\"\"\"Available models in biomasstry package.\"\"\"\n\nfrom .fcn import FCN\nfrom .temporal_model import TemporalSentinelModel\nfrom .unet_tae import UTAE\n\n__all__ = (\n \"FCN\",\n \"TemporalSentinelModel\",\n \"UTAE\",\n)\n\n# https://stackoverflow.com/questions/40018681\nfor module in __all__:\n globals()[module].__module__ = \"biomasstry.models\"","repo_name":"goopyflux/BioMassters","sub_path":"src/biomasstry/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"43096596111","text":"import sys\nimport os\ncurdir = os.path.dirname(os.path.abspath(__file__))\nsys.path += [curdir]\nimport argparse\n\nfrom evaluation.tagging_evaluation_util import get_tagging_results\nfrom evaluation.evaluation_stats_util import F1Stats\nfrom evaluation.raw_result_parser import iter_results\nfrom evaluation.ioutil import open_file\n\nclass Options:\n def __init__(self):\n self.raw_prediction = 'example/raw_prediction_example.txt'\n self.test_file = 'example/test_file_example.json.gz'\n self.schema = 'BIO2'\n self.fuzzy = True\n\n\ndef get_val_result(options, labels):\n stats = F1Stats(options.fuzzy, need_every_score=options.need_every_score)\n # print(\"len(labels)[evaluate_tagging_result]:\",len(labels))\n for q_tokens, e_tokens, tags, golden_answers in \\\n iter_results(labels, options.test_file, options.schema):\n if q_tokens is None: continue # one question has been processed\n # print('len tags:', len(tags), 'len e_tokens:', len(e_tokens))\n pred_answers = get_tagging_results(e_tokens, tags)\n # print(\"pred_answers:\", pred_answers)\n # print(\"golden_answers:\", golden_answers)\n stats.update(golden_answers, pred_answers)\n # print((stats.get_metrics_str()))\n return stats\n\n\nif __name__ == '__main__':\n options = Options()\n get_val_result(options, labels=options.raw_prediction)\n# chunk_f1=0.413521 chunk_precision=0.436303 chunk_recall=0.393000 true_chunks=4000 result_chunks=3603 correct_chunks=1572\n# chunk_f1=0.460345 chunk_precision=0.485706 chunk_recall=0.437500 true_chunks=4000 result_chunks=3603 correct_chunks=1750\n","repo_name":"WangJiuniu/SRQA","sub_path":"evaluation/evaluate_tagging_result.py","file_name":"evaluate_tagging_result.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"}
+{"seq_id":"15113206684","text":"ensaladaDeFrutas = [\"manzana\", \"mango\", \"banano\", \"yogurt\", \"queso\", \"helado\", \"fresa\"]\n\n\n# for ingrediente in ensaladaDeFrutas: # recorrido de la lista por ciclo FOR IN\n# print(ingrediente.upper())\n\n\n# print(ensaladaDeFrutas[:4]) # devuelve una lista hasta el index indicado\n# print(ensaladaDeFrutas[3]) # devuelve solo el index indicado\n# print(ensaladaDeFrutas[4:]) # devuelve desde el index indicado hasta el final de la lista\n# print(ensaladaDeFrutas[-1]) # empieza a contar desde del último index para atras [-2] muestra el penultimo, etc.\n# print(ensaladaDeFrutas[-3:]) \n\n\n# ---------------\n# verificar por un if si hay un elemento dentro de una lista\n# fruta = input('que ingrediente en la ensalada de frutas?: ')\n# if fruta in ensaladaDeFrutas:\n# print(fruta+ ' si esta incluido en la ensalada de frutas')\n# else:\n# print(fruta+' no esta incluido en la ensalada de frutas')\n\n\n# ---------------\n# Reemplazar un valor de un index en una lista\nensaladaDeFrutas[3] = \"granola\"\nprint(ensaladaDeFrutas)\n\n# Reemplazar un valor de un rango en una lista\nensaladaDeFrutas[2:4] = [\"sandia\",\"kiwi\"] \nprint(ensaladaDeFrutas)\n","repo_name":"OrionAlzate/Python-course","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"38614413286","text":"'''\nGiven an integer array nums and an integer k, return true if it is possible to divide this array into k non-empty subsets whose sums are all equal.\n\nExample 1:\nInput: nums = [4,3,2,3,5,2,1], k = 4\nOutput: true\nExplanation: It's possible to divide it into 4 subsets (5), (1, 4), (2,3), (2,3) with equal sums.\n\nExample 2:\nInput: nums = [1,2,3,4], k = 3\nOutput: false\n\nConstraints:\n1 <= k <= nums.length <= 16\n1 <= nums[i] <= 104\nThe frequency of each element is in the range [1, 4].\n'''\n\n\nclass Solution:\n def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:\n\n # if not nums or int(sum(nums)/k) != sum(nums)/k:\n # return False\n\n # def dfs(parts, nums, idx):\n # if idx == len(nums):\n # return not sum(parts)\n # for i in range(len(parts)):\n # if parts[i] >= nums[idx]:\n # parts[i] -= nums[idx]\n # if dfs(parts, nums, idx+1):\n # return True\n # parts[i] += nums[idx]\n\n # nums.sort(reverse=True)\n # parts = [sum(nums)/k]*k\n # return dfs(parts, nums, 0)\n\n totalSum = sum(nums)\n if totalSum % k != 0: return False\n subSum = totalSum // k\n nums.sort(reverse=True)\n parts = [subSum] * k\n\n def findSubSum(parts, idx):\n if idx == len(nums): return not sum(parts)\n for i in range(len(parts)):\n if parts[i] >= nums[idx]:\n parts[i] -= nums[idx]\n if findSubSum(parts, idx + 1):\n return True\n parts[i] += nums[idx]\n return False\n\n return findSubSum(parts, 0)","repo_name":"kumarsup/leetcode-solution-data_structure_and_algorithm","sub_path":"698_Partition_to_K_Equal_Sum_Subsets.py","file_name":"698_Partition_to_K_Equal_Sum_Subsets.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
+{"seq_id":"13077619295","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom . import Extension\nfrom ..blockprocessors import BlockProcessor\nfrom ..util import etree\n\n\nclass TableProcessor(BlockProcessor):\n \"\"\" Process Tables. \"\"\"\n\n def test(self, parent, block):\n rows = block.split('\\n')\n return (len(rows) > 1 and '|' in rows[0] and\n '|' in rows[1] and '-' in rows[1] and\n rows[1].strip()[0] in ['|', ':', '-'])\n\n def run(self, parent, blocks):\n \"\"\" Parse a table block and build table. \"\"\"\n block = blocks.pop(0).split('\\n')\n header = block[0].strip()\n seperator = block[1].strip()\n rows = [] if len(block) < 3 else block[2:]\n # Get format type (bordered by pipes or not)\n border = False\n if header.startswith('|'):\n border = True\n # Get alignment of columns\n align = []\n for c in self._split_row(seperator, border):\n if c.startswith(':') and c.endswith(':'):\n align.append('center')\n elif c.startswith(':'):\n align.append('left')\n elif c.endswith(':'):\n align.append('right')\n else:\n align.append(None)\n # Build table\n table = etree.SubElement(parent, 'table')\n thead = etree.SubElement(table, 'thead')\n self._build_row(header, thead, align, border)\n tbody = etree.SubElement(table, 'tbody')\n for row in rows:\n self._build_row(row.strip(), tbody, align, border)\n\n def _build_row(self, row, parent, align, border):\n \"\"\" Given a row of text, build table cells. \"\"\"\n tr = etree.SubElement(parent, 'tr')\n tag = 'td'\n if parent.tag == 'thead':\n tag = 'th'\n cells = self._split_row(row, border)\n # We use align here rather than cells to ensure every row\n # contains the same number of columns.\n for i, a in enumerate(align):\n c = etree.SubElement(tr, tag)\n try:\n c.text = cells[i].strip()\n except IndexError: # pragma: no cover\n c.text = \"\"\n if a:\n c.set('align', a)\n\n def _split_row(self, row, border):\n \"\"\" split a row of text into list of cells. \"\"\"\n if border:\n if row.startswith('|'):\n row = row[1:]\n if row.endswith('|'):\n row = row[:-1]\n return row.split('|')\n\n\nclass TableExtension(Extension):\n \"\"\" Add tables to Markdown. \"\"\"\n\n def extendMarkdown(self, md, md_globals):\n \"\"\" Add an instance of TableProcessor to BlockParser. \"\"\"\n md.parser.blockprocessors.add('table',\n TableProcessor(md.parser),\n ' added \" + message + \"
\", b\" \")\n X_batch = tf.strings.regex_replace(X_batch, b\"[^a-zA-Z']\", b\" \")\n X_batch = tf.strings.split(X_batch)\n return X_batch.to_tensor(default_value=b\"\"])\n for tr in table:\n html_string = ''.join([html_string, \"
\"])\n\n return html_string\n\n def to_line(self):\n self.temp_text = self.temp_text.replace('\\n', ' ').replace('\\r', ' ').replace('\\t', ' ')\n \n if self.temp_text.strip() != \"\":\n if self.depth_t > 0 or self.depth_l > 0:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'text',\n 'value': self.temp_text.strip()}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'text',\n 'value': self.temp_text.strip()}\n self.lines += 1\n\n self.temp_text = \"\"\n \n def start(self, tag, attrs):\n tag = tag.split('}')[1] if '}' in tag else tag\n\n if tag in self.parsing:\n self.parsing[tag] += 1\n\n if tag=='custom-shape':\n self.custom_shape_concat=True\n\n if tag!='custom-shape' and self.parsing['custom-shape']==0 and self.custom_shape_concat:\n self.custom_shape_concat = False\n self.to_line()\n \n if tag=='image':\n for attr in attrs:\n if attr.endswith('href'):\n if self.depth_t > 0 or self.depth_l > 0:\n self.temp_lines[-1][self.lines_t[-1]] = {'type':'img',\n 'value': attrs[attr]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'img',\n 'value': attrs[attr]}\n self.lines += 1\n \n elif tag=='line-break':\n self.to_line()\n\n elif tag in ['tab', 's']:\n self.temp_text = ''.join([self.temp_text, ' '])\n \n elif tag=='list-item':\n self.temp_lines.append({})\n self.lines_t.append(0)\n\n elif tag=='list-header':\n self.temp_lines.append({})\n self.lines_t.append(0)\n self.list_header.append(0)\n\n elif tag=='list':\n self.list_item.append(0)\n self.temp_list.append({})\n self.depth_l += 1\n\n elif tag=='table-cell':\n self.temp_rowspan.append('1')\n self.temp_colspan.append('1')\n for attr in attrs:\n if attr.endswith('number-rows-spanned'):\n self.temp_rowspan[-1] = attrs[attr]\n elif attr.endswith('number-columns-spanned'):\n self.temp_colspan[-1] = attrs[attr]\n \n self.temp_lines.append({})\n self.lines_t.append(0)\n\n elif tag=='table-row':\n self.cols.append(0)\n self.temp_row.append({})\n\n elif tag=='table':\n self.temp_caption.append('')\n for attr in attrs:\n if attr.endswith('}name'):\n self.temp_caption[-1] = (attrs[attr])\n\n self.rows.append(0)\n self.temp_table.append({})\n self.depth_t += 1\n self.leaf_table = True\n\n if self.leaf_lines:\n self.leaf_lines = False\n \n return TreeBuilder.start(self, tag, attrs)\n\n def end(self, tag):\n tag = tag.split('}')[1] if '}' in tag else tag\n\n if tag == 'automatic-styles':\n self.body_start = True\n \n elif tag=='g':\n self.custom_shape_concat=False\n self.to_line()\n\n elif (not self.custom_shape_concat) and tag=='p' and self.parsing['note']==0:\n self.to_line()\n\n elif tag=='list-item':\n self.temp_list[-1][self.list_item[-1]] = {'type': 'list-item',\n 'value': self.temp_lines[-1]}\n \n self.list_item[-1] += 1\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='list-header':\n self.temp_list[-1][self.list_header[-1]] = {'type': 'list-header',\n 'value': self.temp_lines[-1]}\n self.list_header[-1] += 1\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='list':\n if self.depth_t > 0 or self.depth_l > 1:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'list',\n 'value': self.temp_list[-1]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'list',\n 'value': self.temp_list[-1]}\n self.lines += 1\n \n self.temp_list = self.temp_list[:-1]\n self.depth_l -= 1\n self.list_item = self.list_item[:-1]\n\n elif tag=='table-cell':\n if self.custom_shape_concat:\n self.to_line()\n \n if 
self.leaf_lines:\n self.leaf_lines = False\n\n if len(self.temp_lines) > 0:\n table_idx = 0\n \n for temp_line in self.temp_lines[-1]:\n if self.temp_lines[-1][temp_line]['type'] == 'table':\n table_idx = temp_line\n\n for temp_line in range(table_idx, len(self.temp_lines[-1])):\n self.result_dict[self.lines] = self.temp_lines[-1][temp_line]\n self.lines += 1\n \n self.temp_row[-1][self.cols[-1]] = {'rowspan': self.temp_rowspan[-1],\n 'colspan': self.temp_colspan[-1],\n 'value': self.temp_lines[-1]}\n \n self.cols[-1] += 1\n\n self.temp_rowspan = self.temp_rowspan[:-1]\n self.temp_colspan = self.temp_colspan[:-1]\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='table-row':\n self.temp_table[-1][self.rows[-1]] = self.temp_row[-1]\n\n self.temp_row = self.temp_row[:-1]\n self.rows[-1] += 1\n self.cols = self.cols[:-1]\n\n elif tag=='table':\n caption = self.temp_caption[-1]\n self.temp_caption = self.temp_caption[:-1]\n\n if self.leaf_table:\n self.leaf_table = False\n self.leaf_lines = True\n\n if len(self.temp_lines) > 0:\n for temp_line in self.temp_lines[-1]:\n self.result_dict[self.lines] = self.temp_lines[-1][temp_line]\n self.lines += 1\n \n self.lines_t[-1] = 0\n\n html_string = self.to_html(self.temp_table[-1])\n \n if self.depth_t > 1 or self.depth_l > 1:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'table',\n 'caption': caption,\n 'number': self.table_number,\n 'html': html_string,\n 'value': self.temp_table[-1]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'table',\n 'caption': caption,\n 'number': self.table_number,\n 'html': html_string,\n 'value': self.temp_table[-1]}\n self.lines += 1\n \n self.table_number += 1\n\n self.temp_table = self.temp_table[:-1]\n self.depth_t -= 1\n self.rows = self.rows[:-1]\n\n if tag in self.parsing:\n self.parsing[tag] -= 1\n \n return TreeBuilder.end(self, tag)\n\n def data(self, data):\n if self.parsing['span'] > 0 and self.parsing['note']==0:\n self.temp_text = ''.join([self.temp_text, data])\n elif self.parsing['p'] > 0 and self.parsing['note']==0:\n self.temp_text = ''.join([self.temp_text, data])\n \n return TreeBuilder.data(self, data)\n\n def close(self):\n return self.result_dict","repo_name":"hkyoon94/AGC_task12","sub_path":"inference/tree_builder.py","file_name":"tree_builder.py","file_ext":"py","file_size_in_byte":10931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"432572204","text":"\nfrom typing import List\nimport numpy as np\nimport os\nimport multiprocessing as mp\nimport time\nfrom datetime import datetime\nimport pickle\nfrom dataclasses import dataclass\n\nimport argparse\n\nfrom rankers import BayesianRanker\n\nfrom initializers.bow_initializer import BoWInitializer\n\nfrom displays.ransam_display import RanSamDisplay\nfrom displays import TopNDisplay\nfrom displays import SOMDisplay \n\nfrom users import RanSamPriorUser\nfrom users import LogitUser\nfrom users import IdealUser\nfrom users import NullUser\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--seed\", default=42, type=int, help=\"Random seed.\")\nparser.add_argument(\"--processes\", default=-1, type=int, help=\"Number of precesses spawned.\")\n\nparser.add_argument(\"--params_batch\", default=0, type=int, help=\"Which experiments to be conducted [0,1,2].\")\n\nparser.add_argument(\"--annotations\", default=\"data/annotations.csv\", type=str,\n help=\"Annotations to be simulated.\")\n\nparser.add_argument(\"--dataset_path\", default=\"v3c1\", type=str,\n help=\"Root to dataset path.\")\nparser.add_argument(\"--features_name\", default=\"V3C1_20191228.w2vv.images.normed.128pca.viretfromat\", type=str,\n help=\"Name of file with image features.\")\n\nparser.add_argument(\"--keywords_list_name\", default=\"word2idx.txt\", type=str,\n help=\"Name of file with keyword features.\")\nparser.add_argument(\"--kw_features_name\", default=\"txt_weight-11147x2048floats.bin\", type=str,\n help=\"Name of file with keyword features.\")\nparser.add_argument(\"--kw_bias_name\", default=\"txt_bias-2048floats.bin\", type=str,\n help=\"Name of file with keyword bias.\")\nparser.add_argument(\"--pca_matrix_name\", default=\"V3C1_20191228.w2vv.pca.matrix.bin\", type=str,\n help=\"Name of file with pca matrix.\")\nparser.add_argument(\"--pca_mean_name\", default=\"V3C1_20191228.w2vv.pca.mean.bin\", type=str,\n help=\"Name of file with pca mean.\")\n\nparser.add_argument(\"--pickle_root\", default=\"pickle\", type=str,\n help=\"Root of pickle models.\")\nparser.add_argument(\"--pickle_model\", default=\"pcu.prior.pickle\", type=str,\n help=\"Name of pickled user model.\")\n\nparser.add_argument(\"--verbose\", default=False, action=\"store_true\", help=\"Verbose\")\n\nparser.add_argument(\"--output_prefix\", default=\"\", type=str,\n help=\"Prefix of the output file.\")\n\n@dataclass\nclass SimParameters:\n likes: int\n display_types: list\n database_part: float\n text_query: str\n target_id: int\n\nclass Simulator(mp.Process):\n\n def __init__(self, sim_args, par_q: mp.Queue, res_q: mp.Queue, **wargs):\n super().__init__(**wargs)\n np.random.seed(args.seed)\n self._par_q = par_q\n self._res_q = res_q\n\n features = np.fromfile(os.path.join(sim_args.dataset_path, sim_args.features_name), dtype='float32')\n features = features[3:]\n features = features.reshape(int(features.shape[0] / 128), 128)\n self._features = features\n self._kw_init = BoWInitializer(features, \n os.path.join(sim_args.dataset_path, sim_args.keywords_list_name), \n os.path.join(sim_args.dataset_path, sim_args.kw_features_name),\n os.path.join(sim_args.dataset_path, sim_args.kw_bias_name),\n os.path.join(sim_args.dataset_path, sim_args.pca_matrix_name),\n os.path.join(sim_args.dataset_path, sim_args.pca_mean_name)\n )\n with open(os.path.join(sim_args.pickle_root, sim_args.pickle_model), 'rb') as handle:\n self._user = pickle.load(handle)\n self._user._features = features\n self._ranker = 
BayesianRanker(features, features.shape[0])\n\n self._displays = {\"som\": SOMDisplay(self._features, seed=sim_args.seed), \"top\": TopNDisplay()}\n\n def run(self):\n while True:\n par = self._par_q.get()\n if par is None:\n break\n \n # Parse simulation parameters\n likes = par.likes\n display_types = par.display_types\n database_part = par.database_part\n text_query = par.text_query\n target_id = par.target_id\n\n # Make some assumtions on parameters\n assert likes > 0\n assert likes < 64\n assert database_part is None or (database_part <= 1.0 and database_part > 0.0) \n assert isinstance(target_id, int)\n\n # Initialize search structures\n self._user._count = likes\n self._user._target = target_id\n\n self._ranker.reset()\n self._ranker._scores = self._kw_init.score(text_query)\n self._ranker.normalize()\n\n # Set zero score to filtered elements\n zero_indeces = np.array([], dtype=np.int64)\n if database_part is not None:\n nonzero_count = int(database_part * self._ranker._scores.shape[0])\n zero_indeces = np.flip(np.argsort(self._ranker._scores))[nonzero_count:]\n self._ranker._scores[zero_indeces] = 0 \n\n # Run simulations\n found = -1\n for iteration, disp_type in enumerate(display_types):\n display = self._displays[disp_type].generate(self._ranker.scores)\n\n if target_id in display:\n found = iteration\n break\n\n likes = self._user.decision(display)\n self._ranker.apply_feedback(likes, display)\n self._ranker._scores[zero_indeces] = 0\n \n # Return result\n par.found = found\n self._res_q.put(par)\n \n\ndef parameters_generation0(args, targets: list, text_queries: list, par_q: mp.Queue):\n like_counts = range(1, 5)\n display_types = [[\"som\" for _ in range(10)], \n [\"top\" for _ in range(10)],\n [\"som\" for _ in range(5)] + [\"top\" for _ in range(5)],\n [(\"som\" if i % 2 == 0 else \"top\") for i in range(10)],\n [(\"som\" if i % 2 == 1 else \"top\") for i in range(10)]]\n reps = 0\n for lik in like_counts:\n for tar, text_query in zip(targets, text_queries):\n for disp_type in display_types:\n par_q.put(SimParameters(lik, disp_type, None, text_query, tar))\n reps += 1\n\n return reps\n\n\ndef parameters_generation1(args, targets: list, text_queries: list, par_q: mp.Queue):\n like_counts = [3]\n display_types = [[(\"som\" if i % 2 == 0 else \"top\") for i in range(10)]]\n db_parts = [0.05, 0.1]\n reps = 0\n for lik in like_counts:\n for tar, text_query in zip(targets, text_queries):\n for db_part in db_parts:\n for disp_type in display_types:\n par_q.put(SimParameters(lik, disp_type, db_part, text_query, tar))\n reps += 1\n\n return reps\n\n\ndef parameters_generation2(args, targets: list, text_queries: list, par_q: mp.Queue):\n like_counts = [3]\n display_types = [[(\"som\" if i % 2 == 0 else \"top\") for i in range(10)]]\n reps = 0\n for lik in like_counts:\n for tar, text_query in zip(targets, text_queries):\n for disp_type in display_types:\n par_q.put(SimParameters(lik, disp_type, None, text_query, tar))\n reps += 1\n\n return reps\n\ndef main(args):\n np.random.seed(args.seed)\n processes = args.processes\n if processes <= 0:\n processes = mp.cpu_count()\n \n par_q = mp.Queue()\n res_q = mp.Queue()\n jobs = []\n for i in range(processes):\n sim = Simulator(args, par_q, res_q, name=f\"Simulator {i}\")\n jobs.append(sim)\n sim.start()\n \n # Add parameters\n targets = []\n text_queries = []\n with open(args.annotations, \"r\") as f:\n for line in f.readlines():\n target_id, text_query = line.strip().split(\",\")\n targets.append(int(target_id))\n 
text_queries.append(text_query)\n\n reps = 0\n if args.params_batch == 0:\n reps = parameters_generation0(args, targets, text_queries, par_q)\n elif args.params_batch == 1:\n reps = parameters_generation1(args, targets, text_queries, par_q)\n elif args.params_batch == 2:\n reps = parameters_generation2(args, targets, text_queries, par_q)\n else:\n raise Exception(\"Unknown type of params_batch\")\n\n # Add poison pill\n for i in range(processes):\n par_q.put(None)\n\n # Collect results\n start = datetime.now()\n print(\"Simulations started\\n\")\n res = []\n with open(f\"data/{args.output_prefix}strategy_search_output.{int(time.time())}.csv\", \"w\") as of:\n for i in range(reps):\n last_res = res_q.get()\n res.append(last_res)\n delta = datetime.now() - start\n per_instance = delta / len(res)\n left = (reps - len(res)) * per_instance\n print(f\"Done: {len(res)}/{reps}\\tTime elapsed: {delta}\\tTime left: {left}\\t\\t\\t\", end=\"\\n\", flush=True)\n of.write(f\"{last_res.likes},{last_res.display_types},{last_res.database_part},{last_res.text_query},{last_res.target_id},{last_res.found}\\n\")\n of.flush()\n\n print(\"\\n********************\")\n print(res, flush=True)\n\nif __name__ == \"__main__\":\n args = parser.parse_args([] if \"__file__\" not in globals() else None)\n main(args)\n","repo_name":"siret-junior/somhunter-simulator","sub_path":"strategy_search.py","file_name":"strategy_search.py","file_ext":"py","file_size_in_byte":9451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
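The `Simulator` record above coordinates its worker processes through a pair of `mp.Queue`s and shuts them down with `None` sentinels, one per process. A minimal, self-contained sketch of that producer/consumer pattern, with a squaring step standing in for the real simulation:

```python
import multiprocessing as mp


class Worker(mp.Process):
    """Consumes items from par_q until it receives a None sentinel."""

    def __init__(self, par_q: mp.Queue, res_q: mp.Queue, **kwargs):
        super().__init__(**kwargs)
        self._par_q = par_q
        self._res_q = res_q

    def run(self):
        while True:
            par = self._par_q.get()
            if par is None:  # poison pill: stop consuming and exit
                break
            self._res_q.put(par * par)  # stand-in for the real simulation step


if __name__ == "__main__":
    par_q, res_q = mp.Queue(), mp.Queue()
    workers = [Worker(par_q, res_q, name=f"Worker {i}") for i in range(4)]
    for w in workers:
        w.start()
    jobs = list(range(10))
    for job in jobs:        # enqueue all work first ...
        par_q.put(job)
    for _ in workers:       # ... then exactly one pill per worker
        par_q.put(None)
    results = [res_q.get() for _ in jobs]  # collect exactly len(jobs) results
    for w in workers:
        w.join()
    print(sorted(results))
```

Because the queue is FIFO, every job is dequeued before any worker can reach a sentinel, so no work is lost on shutdown.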
+{"seq_id":"1051659478","text":"import json\nimport logging\nimport os\n\nfrom .config import (\n DOCKER_PLUGIN_DIR,\n DOCKER_PLUGIN_CONFIG_PATH,\n FILESTORAGE_MAPPING,\n ETC_DIR,\n ETC_CONFIG_PATH,\n LOG_DIR,\n OPT_DIR,\n PHYSICAL_VOLUME,\n PORT,\n SERVICE_DIR,\n SERVICE_EXEC_START,\n SERVICE_PATH,\n SERVICE_NAME,\n VOLUME_GROUP\n)\nfrom .core import LvmPyError, run_cmd\nfrom .cleanup import cleanup_volumes\n# from .health import run_healthcheck\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_folders():\n logger.info('Creating folders')\n for path in (\n DOCKER_PLUGIN_DIR,\n ETC_DIR,\n LOG_DIR,\n OPT_DIR,\n SERVICE_DIR\n ):\n os.makedirs(path, exist_ok=True)\n\n\ndef stop_service(name=SERVICE_NAME):\n logger.info('Stopping service %s', name)\n run_cmd(['systemctl', 'daemon-reload'])\n try:\n run_cmd(['systemctl', 'stop', name])\n run_cmd(['systemctl', 'disable', name])\n except LvmPyError as e:\n logger.warning('Lvmpy service cannot be stopped %s', e)\n\n\ndef start_service(name=SERVICE_NAME):\n logger.info('Starting service %s', name)\n run_cmd(['systemctl', 'daemon-reload'])\n run_cmd(['systemctl', 'enable', name])\n run_cmd(['systemctl', 'start', name])\n\n\ndef load_btrfs_kernel_module():\n logger.info('Loading btrfs kernel module')\n run_cmd(['modprobe', 'btrfs'])\n\n\ndef generate_systemd_service_config(\n exec_start=SERVICE_EXEC_START,\n etc_config_path=ETC_CONFIG_PATH\n):\n return f\"\"\"\n[Unit]\nDescription=python lvm docker plugin\nConflicts=getty@tty1.service\nAfter=network.target\n\n[Service]\nType=simple\nWorkingDirectory=/opt/docker-lvmpy/\nExecStart={exec_start}\nEnvironmentFile={etc_config_path}\nRestart=on-failure\nKillSignal=SIGINT\nStandardError=syslog\nNotifyAccess=all\n\n[Install]\nWantedBy=multi-user.target\n\"\"\"\n\n\ndef generate_plugin_config(port=PORT):\n return {\n 'Name': 'lvmpy',\n 'Description': 'A simple volume driver for lvm volumes written in python',\n 'Addr': f'http://127.0.0.1:{port}'\n }\n\n\ndef generate_etc_config(block_device, volume_group, filestorage_mapping):\n return '\\n'.join([\n f'PHYSICAL_VOLUME={block_device}',\n f'VOLUME_GROUP={volume_group}',\n f'FILESTORAGE_MAPPING={filestorage_mapping}'\n ])\n\n\ndef generate_config_files(\n block_device=PHYSICAL_VOLUME,\n volume_group=VOLUME_GROUP,\n filestorage_mapping=FILESTORAGE_MAPPING,\n exec_start=SERVICE_EXEC_START,\n etc_config_path=ETC_CONFIG_PATH,\n port=PORT\n):\n logger.info('Generating config files. 
Exec start [%s]', exec_start)\n\n    docker_plugin_config = generate_plugin_config(port=port)  # honor the port argument, not the module default\n\n    with open(DOCKER_PLUGIN_CONFIG_PATH, 'w') as docker_plugin_config_file:\n        json.dump(docker_plugin_config, docker_plugin_config_file)\n\n    service_config = generate_systemd_service_config(\n        exec_start=exec_start,\n        etc_config_path=etc_config_path\n    )\n\n    with open(SERVICE_PATH, 'w') as service_file:\n        service_file.write(service_config)\n\n    etc_config = generate_etc_config(\n        block_device=block_device,\n        volume_group=volume_group,\n        filestorage_mapping=filestorage_mapping\n    )\n    with open(ETC_CONFIG_PATH, 'w') as etc_config_file:\n        etc_config_file.write(etc_config)\n\n\ndef setup(\n    service_name=SERVICE_NAME,\n    block_device=PHYSICAL_VOLUME,\n    volume_group=VOLUME_GROUP,\n    filestorage_mapping=FILESTORAGE_MAPPING,\n    exec_start=SERVICE_EXEC_START,\n    etc_config_path=ETC_CONFIG_PATH,\n    port=PORT\n):\n    stop_service(name=service_name)\n    load_btrfs_kernel_module()\n    cleanup_volumes(\n        block_device=block_device,\n        volume_group=volume_group\n    )\n    create_folders()\n    generate_config_files(\n        block_device=block_device,\n        volume_group=volume_group,\n        filestorage_mapping=filestorage_mapping,\n        exec_start=exec_start,\n        etc_config_path=etc_config_path,\n        port=port\n    )\n    start_service(name=service_name)\n    # run_healthcheck(vg=volume_group)\n\n\ndef main():\n    print('Setting up docker-lvmpy server')\n    setup()\n    print('Setup of docker-lvmpy completed')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"skalenetwork/docker-lvmpy","sub_path":"src/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"}
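The installer above makes Docker aware of the driver by dropping a JSON spec into Docker's plugin discovery directory, pointing at the plugin's HTTP address. A sketch of just that step; the port value is illustrative, and the spec is written to a temporary directory so the snippet runs without root (a real install writes to the path in DOCKER_PLUGIN_CONFIG_PATH, conventionally under /etc/docker/plugins):

```python
import json
import os
import tempfile


def generate_plugin_config(port: int) -> dict:
    # Docker discovers out-of-process plugins from *.json spec files that
    # name the plugin and give the address its HTTP API listens on.
    return {
        "Name": "lvmpy",
        "Description": "A simple volume driver for lvm volumes written in python",
        "Addr": f"http://127.0.0.1:{port}",
    }


if __name__ == "__main__":
    plugin_dir = tempfile.mkdtemp()  # stand-in for /etc/docker/plugins
    config_path = os.path.join(plugin_dir, "lvmpy.json")
    with open(config_path, "w") as f:
        json.dump(generate_plugin_config(port=7373), f, indent=2)
    with open(config_path) as f:
        print(f.read())
```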
+{"seq_id":"70840990244","text":"# Identify location\nimport socket\nlocation = socket.gethostname()\nif location == 'Orthanc':\n dropbox = 'E:\\\\Users\\\\Chris\\\\Dropbox\\\\'\nif location == 'sputnik':\n dropbox = '/home/dhris/Dropbox/'\nif location == 'saruman':\n dropbox = '/home/herdata/spx7cjc/Dropbox/'\n\n# Import smorgasbord\nimport os\nimport warnings\nwarnings.simplefilter('ignore', category=Warning)\nimport matplotlib\nmatplotlib.use(\"Pdf\")\nimport matplotlib.pyplot as plt\nplt.ioff()\nimport astropy.logger\nastropy.log.setLevel('ERROR')\nimport AstroCell.Main\n\n\n\n# Main process\nif __name__ == '__main__':\n\n # State input directory for test data (various options)\n test_dir = 'Test_Data/'\n #img_dir = 'Histochemial/3100_zeb1/'\n #img_dir = 'Flourescant/Liver/APCFLOX1668/'\n #img_dir = 'Flourescant/Mammary/Ref_LO/'\n #img_dir = 'Histochemial/Mammary/Ref_LO/'\n img_dir = 'Histochemial/Mammary/Cytoplasm/'\n in_dir = os.path.join(test_dir, img_dir)\n\n # Set output directory for Dills (like pickle jars, these are snapshots to resume AstroCell from a 'saved' point, for testing)\n if location == 'sputnik':\n dill_dir = os.path.join( os.path.expanduser('~'), '/Data/AstroCell/Dills/' )\n else:\n dill_dir = False\n\n\n # Launch AstroCell\n AstroCell.Main.Run(in_dir=in_dir, cell_colours=2, substructure_flag=True, parallel=7, mc_factor=1.0, dill_dir=dill_dir, verbose=True)","repo_name":"Stargrazer82301/AstroCell","sub_path":"Test/AstroCell_Test.py","file_name":"AstroCell_Test.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"73014940644","text":"import json\n\nimport weaviate\n\nimport re\ndef read_md(path):\n with open(path, 'r', encoding='utf-8') as f:\n text = f.read()\n\n # 定义正则表达式模式\n title_pattern = r\"^(# .+)\"\n author_pattern = r\">\\s{2}(.+)\"\n\n # 提取标题和作者\n title_match = re.search(title_pattern, text, re.MULTILINE)\n author_match = re.search(author_pattern, text, re.MULTILINE)\n\n if title_match and author_match:\n title = title_match.group(1)\n author = author_match.group(1)\n # 去除标题中的井号和空格\n title = title.replace(\"# \", \"\")\n # 提取内容部分\n content = text[author_match.end():].strip()\n content = content.replace(\"*\", \"\").replace(\"#\", \"\")\n result = {\n \"title\": title,\n \"author\": author,\n \"content\": content\n }\n return result\nproperties=[]\nproperties.append(read_md(\"data/test-dataset-1.md\"))\nproperties.append(read_md(\"data/test-dataset-2.md\"))\nclient = weaviate.Client(\n url=\"https://lcy-3koo0kfs.weaviate.network\", # Replace with your endpoint\n additional_headers={\n \"X-HuggingFace-Api-Key\": \"\"\n }\n)\n\nclass_obj = {\n \"class\": \"Article\",\n \"vectorizer\": \"text2vec-huggingface\",\n \"moduleConfig\": {\n \"text2vec-huggingface\": {\n \"model\": \"bert-base-chinese\",\n \"options\": {\n \"waitForModel\": True,\n }\n }\n }\n}\nclient.schema.delete_class(class_name='Article')\nclient.schema.create_class(class_obj)\n\nwith client.batch(\n batch_size=100\n) as batch:\n # Batch import all Questions\n for i, d in enumerate(properties):\n print(f\"importing data: {i+1}\")\n\n properties = {\n \"title\": d[\"title\"],\n \"author\": d[\"author\"],\n \"content\": d[\"content\"][0:490],\n }\n client.batch.add_data_object(\n properties,\n \"Article\",\n )\n\nresponse = (\n client.query\n .aggregate(\"Article\")\n .with_meta_count()\n .do()\n)\n\nprint(json.dumps(response, indent=2))\n\n","repo_name":"lcy5058/pythonProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"3107188007","text":"\"\"\" Behavior_cloning for selfdriving car project\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n# google specific imports\nimport google3\nfrom google3.pyglib import gfile\nfrom google3.util.textprogressbar import pybar\nimport tensorflow.google as tf\nfrom google3.sstable.python import sstable\n\nfrom google3.learning.deepmind.python import app\nfrom google3.learning.deepmind.python import flags\nfrom google3.learning.deepmind.python import logging\n\n#mprint = tf.app.logging.info\nmprint = logging.info\nmprint('google3 imports done')\n\n\n\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers.core import Flatten,Dense,Lambda,Dropout\nfrom keras.layers.convolutional import Conv2D,Cropping2D\nfrom keras.layers.pooling import MaxPooling2D\nimport keras.callbacks as kcb\n\nmprint('keras imports done')\nimport pandas as pd\nfrom PIL import Image\nimport numpy as np\n\nmprint('mlstuff done')\n\nimport matplotlib.pyplot as plt\nmprint('matplotlib imported')\n\nimport csv\nimport tempfile\nfrom contextlib import contextmanager\nimport collections\nimport random\nfrom datetime import datetime\nimport platform\n\nmprint('allimports done')\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('master', 'local',\n \"\"\"BNS name of the TensorFlow runtime to use.\"\"\")\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef tfrecordwriter(tfrecords_path):\n with gfs(tfrecords_path) as fname:\n writer = tf.python_io.TFRecordWriter(fname)\n yield writer\n writer.close()\n sys.stdout.flush()\n\n\n# read from google file system\n@contextmanager\ndef gfsread(name):\n tmpdir = tempfile.mkdtemp()\n tmpfname = tmpdir+'/tmp'\n gfile.Copy(name,tmpfname)\n yield tmpfname\n\n# write to google file system\n@contextmanager\ndef gfs(name,suffix='.tmpdata'):\n tmp_file = tempfile.NamedTemporaryFile(mode='w',suffix=suffix)\n mprint('writing '+name+' to tmp file')\n yield tmp_file.name\n gfile.Copy(tmp_file.name,name,overwrite=True)\n\n# create the model. The model is almost identical to the LeNet model except for\n# the introduction of the cropping and doubling of the number of nodes in the first fully\n# connected layer to 240 and adding a dropout with probability of 0.5. 
Since this is a\n# regression problem, we use a mean-squared-error loss and the 'adam' optimizer\ndef Lenet():\n    height = 160\n    width = 320\n    depth = 3\n    model=Sequential()\n    model.add(Lambda(lambda x:x/255-0.5,input_shape=(height, width, depth)))\n    model.add(Cropping2D(cropping=((70,25),(0,0))))\n    model.add(Conv2D(20, (5, 5), padding='same',activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n    model.add(Conv2D(50, (5, 5), padding='same',activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n    model.add(Flatten())\n    model.add(Dense(240,activation='relu'))\n    model.add(Dropout(0.5))\n    model.add(Dense(84,activation='relu'))\n    model.add(Dense(1))\n    model.compile(loss='mse',optimizer='adam')\n    return model\n\nDrivingFrame = collections.namedtuple('DrivingFrame', 'center left right steering throttle brake speed')\ndef fromTFExampleMoreFeatures(rcrd):\n    ex = tf.train.Example()\n    ex.ParseFromString(rcrd)\n    height = 160\n    width = 320\n    def toimg(s):\n        # np.frombuffer replaces the deprecated np.fromstring for binary input\n        return np.frombuffer(ex.features.feature[s].bytes_list.value[0],dtype=np.float32).reshape((height,width,-1))\n    def toflt(s):\n        return ex.features.feature[s].float_list.value[0]\n    return DrivingFrame(center = toimg('center'),\n                        left = toimg('left'),\n                        right = toimg('right'),\n                        steering = toflt('steering'),\n                        throttle=toflt('throttle'),\n                        brake = toflt('brake'),\n                        speed = toflt('speed'))\n\n# Create the generators needed to fit the model. Both the training and validation\n# generators are returned, together with the number of steps to take on each.\ndef train_validate_generators(sstable_path,cross_validation_ratio=0.1,batch_size=256):\n    mprint('train_validate_generators')\n    table = sstable.SSTable(sstable_path)\n    n = len(table)\n    cv_start = int(n*(1.0-cross_validation_ratio))\n    mprint(\"number of entries in table : \"+str(n))\n    num_valid = n-cv_start\n    mprint(\"num_valid : \"+str(num_valid*3))\n    num_train = cv_start\n    mprint(\"num_train : \"+str(num_train*3))\n    num_valid_steps = int(num_valid/batch_size)+(1 if num_valid%batch_size != 0 else 0)\n    num_train_steps = int(num_train/batch_size)\n    cv_start_key = next(table.iteritems(start_offset=cv_start))\n    tgen = train_generator(sstable_path,batch_size,0.5,None,cv_start_key,None)\n    vgen = valid_generator(sstable_path,batch_size,cv_start_key,None,None)\n    return tgen,num_train_steps*3,vgen,num_valid_steps*3\n\n# example_generator parses every record and yields training examples for the center,\n# left and right camera images\ndef example_generator(sstable_path,start,stop,start_offset,cycle):\n    table = sstable.SSTable(sstable_path)\n    while True:\n        for k,v in table.iteritems(start_offset=start_offset):\n            f=fromTFExampleMoreFeatures(v)\n            yield (f.center,f.steering)\n            yield (f.right,f.steering-0.2)\n            yield (f.left,f.steering+0.2)\n        if not cycle:\n            mprint('finished non-cyclic example generator')\n            break\n\n# A function to weight each example. An attempt was made to use a higher weight for examples with non-zero\n# steering angles. 
However, that degraded the performance.\ndef weight_fn(batch_labels):\n    return np.ones_like(np.squeeze(batch_labels)) #-0.5+2/(1+np.exp((-1.0/3)*np.square(np.squeeze(batch_labels))))\n\n# generate training examples\ndef train_generator(sstable_path,batch_size,reject_prob,start,stop,start_offset):\n    mprint('train_generator')\n    height = 160\n    width = 320\n    batch_features = np.zeros((batch_size, height, width, 3))\n    batch_labels = np.zeros((batch_size,1))\n    yieldid=0\n    curid=0\n    for img,str_angle in example_generator(sstable_path,start,stop,start_offset,True):\n        if random.uniform(0.0,1.0)